drivers/net/wireless/iwlwifi/iwl3945-base.c  [linux-2.6.git, commit 6bbc887449ca7fe33934027cac930bd424ae23ad]
1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4  *
5  * Portions of this file are derived from the ipw3945 project, as well
6  * as portions of the ieee80211 subsystem header files.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2 of the GNU General Public License as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20  *
21  * The full GNU General Public License is included in this distribution in the
22  * file called LICENSE.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/pci.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/delay.h>
36 #include <linux/skbuff.h>
37 #include <linux/netdevice.h>
38 #include <linux/wireless.h>
39 #include <linux/firmware.h>
40 #include <linux/etherdevice.h>
41 #include <linux/if_arp.h>
42
43 #include <net/ieee80211_radiotap.h>
44 #include <net/lib80211.h>
45 #include <net/mac80211.h>
46
47 #include <asm/div64.h>
48
49 #define DRV_NAME        "iwl3945"
50
51 #include "iwl-commands.h"
52 #include "iwl-3945.h"
53 #include "iwl-3945-fh.h"
54 #include "iwl-helpers.h"
55 #include "iwl-core.h"
56 #include "iwl-dev.h"
57
58 static int iwl3945_tx_queue_update_write_ptr(struct iwl_priv *priv,
59                                   struct iwl3945_tx_queue *txq);
60
61 /******************************************************************************
62  *
63  * module boiler plate
64  *
65  ******************************************************************************/
66
67 /* module parameters */
68 static int iwl3945_param_disable_hw_scan; /* def: 0 = use 3945's h/w scan */
69 static u32 iwl3945_param_debug;    /* def: 0 = minimal debug log messages */
70 static int iwl3945_param_disable;  /* def: 0 = enable radio */
71 static int iwl3945_param_antenna;  /* def: 0 = both antennas (use diversity) */
72 int iwl3945_param_hwcrypto;        /* def: 0 = use software encryption */
73 int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
74
75 /*
76  * module name, copyright, version, etc.
77  */
78
79 #define DRV_DESCRIPTION \
80 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
81
82 #ifdef CONFIG_IWL3945_DEBUG
83 #define VD "d"
84 #else
85 #define VD
86 #endif
87
88 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
89 #define VS "s"
90 #else
91 #define VS
92 #endif
93
94 #define IWL39_VERSION "1.2.26k" VD VS
95 #define DRV_COPYRIGHT   "Copyright(c) 2003-2008 Intel Corporation"
96 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
97 #define DRV_VERSION     IWL39_VERSION
98
99
100 MODULE_DESCRIPTION(DRV_DESCRIPTION);
101 MODULE_VERSION(DRV_VERSION);
102 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
103 MODULE_LICENSE("GPL");
104
105 static const struct ieee80211_supported_band *iwl3945_get_band(
106                 struct iwl_priv *priv, enum ieee80211_band band)
107 {
108         return priv->hw->wiphy->bands[band];
109 }
110
111 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
112  * DMA services
113  *
114  * Theory of operation
115  *
116  * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
117  * of buffer descriptors, each of which points to one or more data buffers for
118  * the device to read from or fill.  Driver and device exchange status of each
119  * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
120  * entries in each circular buffer, to protect against confusing empty and full
121  * queue states.
122  *
123  * The device reads or writes the data in the queues via the device's several
124  * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
125  *
126  * For Tx queues, there are low mark and high mark limits.  If, after queuing
127  * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
128  * When reclaiming packets (on a 'tx done' IRQ), if the free space becomes
129  * > high mark, the Tx queue is resumed.
130  *
131  * The 3945 operates with six queues:  One receive queue, one transmit queue
132  * (#4) for sending commands to the device firmware, and four transmit queues
133  * (#0-3) for data tx via EDCA.  An additional 2 HCCA queues are unused.
134  ***************************************************/
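
/*
 * Editor's illustration (a sketch under assumptions, not part of the
 * original driver): because every queue size used here is a power of two,
 * the index wrap-around and the "keep 2 entries empty" rule described
 * above reduce to simple mask arithmetic.  The helpers below are
 * hypothetical stand-ins for the real iwl_queue_inc_wrap() and
 * iwl_queue_dec_wrap() helpers declared in iwl-helpers.h.
 */
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	/* n_bd must be a power of two for the mask to act as a modulo */
	return (index + 1) & (n_bd - 1);
}

static inline int example_queue_dec_wrap(int index, int n_bd)
{
	/* wraps index 0 back around to n_bd - 1 */
	return (index - 1) & (n_bd - 1);
}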
135
136 int iwl3945_x2_queue_used(const struct iwl_queue *q, int i)
137 {
138         return q->write_ptr > q->read_ptr ?
139                 (i >= q->read_ptr && i < q->write_ptr) :
140                 !(i < q->read_ptr && i >= q->write_ptr);
141 }
142
143 /**
144  * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
145  */
146 static int iwl3945_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
147                           int count, int slots_num, u32 id)
148 {
149         q->n_bd = count;
150         q->n_window = slots_num;
151         q->id = id;
152
153         /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
154          * and iwl_queue_dec_wrap are broken. */
155         BUG_ON(!is_power_of_2(count));
156
157         /* slots_num must be power-of-two size, otherwise
158          * get_cmd_index is broken. */
159         BUG_ON(!is_power_of_2(slots_num));
160
161         q->low_mark = q->n_window / 4;
162         if (q->low_mark < 4)
163                 q->low_mark = 4;
164
165         q->high_mark = q->n_window / 8;
166         if (q->high_mark < 2)
167                 q->high_mark = 2;
168
169         q->write_ptr = q->read_ptr = 0;
170
171         return 0;
172 }
173
174 /**
175  * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
176  */
177 static int iwl3945_tx_queue_alloc(struct iwl_priv *priv,
178                               struct iwl3945_tx_queue *txq, u32 id)
179 {
180         struct pci_dev *dev = priv->pci_dev;
181
182         /* Driver private data, only for Tx (not command) queues,
183          * not shared with device. */
184         if (id != IWL_CMD_QUEUE_NUM) {
185                 txq->txb = kmalloc(sizeof(txq->txb[0]) *
186                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
187                 if (!txq->txb) {
188                         IWL_ERROR("kmalloc for auxiliary BD "
189                                   "structures failed\n");
190                         goto error;
191                 }
192         } else
193                 txq->txb = NULL;
194
195         /* Circular buffer of transmit frame descriptors (TFDs),
196          * shared with device */
197         txq->bd = pci_alloc_consistent(dev,
198                         sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
199                         &txq->q.dma_addr);
200
201         if (!txq->bd) {
202                 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
203                           sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
204                 goto error;
205         }
206         txq->q.id = id;
207
208         return 0;
209
210  error:
211         kfree(txq->txb);
212         txq->txb = NULL;
213
214         return -ENOMEM;
215 }
216
217 /**
218  * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
219  */
220 int iwl3945_tx_queue_init(struct iwl_priv *priv,
221                       struct iwl3945_tx_queue *txq, int slots_num, u32 txq_id)
222 {
223         struct pci_dev *dev = priv->pci_dev;
224         int len;
225         int rc = 0;
226
227         /*
228          * Alloc buffer array for commands (Tx or other types of commands).
229          * For the command queue (#4), allocate command space + one big
230          * command for scan, since the scan command is very large; the system
231          * will not have two scans at the same time, so only one is needed.
232          * For data Tx queues (all other queues), no super-size command
233          * space is needed.
234          */
235         len = sizeof(struct iwl3945_cmd) * slots_num;
236         if (txq_id == IWL_CMD_QUEUE_NUM)
237                 len +=  IWL_MAX_SCAN_SIZE;
238         txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
239         if (!txq->cmd)
240                 return -ENOMEM;
241
242         /* Alloc driver data array and TFD circular buffer */
243         rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
244         if (rc) {
245                 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
246
247                 return -ENOMEM;
248         }
249         txq->need_update = 0;
250
251         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
252          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
253         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
254
255         /* Initialize queue high/low-water, head/tail indexes */
256         iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
257
258         /* Tell device where to find queue, enable DMA channel. */
259         iwl3945_hw_tx_queue_init(priv, txq);
260
261         return 0;
262 }
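
/*
 * Usage sketch (editor's illustration, not from the original source): the
 * driver brings up one command queue and several data queues with the
 * helper above.  The per-queue slot counts used here (TFD_CMD_SLOTS for
 * the command queue, TFD_TX_CMD_SLOTS for data queues) are assumed to be
 * defined in the driver headers.
 */
static inline int example_txq_ctx_init(struct iwl_priv *priv)
{
	int txq_id, rc;

	for (txq_id = 0; txq_id < iwl3945_param_queues_num; txq_id++) {
		/* the command queue (#4) uses a different slot count */
		int slots = (txq_id == IWL_CMD_QUEUE_NUM) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;

		rc = iwl3945_tx_queue_init(priv, &priv->txq39[txq_id],
					   slots, txq_id);
		if (rc)
			return rc;
	}
	return 0;
}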
263
264 /**
265  * iwl3945_tx_queue_free - Deallocate DMA queue.
266  * @txq: Transmit queue to deallocate.
267  *
268  * Empty queue by removing and destroying all BD's.
269  * Free all buffers.
270  * 0-fill, but do not free "txq" descriptor structure.
271  */
272 void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl3945_tx_queue *txq)
273 {
274         struct iwl_queue *q = &txq->q;
275         struct pci_dev *dev = priv->pci_dev;
276         int len;
277
278         if (q->n_bd == 0)
279                 return;
280
281         /* first, empty all BD's */
282         for (; q->write_ptr != q->read_ptr;
283              q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
284                 iwl3945_hw_txq_free_tfd(priv, txq);
285
286         len = sizeof(struct iwl3945_cmd) * q->n_window;
287         if (q->id == IWL_CMD_QUEUE_NUM)
288                 len += IWL_MAX_SCAN_SIZE;
289
290         /* De-alloc array of command/tx buffers */
291         pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
292
293         /* De-alloc circular buffer of TFDs */
294         if (txq->q.n_bd)
295                 pci_free_consistent(dev, sizeof(struct iwl3945_tfd_frame) *
296                                     txq->q.n_bd, txq->bd, txq->q.dma_addr);
297
298         /* De-alloc array of per-TFD driver data */
299         kfree(txq->txb);
300         txq->txb = NULL;
301
302         /* 0-fill queue descriptor structure */
303         memset(txq, 0, sizeof(*txq));
304 }
305
306 /*************** STATION TABLE MANAGEMENT ****
307  * mac80211 should be examined to determine if sta_info is duplicating
308  * the functionality provided here
309  */
310
311 /**************************************************************/
312 #if 0 /* temporarily disabled until we add real station removal */
313 /**
314  * iwl3945_remove_station - Remove driver's knowledge of station.
315  *
316  * NOTE:  This does not remove station from device's station table.
317  */
318 static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
319 {
320         int index = IWL_INVALID_STATION;
321         int i;
322         unsigned long flags;
323
324         spin_lock_irqsave(&priv->sta_lock, flags);
325
326         if (is_ap)
327                 index = IWL_AP_ID;
328         else if (is_broadcast_ether_addr(addr))
329                 index = priv->hw_params.bcast_sta_id;
330         else
331                 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
332                         if (priv->stations_39[i].used &&
333                             !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
334                                                 addr)) {
335                                 index = i;
336                                 break;
337                         }
338
339         if (unlikely(index == IWL_INVALID_STATION))
340                 goto out;
341
342         if (priv->stations_39[index].used) {
343                 priv->stations_39[index].used = 0;
344                 priv->num_stations--;
345         }
346
347         BUG_ON(priv->num_stations < 0);
348
349 out:
350         spin_unlock_irqrestore(&priv->sta_lock, flags);
351         return 0;
352 }
353 #endif
354
355 /**
356  * iwl3945_clear_stations_table - Clear the driver's station table
357  *
358  * NOTE:  This does not clear or otherwise alter the device's station table.
359  */
360 static void iwl3945_clear_stations_table(struct iwl_priv *priv)
361 {
362         unsigned long flags;
363
364         spin_lock_irqsave(&priv->sta_lock, flags);
365
366         priv->num_stations = 0;
367         memset(priv->stations_39, 0, sizeof(priv->stations_39));
368
369         spin_unlock_irqrestore(&priv->sta_lock, flags);
370 }
371
372 /**
373  * iwl3945_add_station - Add station to station tables in driver and device
374  */
375 u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
376 {
377         int i;
378         int index = IWL_INVALID_STATION;
379         struct iwl3945_station_entry *station;
380         unsigned long flags_spin;
381         u8 rate;
382
383         spin_lock_irqsave(&priv->sta_lock, flags_spin);
384         if (is_ap)
385                 index = IWL_AP_ID;
386         else if (is_broadcast_ether_addr(addr))
387                 index = priv->hw_params.bcast_sta_id;
388         else
389                 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
390                         if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
391                                                 addr)) {
392                                 index = i;
393                                 break;
394                         }
395
396                         if (!priv->stations_39[i].used &&
397                             index == IWL_INVALID_STATION)
398                                 index = i;
399                 }
400
401         /* These two conditions have the same outcome but are kept separate
402           since they have different meanings */
403         if (unlikely(index == IWL_INVALID_STATION)) {
404                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
405                 return index;
406         }
407
408         if (priv->stations_39[index].used &&
409            !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
410                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
411                 return index;
412         }
413
414         IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
415         station = &priv->stations_39[index];
416         station->used = 1;
417         priv->num_stations++;
418
419         /* Set up the REPLY_ADD_STA command to send to device */
420         memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
421         memcpy(station->sta.sta.addr, addr, ETH_ALEN);
422         station->sta.mode = 0;
423         station->sta.sta.sta_id = index;
424         station->sta.station_flags = 0;
425
426         if (priv->band == IEEE80211_BAND_5GHZ)
427                 rate = IWL_RATE_6M_PLCP;
428         else
429                 rate =  IWL_RATE_1M_PLCP;
430
431         /* Turn on both antennas for the station... */
432         station->sta.rate_n_flags =
433                         iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
434
435         spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
436
437         /* Add station to device's station table */
438         iwl3945_send_add_station(priv, &station->sta, flags);
439         return index;
440
441 }
442
443 /*************** DRIVER STATUS FUNCTIONS   *****/
444
445 static inline int iwl3945_is_ready(struct iwl_priv *priv)
446 {
447         /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
448          * set but EXIT_PENDING is not */
449         return test_bit(STATUS_READY, &priv->status) &&
450                test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
451                !test_bit(STATUS_EXIT_PENDING, &priv->status);
452 }
453
454 static inline int iwl3945_is_alive(struct iwl_priv *priv)
455 {
456         return test_bit(STATUS_ALIVE, &priv->status);
457 }
458
459 static inline int iwl3945_is_init(struct iwl_priv *priv)
460 {
461         return test_bit(STATUS_INIT, &priv->status);
462 }
463
464 static inline int iwl3945_is_rfkill_sw(struct iwl_priv *priv)
465 {
466         return test_bit(STATUS_RF_KILL_SW, &priv->status);
467 }
468
469 static inline int iwl3945_is_rfkill_hw(struct iwl_priv *priv)
470 {
471         return test_bit(STATUS_RF_KILL_HW, &priv->status);
472 }
473
474 static inline int iwl3945_is_rfkill(struct iwl_priv *priv)
475 {
476         return iwl3945_is_rfkill_hw(priv) ||
477                 iwl3945_is_rfkill_sw(priv);
478 }
479
480 static inline int iwl3945_is_ready_rf(struct iwl_priv *priv)
481 {
482
483         if (iwl3945_is_rfkill(priv))
484                 return 0;
485
486         return iwl3945_is_ready(priv);
487 }
488
489 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
490
491 #define IWL_CMD(x) case x: return #x
492 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
493
494 /**
495  * iwl3945_enqueue_hcmd - enqueue a uCode command
496  * @priv: device private data pointer
497  * @cmd: a pointer to the ucode command structure
498  *
499  * The function returns < 0 values to indicate that the operation
500  * failed. On success, it returns the index (>= 0) of the command in
501  * the command queue.
502  */
503 static int iwl3945_enqueue_hcmd(struct iwl_priv *priv, struct iwl3945_host_cmd *cmd)
504 {
505         struct iwl3945_tx_queue *txq = &priv->txq39[IWL_CMD_QUEUE_NUM];
506         struct iwl_queue *q = &txq->q;
507         struct iwl3945_tfd_frame *tfd;
508         u32 *control_flags;
509         struct iwl3945_cmd *out_cmd;
510         u32 idx;
511         u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
512         dma_addr_t phys_addr;
513         int pad;
514         u16 count;
515         int ret;
516         unsigned long flags;
517
518         /* If any of the command structures end up being larger than
519          * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command,
520          * then we will need to increase the size of the TFD entries */
521         BUG_ON((fix_size > TFD39_MAX_PAYLOAD_SIZE) &&
522                !(cmd->meta.flags & CMD_SIZE_HUGE));
523
524
525         if (iwl3945_is_rfkill(priv)) {
526                 IWL_DEBUG_INFO("Not sending command - RF KILL");
527                 return -EIO;
528         }
529
530         if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
531                 IWL_ERROR("No space for Tx\n");
532                 return -ENOSPC;
533         }
534
535         spin_lock_irqsave(&priv->hcmd_lock, flags);
536
537         tfd = &txq->bd[q->write_ptr];
538         memset(tfd, 0, sizeof(*tfd));
539
540         control_flags = (u32 *) tfd;
541
542         idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
543         out_cmd = &txq->cmd[idx];
544
545         out_cmd->hdr.cmd = cmd->id;
546         memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
547         memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
548
549         /* At this point, the out_cmd now has all of the incoming cmd
550          * information */
551
552         out_cmd->hdr.flags = 0;
553         out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
554                         INDEX_TO_SEQ(q->write_ptr));
555         if (out_cmd->meta.flags & CMD_SIZE_HUGE)
556                 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
557
558         phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
559                         offsetof(struct iwl3945_cmd, hdr);
560         iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
561
562         pad = U32_PAD(cmd->len);
563         count = TFD_CTL_COUNT_GET(*control_flags);
564         *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);
565
566         IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
567                      "%d bytes at %d[%d]:%d\n",
568                      get_cmd_string(out_cmd->hdr.cmd),
569                      out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
570                      fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
571
572         txq->need_update = 1;
573
574         /* Increment and update queue's write index */
575         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
576         ret = iwl3945_tx_queue_update_write_ptr(priv, txq);
577
578         spin_unlock_irqrestore(&priv->hcmd_lock, flags);
579         return ret ? ret : idx;
580 }
581
582 static int iwl3945_send_cmd_async(struct iwl_priv *priv, struct iwl3945_host_cmd *cmd)
583 {
584         int ret;
585
586         BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
587
588         /* An asynchronous command can not expect an SKB to be set. */
589         BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
590
591         /* An asynchronous command MUST have a callback. */
592         BUG_ON(!cmd->meta.u.callback);
593
594         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
595                 return -EBUSY;
596
597         ret = iwl3945_enqueue_hcmd(priv, cmd);
598         if (ret < 0) {
599                 IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
600                           get_cmd_string(cmd->id), ret);
601                 return ret;
602         }
603         return 0;
604 }
605
606 static int iwl3945_send_cmd_sync(struct iwl_priv *priv, struct iwl3945_host_cmd *cmd)
607 {
608         int cmd_idx;
609         int ret;
610
611         BUG_ON(cmd->meta.flags & CMD_ASYNC);
612
613          /* A synchronous command can not have a callback set. */
614         BUG_ON(cmd->meta.u.callback != NULL);
615
616         if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
617                 IWL_ERROR("Error sending %s: Already sending a host command\n",
618                           get_cmd_string(cmd->id));
619                 ret = -EBUSY;
620                 goto out;
621         }
622
623         set_bit(STATUS_HCMD_ACTIVE, &priv->status);
624
625         if (cmd->meta.flags & CMD_WANT_SKB)
626                 cmd->meta.source = &cmd->meta;
627
628         cmd_idx = iwl3945_enqueue_hcmd(priv, cmd);
629         if (cmd_idx < 0) {
630                 ret = cmd_idx;
631                 IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
632                           get_cmd_string(cmd->id), ret);
633                 goto out;
634         }
635
636         ret = wait_event_interruptible_timeout(priv->wait_command_queue,
637                         !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
638                         HOST_COMPLETE_TIMEOUT);
639         if (!ret) {
640                 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
641                         IWL_ERROR("Error sending %s: time out after %dms.\n",
642                                   get_cmd_string(cmd->id),
643                                   jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
644
645                         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
646                         ret = -ETIMEDOUT;
647                         goto cancel;
648                 }
649         }
650
651         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
652                 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
653                                get_cmd_string(cmd->id));
654                 ret = -ECANCELED;
655                 goto fail;
656         }
657         if (test_bit(STATUS_FW_ERROR, &priv->status)) {
658                 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
659                                get_cmd_string(cmd->id));
660                 ret = -EIO;
661                 goto fail;
662         }
663         if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
664                 IWL_ERROR("Error: Response NULL in '%s'\n",
665                           get_cmd_string(cmd->id));
666                 ret = -EIO;
667                 goto cancel;
668         }
669
670         ret = 0;
671         goto out;
672
673 cancel:
674         if (cmd->meta.flags & CMD_WANT_SKB) {
675                 struct iwl3945_cmd *qcmd;
676
677                 /* Cancel the CMD_WANT_SKB flag for the cmd in the
678                  * TX cmd queue. Otherwise, if the response comes
679                  * in later, it could write through an invalid
680                  * address (cmd->meta.source). */
681                 qcmd = &priv->txq39[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
682                 qcmd->meta.flags &= ~CMD_WANT_SKB;
683         }
684 fail:
685         if (cmd->meta.u.skb) {
686                 dev_kfree_skb_any(cmd->meta.u.skb);
687                 cmd->meta.u.skb = NULL;
688         }
689 out:
690         clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
691         return ret;
692 }
693
694 int iwl3945_send_cmd(struct iwl_priv *priv, struct iwl3945_host_cmd *cmd)
695 {
696         if (cmd->meta.flags & CMD_ASYNC)
697                 return iwl3945_send_cmd_async(priv, cmd);
698
699         return iwl3945_send_cmd_sync(priv, cmd);
700 }
701
702 int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
703 {
704         struct iwl3945_host_cmd cmd = {
705                 .id = id,
706                 .len = len,
707                 .data = data,
708         };
709
710         return iwl3945_send_cmd_sync(priv, &cmd);
711 }
712
713 static int __must_check iwl3945_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
714 {
715         struct iwl3945_host_cmd cmd = {
716                 .id = id,
717                 .len = sizeof(val),
718                 .data = &val,
719         };
720
721         return iwl3945_send_cmd_sync(priv, &cmd);
722 }
723
724 int iwl3945_send_statistics_request(struct iwl_priv *priv)
725 {
726         return iwl3945_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
727 }
728
729 /**
730  * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
731  * @band: 2.4 or 5 GHz band
732  * @channel: Any channel valid for the requested band
733
734  *
735  *
736  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
737  * in the staging RXON flag structure based on the band
738  */
739 static int iwl3945_set_rxon_channel(struct iwl_priv *priv,
740                                     enum ieee80211_band band,
741                                     u16 channel)
742 {
743         if (!iwl3945_get_channel_info(priv, band, channel)) {
744                 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
745                                channel, band);
746                 return -EINVAL;
747         }
748
749         if ((le16_to_cpu(priv->staging39_rxon.channel) == channel) &&
750             (priv->band == band))
751                 return 0;
752
753         priv->staging39_rxon.channel = cpu_to_le16(channel);
754         if (band == IEEE80211_BAND_5GHZ)
755                 priv->staging39_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
756         else
757                 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
758
759         priv->band = band;
760
761         IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
762
763         return 0;
764 }
765
766 /**
767  * iwl3945_check_rxon_cmd - check that the RXON structure is valid
768  *
769  * NOTE:  This is really only useful during development and can eventually
770  * be #ifdef'd out once the driver is stable and folks aren't actively
771  * making changes
772  */
773 static int iwl3945_check_rxon_cmd(struct iwl_priv *priv)
774 {
775         int error = 0;
776         int counter = 1;
777         struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
778
779         if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
780                 error |= le32_to_cpu(rxon->flags &
781                                 (RXON_FLG_TGJ_NARROW_BAND_MSK |
782                                  RXON_FLG_RADAR_DETECT_MSK));
783                 if (error)
784                         IWL_WARNING("check 24G fields %d | %d\n",
785                                     counter++, error);
786         } else {
787                 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
788                                 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
789                 if (error)
790                         IWL_WARNING("check 52 fields %d | %d\n",
791                                     counter++, error);
792                 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
793                 if (error)
794                         IWL_WARNING("check 52 CCK %d | %d\n",
795                                     counter++, error);
796         }
797         error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
798         if (error)
799                 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
800
801         /* make sure basic rates 6Mbps and 1Mbps are supported */
802         error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
803                   ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
804         if (error)
805                 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
806
807         error |= (le16_to_cpu(rxon->assoc_id) > 2007);
808         if (error)
809                 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
810
811         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
812                         == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
813         if (error)
814                 IWL_WARNING("check CCK and short slot %d | %d\n",
815                             counter++, error);
816
817         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
818                         == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
819         if (error)
820                 IWL_WARNING("check CCK & auto detect %d | %d\n",
821                             counter++, error);
822
823         error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
824                         RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
825         if (error)
826                 IWL_WARNING("check TGG and auto detect %d | %d\n",
827                             counter++, error);
828
829         if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
830                 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
831                                 RXON_FLG_ANT_A_MSK)) == 0);
832         if (error)
833                 IWL_WARNING("check antenna %d %d\n", counter++, error);
834
835         if (error)
836                 IWL_WARNING("Tuning to channel %d\n",
837                             le16_to_cpu(rxon->channel));
838
839         if (error) {
840                 IWL_ERROR("Invalid iwl3945_rxon_assoc_cmd field values\n");
841                 return -1;
842         }
843         return 0;
844 }
845
846 /**
847  * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
848  * @priv: staging_rxon is compared to active_rxon
849  *
850  * If the RXON structure is changing enough to require a new tune,
851  * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
852  * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
853  */
854 static int iwl3945_full_rxon_required(struct iwl_priv *priv)
855 {
856
857         /* These items are only settable from the full RXON command */
858         if (!(iwl3945_is_associated(priv)) ||
859             compare_ether_addr(priv->staging39_rxon.bssid_addr,
860                                priv->active39_rxon.bssid_addr) ||
861             compare_ether_addr(priv->staging39_rxon.node_addr,
862                                priv->active39_rxon.node_addr) ||
863             compare_ether_addr(priv->staging39_rxon.wlap_bssid_addr,
864                                priv->active39_rxon.wlap_bssid_addr) ||
865             (priv->staging39_rxon.dev_type != priv->active39_rxon.dev_type) ||
866             (priv->staging39_rxon.channel != priv->active39_rxon.channel) ||
867             (priv->staging39_rxon.air_propagation !=
868              priv->active39_rxon.air_propagation) ||
869             (priv->staging39_rxon.assoc_id != priv->active39_rxon.assoc_id))
870                 return 1;
871
872         /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
873          * be updated with the RXON_ASSOC command -- however only some
874          * flag transitions are allowed using RXON_ASSOC */
875
876         /* Check if we are not switching bands */
877         if ((priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
878             (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK))
879                 return 1;
880
881         /* Check if we are switching association toggle */
882         if ((priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
883                 (priv->active39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
884                 return 1;
885
886         return 0;
887 }
888
889 static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
890 {
891         int rc = 0;
892         struct iwl_rx_packet *res = NULL;
893         struct iwl3945_rxon_assoc_cmd rxon_assoc;
894         struct iwl3945_host_cmd cmd = {
895                 .id = REPLY_RXON_ASSOC,
896                 .len = sizeof(rxon_assoc),
897                 .meta.flags = CMD_WANT_SKB,
898                 .data = &rxon_assoc,
899         };
900         const struct iwl3945_rxon_cmd *rxon1 = &priv->staging39_rxon;
901         const struct iwl3945_rxon_cmd *rxon2 = &priv->active39_rxon;
902
903         if ((rxon1->flags == rxon2->flags) &&
904             (rxon1->filter_flags == rxon2->filter_flags) &&
905             (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
906             (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
907                 IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
908                 return 0;
909         }
910
911         rxon_assoc.flags = priv->staging39_rxon.flags;
912         rxon_assoc.filter_flags = priv->staging39_rxon.filter_flags;
913         rxon_assoc.ofdm_basic_rates = priv->staging39_rxon.ofdm_basic_rates;
914         rxon_assoc.cck_basic_rates = priv->staging39_rxon.cck_basic_rates;
915         rxon_assoc.reserved = 0;
916
917         rc = iwl3945_send_cmd_sync(priv, &cmd);
918         if (rc)
919                 return rc;
920
921         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
922         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
923                 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
924                 rc = -EIO;
925         }
926
927         priv->alloc_rxb_skb--;
928         dev_kfree_skb_any(cmd.meta.u.skb);
929
930         return rc;
931 }
932
933 /**
934  * iwl3945_commit_rxon - commit staging_rxon to hardware
935  *
936  * The RXON command in staging_rxon is committed to the hardware and
937  * the active_rxon structure is updated with the new data.  This
938  * function correctly transitions out of the RXON_ASSOC_MSK state if
939  * a HW tune is required based on the RXON structure changes.
940  */
941 static int iwl3945_commit_rxon(struct iwl_priv *priv)
942 {
943         /* cast away the const for active_rxon in this function */
944         struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active39_rxon;
945         int rc = 0;
946
947         if (!iwl3945_is_alive(priv))
948                 return -1;
949
950         /* always get timestamp with Rx frame */
951         priv->staging39_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
952
953         /* select antenna */
954         priv->staging39_rxon.flags &=
955             ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
956         priv->staging39_rxon.flags |= iwl3945_get_antenna_flags(priv);
957
958         rc = iwl3945_check_rxon_cmd(priv);
959         if (rc) {
960                 IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
961                 return -EINVAL;
962         }
963
964         /* If we don't need to send a full RXON, we can use
965          * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
966          * and other flags for the current radio configuration. */
967         if (!iwl3945_full_rxon_required(priv)) {
968                 rc = iwl3945_send_rxon_assoc(priv);
969                 if (rc) {
970                         IWL_ERROR("Error setting RXON_ASSOC "
971                                   "configuration (%d).\n", rc);
972                         return rc;
973                 }
974
975                 memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
976
977                 return 0;
978         }
979
980         /* If we are currently associated and the new config requires
981          * an RXON_ASSOC and the new config wants the associated mask enabled,
982          * we must clear the association bit from the active configuration
983          * before we apply the new config */
984         if (iwl3945_is_associated(priv) &&
985             (priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
986                 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
987                 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
988
989                 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
990                                       sizeof(struct iwl3945_rxon_cmd),
991                                       &priv->active39_rxon);
992
993                 /* If the mask clearing failed then we set
994                  * active_rxon back to what it was previously */
995                 if (rc) {
996                         active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
997                         IWL_ERROR("Error clearing ASSOC_MSK on current "
998                                   "configuration (%d).\n", rc);
999                         return rc;
1000                 }
1001         }
1002
1003         IWL_DEBUG_INFO("Sending RXON\n"
1004                        "* with%s RXON_FILTER_ASSOC_MSK\n"
1005                        "* channel = %d\n"
1006                        "* bssid = %pM\n",
1007                        ((priv->staging39_rxon.filter_flags &
1008                          RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1009                        le16_to_cpu(priv->staging39_rxon.channel),
1010                        priv->staging39_rxon.bssid_addr);
1011
1012         /* Apply the new configuration */
1013         rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
1014                               sizeof(struct iwl3945_rxon_cmd), &priv->staging39_rxon);
1015         if (rc) {
1016                 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1017                 return rc;
1018         }
1019
1020         memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
1021
1022         iwl3945_clear_stations_table(priv);
1023
1024         /* If we issue a new RXON command which required a tune then we must
1025          * send a new TXPOWER command or we won't be able to Tx any frames */
1026         rc = iwl3945_hw_reg_send_txpower(priv);
1027         if (rc) {
1028                 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1029                 return rc;
1030         }
1031
1032         /* Add the broadcast address so we can send broadcast frames */
1033         if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
1034             IWL_INVALID_STATION) {
1035                 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1036                 return -EIO;
1037         }
1038
1039         /* If we have set the ASSOC_MSK and we are in BSS mode then
1040          * add the IWL_AP_ID to the station rate table */
1041         if (iwl3945_is_associated(priv) &&
1042             (priv->iw_mode == NL80211_IFTYPE_STATION))
1043                 if (iwl3945_add_station(priv, priv->active39_rxon.bssid_addr, 1, 0)
1044                     == IWL_INVALID_STATION) {
1045                         IWL_ERROR("Error adding AP address for transmit.\n");
1046                         return -EIO;
1047                 }
1048
1049         /* Init the hardware's rate fallback order based on the band */
1050         rc = iwl3945_init_hw_rate_table(priv);
1051         if (rc) {
1052                 IWL_ERROR("Error setting HW rate table: %02X\n", rc);
1053                 return -EIO;
1054         }
1055
1056         return 0;
1057 }
1058
1059 static int iwl3945_send_bt_config(struct iwl_priv *priv)
1060 {
1061         struct iwl_bt_cmd bt_cmd = {
1062                 .flags = 3,
1063                 .lead_time = 0xAA,
1064                 .max_kill = 1,
1065                 .kill_ack_mask = 0,
1066                 .kill_cts_mask = 0,
1067         };
1068
1069         return iwl3945_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1070                                         sizeof(bt_cmd), &bt_cmd);
1071 }
1072
1073 static int iwl3945_send_scan_abort(struct iwl_priv *priv)
1074 {
1075         int rc = 0;
1076         struct iwl_rx_packet *res;
1077         struct iwl3945_host_cmd cmd = {
1078                 .id = REPLY_SCAN_ABORT_CMD,
1079                 .meta.flags = CMD_WANT_SKB,
1080         };
1081
1082         /* If there isn't a scan actively going on in the hardware
1083          * then we are in between scan bands and not actually
1084          * actively scanning, so don't send the abort command */
1085         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1086                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1087                 return 0;
1088         }
1089
1090         rc = iwl3945_send_cmd_sync(priv, &cmd);
1091         if (rc) {
1092                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1093                 return rc;
1094         }
1095
1096         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1097         if (res->u.status != CAN_ABORT_STATUS) {
1098                 /* The scan abort will return 1 for success or
1099                  * 2 for "failure".  A failure condition can be
1100                  * due to simply not being in an active scan which
1101                  * can occur if we send the scan abort before the
1102                  * microcode has notified us that a scan is
1103                  * completed. */
1104                 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1105                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1106                 clear_bit(STATUS_SCAN_HW, &priv->status);
1107         }
1108
1109         dev_kfree_skb_any(cmd.meta.u.skb);
1110
1111         return rc;
1112 }
1113
1114 static int iwl3945_card_state_sync_callback(struct iwl_priv *priv,
1115                                         struct iwl3945_cmd *cmd,
1116                                         struct sk_buff *skb)
1117 {
1118         return 1;
1119 }
1120
1121 /*
1122  * CARD_STATE_CMD
1123  *
1124  * Use: Sets the device's internal card state to enable, disable, or halt
1125  *
1126  * When in the 'enable' state the card operates as normal.
1127  * When in the 'disable' state, the card enters into a low power mode.
1128  * When in the 'halt' state, the card is shut down and must be fully
1129  * restarted to come back on.
1130  */
1131 static int iwl3945_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1132 {
1133         struct iwl3945_host_cmd cmd = {
1134                 .id = REPLY_CARD_STATE_CMD,
1135                 .len = sizeof(u32),
1136                 .data = &flags,
1137                 .meta.flags = meta_flag,
1138         };
1139
1140         if (meta_flag & CMD_ASYNC)
1141                 cmd.meta.u.callback = iwl3945_card_state_sync_callback;
1142
1143         return iwl3945_send_cmd(priv, &cmd);
1144 }
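
/*
 * Usage sketch (editor's illustration, not from the original source): a
 * hypothetical helper that asks the uCode to drop into its low-power
 * 'disable' state without blocking the caller.  The flag value 0 for
 * 'disable' is an assumption; the real CARD_STATE_CMD_* values live in
 * the command headers.
 */
static inline int example_card_disable(struct iwl_priv *priv)
{
	/* CMD_ASYNC makes iwl3945_send_card_state() attach its callback */
	return iwl3945_send_card_state(priv, 0 /* assumed 'disable' */,
				       CMD_ASYNC);
}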
1145
1146 static int iwl3945_add_sta_sync_callback(struct iwl_priv *priv,
1147                                      struct iwl3945_cmd *cmd, struct sk_buff *skb)
1148 {
1149         struct iwl_rx_packet *res = NULL;
1150
1151         if (!skb) {
1152                 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1153                 return 1;
1154         }
1155
1156         res = (struct iwl_rx_packet *)skb->data;
1157         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1158                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1159                           res->hdr.flags);
1160                 return 1;
1161         }
1162
1163         switch (res->u.add_sta.status) {
1164         case ADD_STA_SUCCESS_MSK:
1165                 break;
1166         default:
1167                 break;
1168         }
1169
1170         /* We didn't cache the SKB; let the caller free it */
1171         return 1;
1172 }
1173
1174 int iwl3945_send_add_station(struct iwl_priv *priv,
1175                          struct iwl3945_addsta_cmd *sta, u8 flags)
1176 {
1177         struct iwl_rx_packet *res = NULL;
1178         int rc = 0;
1179         struct iwl3945_host_cmd cmd = {
1180                 .id = REPLY_ADD_STA,
1181                 .len = sizeof(struct iwl3945_addsta_cmd),
1182                 .meta.flags = flags,
1183                 .data = sta,
1184         };
1185
1186         if (flags & CMD_ASYNC)
1187                 cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
1188         else
1189                 cmd.meta.flags |= CMD_WANT_SKB;
1190
1191         rc = iwl3945_send_cmd(priv, &cmd);
1192
1193         if (rc || (flags & CMD_ASYNC))
1194                 return rc;
1195
1196         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1197         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1198                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1199                           res->hdr.flags);
1200                 rc = -EIO;
1201         }
1202
1203         if (rc == 0) {
1204                 switch (res->u.add_sta.status) {
1205                 case ADD_STA_SUCCESS_MSK:
1206                         IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1207                         break;
1208                 default:
1209                         rc = -EIO;
1210                         IWL_WARNING("REPLY_ADD_STA failed\n");
1211                         break;
1212                 }
1213         }
1214
1215         priv->alloc_rxb_skb--;
1216         dev_kfree_skb_any(cmd.meta.u.skb);
1217
1218         return rc;
1219 }
1220
1221 static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
1222                                    struct ieee80211_key_conf *keyconf,
1223                                    u8 sta_id)
1224 {
1225         unsigned long flags;
1226         __le16 key_flags = 0;
1227
1228         switch (keyconf->alg) {
1229         case ALG_CCMP:
1230                 key_flags |= STA_KEY_FLG_CCMP;
1231                 key_flags |= cpu_to_le16(
1232                                 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1233                 key_flags &= ~STA_KEY_FLG_INVALID;
1234                 break;
1235         case ALG_TKIP:
1236         case ALG_WEP:
1237         default:
1238                 return -EINVAL;
1239         }
1240         spin_lock_irqsave(&priv->sta_lock, flags);
1241         priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
1242         priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
1243         memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
1244                keyconf->keylen);
1245
1246         memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
1247                keyconf->keylen);
1248         priv->stations_39[sta_id].sta.key.key_flags = key_flags;
1249         priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1250         priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1251
1252         spin_unlock_irqrestore(&priv->sta_lock, flags);
1253
1254         IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1255         iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
1256         return 0;
1257 }
1258
1259 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1260 {
1261         unsigned long flags;
1262
1263         spin_lock_irqsave(&priv->sta_lock, flags);
1264         memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
1265         memset(&priv->stations_39[sta_id].sta.key, 0,
1266                 sizeof(struct iwl4965_keyinfo));
1267         priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1268         priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1269         priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1270         spin_unlock_irqrestore(&priv->sta_lock, flags);
1271
1272         IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1273         iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
1274         return 0;
1275 }
1276
1277 static void iwl3945_clear_free_frames(struct iwl_priv *priv)
1278 {
1279         struct list_head *element;
1280
1281         IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1282                        priv->frames_count);
1283
1284         while (!list_empty(&priv->free_frames)) {
1285                 element = priv->free_frames.next;
1286                 list_del(element);
1287                 kfree(list_entry(element, struct iwl3945_frame, list));
1288                 priv->frames_count--;
1289         }
1290
1291         if (priv->frames_count) {
1292                 IWL_WARNING("%d frames still in use.  Did we lose one?\n",
1293                             priv->frames_count);
1294                 priv->frames_count = 0;
1295         }
1296 }
1297
1298 static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
1299 {
1300         struct iwl3945_frame *frame;
1301         struct list_head *element;
1302         if (list_empty(&priv->free_frames)) {
1303                 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1304                 if (!frame) {
1305                         IWL_ERROR("Could not allocate frame!\n");
1306                         return NULL;
1307                 }
1308
1309                 priv->frames_count++;
1310                 return frame;
1311         }
1312
1313         element = priv->free_frames.next;
1314         list_del(element);
1315         return list_entry(element, struct iwl3945_frame, list);
1316 }
1317
1318 static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
1319 {
1320         memset(frame, 0, sizeof(*frame));
1321         list_add(&frame->list, &priv->free_frames);
1322 }
1323
1324 unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
1325                                 struct ieee80211_hdr *hdr,
1326                                 int left)
1327 {
1328
1329         if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
1330             ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1331              (priv->iw_mode != NL80211_IFTYPE_AP)))
1332                 return 0;
1333
1334         if (priv->ibss_beacon->len > left)
1335                 return 0;
1336
1337         memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1338
1339         return priv->ibss_beacon->len;
1340 }
1341
1342 static u8 iwl3945_rate_get_lowest_plcp(struct iwl_priv *priv)
1343 {
1344         u8 i;
1345         int rate_mask;
1346
1347         /* Set rate mask*/
1348         if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
1349                 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
1350         else
1351                 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
1352
1353         for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1354              i = iwl3945_rates[i].next_ieee) {
1355                 if (rate_mask & (1 << i))
1356                         return iwl3945_rates[i].plcp;
1357         }
1358
1359         /* No valid rate was found. Assign the lowest one */
1360         if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
1361                 return IWL_RATE_1M_PLCP;
1362         else
1363                 return IWL_RATE_6M_PLCP;
1364 }
1365
1366 static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
1367 {
1368         struct iwl3945_frame *frame;
1369         unsigned int frame_size;
1370         int rc;
1371         u8 rate;
1372
1373         frame = iwl3945_get_free_frame(priv);
1374
1375         if (!frame) {
1376                 IWL_ERROR("Could not obtain free frame buffer for beacon "
1377                           "command.\n");
1378                 return -ENOMEM;
1379         }
1380
1381         rate = iwl3945_rate_get_lowest_plcp(priv);
1382
1383         frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
1384
1385         rc = iwl3945_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1386                               &frame->u.cmd[0]);
1387
1388         iwl3945_free_frame(priv, frame);
1389
1390         return rc;
1391 }
1392
1393 /******************************************************************************
1394  *
1395  * EEPROM related functions
1396  *
1397  ******************************************************************************/
1398
1399 static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1400 {
1401         memcpy(mac, priv->eeprom39.mac_address, 6);
1402 }
1403
1404 /*
1405  * Clear the OWNER_MSK, to establish driver (instead of uCode running on
1406  * embedded controller) as EEPROM reader; each read is a series of pulses
1407  * to/from the EEPROM chip, not a single event, so even reads could conflict
1408  * if they weren't arbitrated by some ownership mechanism.  Here, the driver
1409  * simply claims ownership, which should be safe when this function is called
1410  * (i.e. before loading uCode!).
1411  */
1412 static inline int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
1413 {
1414         _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
1415         return 0;
1416 }
1417
1418 /**
1419  * iwl3945_eeprom_init - read EEPROM contents
1420  *
1421  * Load the EEPROM contents from adapter into priv->eeprom39
1422  *
1423  * NOTE:  This routine uses the non-debug IO access functions.
1424  */
1425 int iwl3945_eeprom_init(struct iwl_priv *priv)
1426 {
1427         u16 *e = (u16 *)&priv->eeprom39;
1428         u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1429         int sz = sizeof(priv->eeprom39);
1430         int ret;
1431         u16 addr;
1432
1433         /* The EEPROM structure has several padding buffers within it,
1434          * and adding new EEPROM maps is subject to programmer errors
1435          * which may be very difficult to identify without explicitly
1436          * checking the resulting size of the EEPROM map. */
1437         BUILD_BUG_ON(sizeof(priv->eeprom39) != IWL_EEPROM_IMAGE_SIZE);
1438
1439         if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1440                 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1441                 return -ENOENT;
1442         }
1443
1444         /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1445         ret = iwl3945_eeprom_acquire_semaphore(priv);
1446         if (ret < 0) {
1447                 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1448                 return -ENOENT;
1449         }
1450
1451         /* eeprom is an array of 16bit values */
1452         for (addr = 0; addr < sz; addr += sizeof(u16)) {
1453                 u32 r;
1454
1455                 _iwl_write32(priv, CSR_EEPROM_REG,
1456                                  CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
1457                 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1458                 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
1459                                               CSR_EEPROM_REG_READ_VALID_MSK,
1460                                               IWL_EEPROM_ACCESS_TIMEOUT);
1461                 if (ret < 0) {
1462                         IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
1463                         return ret;
1464                 }
1465
1466                 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
1467                 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1468         }
1469
1470         return 0;
1471 }
1472
1473 static void iwl3945_unset_hw_params(struct iwl_priv *priv)
1474 {
1475         if (priv->shared_virt)
1476                 pci_free_consistent(priv->pci_dev,
1477                                     sizeof(struct iwl3945_shared),
1478                                     priv->shared_virt,
1479                                     priv->shared_phys);
1480 }
1481
1482 /**
1483  * iwl3945_supported_rate_to_ie - fill in the supported rates IE field
1484  *
1485  * Return: bitmask with a bit set for each rate inserted into the IE
1486  */
1487 static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1488                                     u16 basic_rate, int *left)
1489 {
1490         u16 ret_rates = 0, bit;
1491         int i;
1492         u8 *cnt = ie;
1493         u8 *rates = ie + 1;
1494
1495         for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1496                 if (bit & supported_rate) {
1497                         ret_rates |= bit;
1498                         rates[*cnt] = iwl3945_rates[i].ieee |
1499                                 ((bit & basic_rate) ? 0x80 : 0x00);
1500                         (*cnt)++;
1501                         (*left)--;
1502                         if ((*left <= 0) ||
1503                             (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
1504                                 break;
1505                 }
1506         }
1507
1508         return ret_rates;
1509 }
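/*
 * Example of the resulting encoding (assuming iwl3945_rates[].ieee holds the
 * standard 500 kbps units): a 1 Mbps basic rate is emitted as
 * 0x02 | 0x80 = 0x82, while a non-basic 11 Mbps rate is emitted as 0x16.
 */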
1510
1511 /**
1512  * iwl3945_fill_probe_req - fill in all required fields and IE for probe request
1513  */
1514 static u16 iwl3945_fill_probe_req(struct iwl_priv *priv,
1515                               struct ieee80211_mgmt *frame,
1516                               int left)
1517 {
1518         int len = 0;
1519         u8 *pos = NULL;
1520         u16 active_rates, ret_rates, cck_rates;
1521
1522         /* Make sure there is enough space for the probe request,
1523          * two mandatory IEs and the data */
1524         left -= 24;
1525         if (left < 0)
1526                 return 0;
1527         len += 24;
1528
1529         frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1530         memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
1531         memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1532         memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
1533         frame->seq_ctrl = 0;
1534
1535         /* fill in a zero-length (wildcard) SSID IE */
1536         /* ...next IE... */
1537
1538         left -= 2;
1539         if (left < 0)
1540                 return 0;
1541         len += 2;
1542         pos = &(frame->u.probe_req.variable[0]);
1543         *pos++ = WLAN_EID_SSID;
1544         *pos++ = 0;
1545
1546         /* fill in supported rate */
1547         /* ...next IE... */
1548         left -= 2;
1549         if (left < 0)
1550                 return 0;
1551
1552         /* ... fill it in... */
1553         *pos++ = WLAN_EID_SUPP_RATES;
1554         *pos = 0;
1555
1556         priv->active_rate = priv->rates_mask;
1557         active_rates = priv->active_rate;
1558         priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1559
1560         cck_rates = IWL_CCK_RATES_MASK & active_rates;
1561         ret_rates = iwl3945_supported_rate_to_ie(pos, cck_rates,
1562                         priv->active_rate_basic, &left);
1563         active_rates &= ~ret_rates;
1564
1565         ret_rates = iwl3945_supported_rate_to_ie(pos, active_rates,
1566                                  priv->active_rate_basic, &left);
1567         active_rates &= ~ret_rates;
1568
1569         len += 2 + *pos;
1570         pos += (*pos) + 1;
1571         if (active_rates == 0)
1572                 goto fill_end;
1573
1574         /* fill in supported extended rate */
1575         /* ...next IE... */
1576         left -= 2;
1577         if (left < 0)
1578                 return 0;
1579         /* ... fill it in... */
1580         *pos++ = WLAN_EID_EXT_SUPP_RATES;
1581         *pos = 0;
1582         iwl3945_supported_rate_to_ie(pos, active_rates,
1583                                  priv->active_rate_basic, &left);
1584         if (*pos > 0)
1585                 len += 2 + *pos;
1586
1587  fill_end:
1588         return (u16)len;
1589 }
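/*
 * Sketch of the layout built above: a 24-byte management header, a
 * zero-length (wildcard) SSID IE, a Supported Rates IE carrying up to
 * IWL_SUPPORTED_RATES_IE_LEN rates, and, if rates remain, an Extended
 * Supported Rates IE; the returned length covers all of these.
 */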
1590
1591 /*
1592  * QoS support
1593  */
1594 static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
1595                                        struct iwl_qosparam_cmd *qos)
1596 {
1597
1598         return iwl3945_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1599                                 sizeof(struct iwl_qosparam_cmd), qos);
1600 }
1601
1602 static void iwl3945_reset_qos(struct iwl_priv *priv)
1603 {
1604         u16 cw_min = 15;
1605         u16 cw_max = 1023;
1606         u8 aifs = 2;
1607         u8 is_legacy = 0;
1608         unsigned long flags;
1609         int i;
1610
1611         spin_lock_irqsave(&priv->lock, flags);
1612         priv->qos_data.qos_active = 0;
1613
1614         /* QoS always active in AP and ADHOC mode
1615          * In STA mode wait for association
1616          */
1617         if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
1618             priv->iw_mode == NL80211_IFTYPE_AP)
1619                 priv->qos_data.qos_active = 1;
1620         else
1621                 priv->qos_data.qos_active = 0;
1622
1623
1624         /* check for legacy mode */
1625         if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
1626              (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
1627             (priv->iw_mode == NL80211_IFTYPE_STATION &&
1628              (priv->staging39_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
1629                 cw_min = 31;
1630                 is_legacy = 1;
1631         }
1632
1633         if (priv->qos_data.qos_active)
1634                 aifs = 3;
1635
1636         priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1637         priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1638         priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1639         priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1640         priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1641
1642         if (priv->qos_data.qos_active) {
1643                 i = 1;
1644                 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1645                 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1646                 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1647                 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1648                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1649
1650                 i = 2;
1651                 priv->qos_data.def_qos_parm.ac[i].cw_min =
1652                         cpu_to_le16((cw_min + 1) / 2 - 1);
1653                 priv->qos_data.def_qos_parm.ac[i].cw_max =
1654                         cpu_to_le16(cw_max);
1655                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1656                 if (is_legacy)
1657                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1658                                 cpu_to_le16(6016);
1659                 else
1660                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1661                                 cpu_to_le16(3008);
1662                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1663
1664                 i = 3;
1665                 priv->qos_data.def_qos_parm.ac[i].cw_min =
1666                         cpu_to_le16((cw_min + 1) / 4 - 1);
1667                 priv->qos_data.def_qos_parm.ac[i].cw_max =
1668                         cpu_to_le16((cw_max + 1) / 2 - 1);
1669                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1670                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1671                 if (is_legacy)
1672                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1673                                 cpu_to_le16(3264);
1674                 else
1675                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1676                                 cpu_to_le16(1504);
1677         } else {
1678                 for (i = 1; i < 4; i++) {
1679                         priv->qos_data.def_qos_parm.ac[i].cw_min =
1680                                 cpu_to_le16(cw_min);
1681                         priv->qos_data.def_qos_parm.ac[i].cw_max =
1682                                 cpu_to_le16(cw_max);
1683                         priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
1684                         priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1685                         priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1686                 }
1687         }
1688         IWL_DEBUG_QOS("set QoS to default\n");
1689
1690         spin_unlock_irqrestore(&priv->lock, flags);
1691 }
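/*
 * With the defaults above (cw_min 15, cw_max 1023, non-legacy), the
 * QoS-active branch ends up with roughly: AC0/AC1 cw 15-1023 (aifsn 3/7),
 * AC2 cw 7-1023 (aifsn 2, txop 3008 usec) and AC3 cw 3-511 (aifsn 2,
 * txop 1504 usec); legacy mode uses cw_min 31 and txop 6016/3264 usec.
 */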
1692
1693 static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
1694 {
1695         unsigned long flags;
1696
1697         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1698                 return;
1699
1700         spin_lock_irqsave(&priv->lock, flags);
1701         priv->qos_data.def_qos_parm.qos_flags = 0;
1702
1703         if (priv->qos_data.qos_cap.q_AP.queue_request &&
1704             !priv->qos_data.qos_cap.q_AP.txop_request)
1705                 priv->qos_data.def_qos_parm.qos_flags |=
1706                         QOS_PARAM_FLG_TXOP_TYPE_MSK;
1707
1708         if (priv->qos_data.qos_active)
1709                 priv->qos_data.def_qos_parm.qos_flags |=
1710                         QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1711
1712         spin_unlock_irqrestore(&priv->lock, flags);
1713
1714         if (force || iwl3945_is_associated(priv)) {
1715                 IWL_DEBUG_QOS("send QoS cmd with QoS active %d\n",
1716                               priv->qos_data.qos_active);
1717
1718                 iwl3945_send_qos_params_command(priv,
1719                                 &(priv->qos_data.def_qos_parm));
1720         }
1721 }
1722
1723 /*
1724  * Power management (not Tx power!) functions
1725  */
1726 #define MSEC_TO_USEC 1024
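/* note: the conversion factor is 1024 usec, i.e. one 802.11 time unit (TU),
 * rather than a literal 1000 */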
1727
1728
1729 #define NOSLP __constant_cpu_to_le16(0), 0, 0
1730 #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
1731 #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1732 #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1733                                      __constant_cpu_to_le32(X1), \
1734                                      __constant_cpu_to_le32(X2), \
1735                                      __constant_cpu_to_le32(X3), \
1736                                      __constant_cpu_to_le32(X4)}
1737
1738 /* default power management (not Tx power) table values */
1739 /* for TIM  0-10 */
1740 static struct iwl_power_vec_entry range_0[IWL39_POWER_AC] = {
1741         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1742         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1743         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1744         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1745         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1746         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1747 };
1748
1749 /* for TIM > 10 */
1750 static struct iwl_power_vec_entry range_1[IWL39_POWER_AC] = {
1751         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1752         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1753                  SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1754         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1755                  SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1756         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1757                  SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1758         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1759         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1760                  SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1761 };
1762
1763 int iwl3945_power_init_handle(struct iwl_priv *priv)
1764 {
1765         int rc = 0, i;
1766         struct iwl3945_power_mgr *pow_data;
1767         int size = sizeof(struct iwl_power_vec_entry) * IWL39_POWER_AC;
1768         u16 pci_pm;
1769
1770         IWL_DEBUG_POWER("Initialize power\n");
1771
1772         pow_data = &(priv->power_data_39);
1773
1774         memset(pow_data, 0, sizeof(*pow_data));
1775
1776         pow_data->active_index = IWL_POWER_RANGE_0;
1777         pow_data->dtim_val = 0xffff;
1778
1779         memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1780         memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1781
1782         rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1783         if (rc != 0)
1784                 return 0;
1785         else {
1786                 struct iwl_powertable_cmd *cmd;
1787
1788                 IWL_DEBUG_POWER("adjust power command flags\n");
1789
1790                 for (i = 0; i < IWL39_POWER_AC; i++) {
1791                         cmd = &pow_data->pwr_range_0[i].cmd;
1792
1793                         if (pci_pm & 0x1)
1794                                 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1795                         else
1796                                 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1797                 }
1798         }
1799         return rc;
1800 }
1801
1802 static int iwl3945_update_power_cmd(struct iwl_priv *priv,
1803                                 struct iwl_powertable_cmd *cmd, u32 mode)
1804 {
1805         int rc = 0, i;
1806         u8 skip;
1807         u32 max_sleep = 0;
1808         struct iwl_power_vec_entry *range;
1809         u8 period = 0;
1810         struct iwl3945_power_mgr *pow_data;
1811
1812         if (mode > IWL_POWER_INDEX_5) {
1813                 IWL_DEBUG_POWER("Error: invalid power mode %u\n", mode);
1814                 return -1;
1815         }
1816         pow_data = &(priv->power_data_39);
1817
1818         if (pow_data->active_index == IWL_POWER_RANGE_0)
1819                 range = &pow_data->pwr_range_0[0];
1820         else
1821                 range = &pow_data->pwr_range_1[0];
1822
1823         memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
1824
1825 #ifdef IWL_MAC80211_DISABLE
1826         if (priv->assoc_network != NULL) {
1827                 unsigned long flags;
1828
1829                 period = priv->assoc_network->tim.tim_period;
1830         }
1831 #endif  /*IWL_MAC80211_DISABLE */
1832         skip = range[mode].no_dtim;
1833
1834         if (period == 0) {
1835                 period = 1;
1836                 skip = 0;
1837         }
1838
1839         if (skip == 0) {
1840                 max_sleep = period;
1841                 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1842         } else {
1843                 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1844                 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1845                 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1846         }
1847
1848         for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1849                 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1850                         cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1851         }
1852
1853         IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1854         IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1855         IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1856         IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1857                         le32_to_cpu(cmd->sleep_interval[0]),
1858                         le32_to_cpu(cmd->sleep_interval[1]),
1859                         le32_to_cpu(cmd->sleep_interval[2]),
1860                         le32_to_cpu(cmd->sleep_interval[3]),
1861                         le32_to_cpu(cmd->sleep_interval[4]));
1862
1863         return rc;
1864 }
1865
1866 static int iwl3945_send_power_mode(struct iwl_priv *priv, u32 mode)
1867 {
1868         u32 uninitialized_var(final_mode);
1869         int rc;
1870         struct iwl_powertable_cmd cmd;
1871
1872         /* If on battery, set to 3,
1873          * if plugged into AC power, set to CAM ("continuously aware mode"),
1874          * else user level */
1875         switch (mode) {
1876         case IWL39_POWER_BATTERY:
1877                 final_mode = IWL_POWER_INDEX_3;
1878                 break;
1879         case IWL39_POWER_AC:
1880                 final_mode = IWL_POWER_MODE_CAM;
1881                 break;
1882         default:
1883                 final_mode = mode;
1884                 break;
1885         }
1886
1887         iwl3945_update_power_cmd(priv, &cmd, final_mode);
1888
1889         /* FIXME: use get_hcmd_size; the 3945 command is 4 bytes shorter */
1890         rc = iwl3945_send_cmd_pdu(priv, POWER_TABLE_CMD,
1891                                 sizeof(struct iwl3945_powertable_cmd), &cmd);
1892
1893         if (final_mode == IWL_POWER_MODE_CAM)
1894                 clear_bit(STATUS_POWER_PMI, &priv->status);
1895         else
1896                 set_bit(STATUS_POWER_PMI, &priv->status);
1897
1898         return rc;
1899 }
1900
1901 /**
1902  * iwl3945_scan_cancel - Cancel any currently executing HW scan
1903  *
1904  * NOTE: priv->mutex is not required before calling this function
1905  */
1906 static int iwl3945_scan_cancel(struct iwl_priv *priv)
1907 {
1908         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1909                 clear_bit(STATUS_SCANNING, &priv->status);
1910                 return 0;
1911         }
1912
1913         if (test_bit(STATUS_SCANNING, &priv->status)) {
1914                 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1915                         IWL_DEBUG_SCAN("Queuing scan abort.\n");
1916                         set_bit(STATUS_SCAN_ABORTING, &priv->status);
1917                         queue_work(priv->workqueue, &priv->abort_scan);
1918
1919                 } else
1920                         IWL_DEBUG_SCAN("Scan abort already in progress.\n");
1921
1922                 return test_bit(STATUS_SCANNING, &priv->status);
1923         }
1924
1925         return 0;
1926 }
1927
1928 /**
1929  * iwl3945_scan_cancel_timeout - Cancel any executing HW scan and wait for it to stop
1930  * @ms: amount of time to wait (in milliseconds) for scan to abort
1931  *
1932  * NOTE: priv->mutex must be held before calling this function
1933  */
1934 static int iwl3945_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
1935 {
1936         unsigned long now = jiffies;
1937         int ret;
1938
1939         ret = iwl3945_scan_cancel(priv);
1940         if (ret && ms) {
1941                 mutex_unlock(&priv->mutex);
1942                 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
1943                                 test_bit(STATUS_SCANNING, &priv->status))
1944                         msleep(1);
1945                 mutex_lock(&priv->mutex);
1946
1947                 return test_bit(STATUS_SCANNING, &priv->status);
1948         }
1949
1950         return ret;
1951 }
1952
1953 #define MAX_UCODE_BEACON_INTERVAL       1024
1954 #define INTEL_CONN_LISTEN_INTERVAL      __constant_cpu_to_le16(0xA)
1955
1956 static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
1957 {
1958         u16 new_val = 0;
1959         u16 beacon_factor = 0;
1960
1961         beacon_factor =
1962             (beacon_val + MAX_UCODE_BEACON_INTERVAL)
1963                 / MAX_UCODE_BEACON_INTERVAL;
1964         new_val = beacon_val / beacon_factor;
1965
1966         return cpu_to_le16(new_val);
1967 }
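/*
 * Example: a 3000 TU beacon interval gives beacon_factor
 * (3000 + 1024) / 1024 = 3, so the value returned is 3000 / 3 = 1000 TU,
 * which fits within MAX_UCODE_BEACON_INTERVAL.
 */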
1968
1969 static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
1970 {
1971         u64 interval_tm_unit;
1972         u64 tsf, result;
1973         unsigned long flags;
1974         struct ieee80211_conf *conf = NULL;
1975         u16 beacon_int = 0;
1976
1977         conf = ieee80211_get_hw_conf(priv->hw);
1978
1979         spin_lock_irqsave(&priv->lock, flags);
1980         priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
1981         priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
1982
1983         tsf = priv->timestamp;
1984
1985         beacon_int = priv->beacon_int;
1986         spin_unlock_irqrestore(&priv->lock, flags);
1987
1988         if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1989                 if (beacon_int == 0) {
1990                         priv->rxon_timing.beacon_interval = cpu_to_le16(100);
1991                         priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
1992                 } else {
1993                         priv->rxon_timing.beacon_interval =
1994                                 cpu_to_le16(beacon_int);
1995                         priv->rxon_timing.beacon_interval =
1996                             iwl3945_adjust_beacon_interval(
1997                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
1998                 }
1999
2000                 priv->rxon_timing.atim_window = 0;
2001         } else {
2002                 priv->rxon_timing.beacon_interval =
2003                         iwl3945_adjust_beacon_interval(conf->beacon_int);
2004                 /* TODO: we need to get atim_window from upper stack
2005                  * for now we set to 0 */
2006                 priv->rxon_timing.atim_window = 0;
2007         }
2008
2009         interval_tm_unit =
2010                 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2011         result = do_div(tsf, interval_tm_unit);
2012         priv->rxon_timing.beacon_init_val =
2013             cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2014
2015         IWL_DEBUG_ASSOC
2016             ("beacon interval %d beacon timer %d atim window %d\n",
2017                 le16_to_cpu(priv->rxon_timing.beacon_interval),
2018                 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2019                 le16_to_cpu(priv->rxon_timing.atim_window));
2020 }
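/*
 * Example of the beacon_init_val computation: with a 100 TU beacon interval,
 * interval_tm_unit is 102400 usec; if tsf % interval_tm_unit is 30000, then
 * beacon_init_val is 72400 usec, i.e. the time remaining until the next
 * beacon-interval boundary.
 */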
2021
2022 static int iwl3945_scan_initiate(struct iwl_priv *priv)
2023 {
2024         if (!iwl3945_is_ready_rf(priv)) {
2025                 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2026                 return -EIO;
2027         }
2028
2029         if (test_bit(STATUS_SCANNING, &priv->status)) {
2030                 IWL_DEBUG_SCAN("Scan already in progress.\n");
2031                 return -EAGAIN;
2032         }
2033
2034         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2035                 IWL_DEBUG_SCAN("Scan request while abort pending; "
2036                                "try again later.\n");
2037                 return -EAGAIN;
2038         }
2039
2040         IWL_DEBUG_INFO("Starting scan...\n");
2041         if (priv->cfg->sku & IWL_SKU_G)
2042                 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
2043         if (priv->cfg->sku & IWL_SKU_A)
2044                 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
2045         set_bit(STATUS_SCANNING, &priv->status);
2046         priv->scan_start = jiffies;
2047         priv->scan_pass_start = priv->scan_start;
2048
2049         queue_work(priv->workqueue, &priv->request_scan);
2050
2051         return 0;
2052 }
2053
2054 static int iwl3945_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2055 {
2056         struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
2057
2058         if (hw_decrypt)
2059                 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2060         else
2061                 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2062
2063         return 0;
2064 }
2065
2066 static void iwl3945_set_flags_for_phymode(struct iwl_priv *priv,
2067                                           enum ieee80211_band band)
2068 {
2069         if (band == IEEE80211_BAND_5GHZ) {
2070                 priv->staging39_rxon.flags &=
2071                     ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2072                       | RXON_FLG_CCK_MSK);
2073                 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2074         } else {
2075                 /* Copied from iwl3945_bg_post_associate() */
2076                 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2077                         priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2078                 else
2079                         priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2080
2081                 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2082                         priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2083
2084                 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2085                 priv->staging39_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2086                 priv->staging39_rxon.flags &= ~RXON_FLG_CCK_MSK;
2087         }
2088 }
2089
2090 /*
2091  * initialize rxon structure with default values from eeprom
2092  */
2093 static void iwl3945_connection_init_rx_config(struct iwl_priv *priv,
2094                                               int mode)
2095 {
2096         const struct iwl_channel_info *ch_info;
2097
2098         memset(&priv->staging39_rxon, 0, sizeof(priv->staging39_rxon));
2099
2100         switch (mode) {
2101         case NL80211_IFTYPE_AP:
2102                 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_AP;
2103                 break;
2104
2105         case NL80211_IFTYPE_STATION:
2106                 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_ESS;
2107                 priv->staging39_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2108                 break;
2109
2110         case NL80211_IFTYPE_ADHOC:
2111                 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2112                 priv->staging39_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2113                 priv->staging39_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2114                                                   RXON_FILTER_ACCEPT_GRP_MSK;
2115                 break;
2116
2117         case NL80211_IFTYPE_MONITOR:
2118                 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2119                 priv->staging39_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2120                     RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2121                 break;
2122         default:
2123                 IWL_ERROR("Unsupported interface type %d\n", mode);
2124                 break;
2125         }
2126
2127 #if 0
2128         /* TODO:  Figure out when short_preamble would be set and cache from
2129          * that */
2130         if (!hw_to_local(priv->hw)->short_preamble)
2131                 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2132         else
2133                 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2134 #endif
2135
2136         ch_info = iwl3945_get_channel_info(priv, priv->band,
2137                                        le16_to_cpu(priv->active39_rxon.channel));
2138
2139         if (!ch_info)
2140                 ch_info = &priv->channel_info[0];
2141
2142         /*
2143          * In some cases none of the A-band channels allows IBSS;
2144          * in that case fall back to a B/G channel.
2145          */
2146         if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
2147                 ch_info = &priv->channel_info[0];
2148
2149         priv->staging39_rxon.channel = cpu_to_le16(ch_info->channel);
2150         if (is_channel_a_band(ch_info))
2151                 priv->band = IEEE80211_BAND_5GHZ;
2152         else
2153                 priv->band = IEEE80211_BAND_2GHZ;
2154
2155         iwl3945_set_flags_for_phymode(priv, priv->band);
2156
2157         priv->staging39_rxon.ofdm_basic_rates =
2158             (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2159         priv->staging39_rxon.cck_basic_rates =
2160             (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2161 }
2162
2163 static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
2164 {
2165         if (mode == NL80211_IFTYPE_ADHOC) {
2166                 const struct iwl_channel_info *ch_info;
2167
2168                 ch_info = iwl3945_get_channel_info(priv,
2169                         priv->band,
2170                         le16_to_cpu(priv->staging39_rxon.channel));
2171
2172                 if (!ch_info || !is_channel_ibss(ch_info)) {
2173                         IWL_ERROR("channel %d not IBSS channel\n",
2174                                   le16_to_cpu(priv->staging39_rxon.channel));
2175                         return -EINVAL;
2176                 }
2177         }
2178
2179         iwl3945_connection_init_rx_config(priv, mode);
2180         memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2181
2182         iwl3945_clear_stations_table(priv);
2183
2184         /* don't commit rxon if rf-kill is on*/
2185         if (!iwl3945_is_ready_rf(priv))
2186                 return -EAGAIN;
2187
2188         cancel_delayed_work(&priv->scan_check);
2189         if (iwl3945_scan_cancel_timeout(priv, 100)) {
2190                 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2191                 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2192                 return -EAGAIN;
2193         }
2194
2195         iwl3945_commit_rxon(priv);
2196
2197         return 0;
2198 }
2199
2200 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2201                                       struct ieee80211_tx_info *info,
2202                                       struct iwl3945_cmd *cmd,
2203                                       struct sk_buff *skb_frag,
2204                                       int last_frag)
2205 {
2206         struct iwl3945_hw_key *keyinfo =
2207             &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;
2208
2209         switch (keyinfo->alg) {
2210         case ALG_CCMP:
2211                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2212                 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2213                 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
2214                 break;
2215
2216         case ALG_TKIP:
2217 #if 0
2218                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2219
2220                 if (last_frag)
2221                         memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2222                                8);
2223                 else
2224                         memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2225 #endif
2226                 break;
2227
2228         case ALG_WEP:
2229                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2230                     (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2231
2232                 if (keyinfo->keylen == 13)
2233                         cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2234
2235                 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2236
2237                 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2238                              "with key %d\n", info->control.hw_key->hw_key_idx);
2239                 break;
2240
2241         default:
2242                 printk(KERN_ERR "Unknown encryption algorithm %d\n", keyinfo->alg);
2243                 break;
2244         }
2245 }
2246
2247 /*
2248  * Build the basic part of the REPLY_TX command (flags, timeouts, etc.).
2249  */
2250 static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
2251                                   struct iwl3945_cmd *cmd,
2252                                   struct ieee80211_tx_info *info,
2253                                   struct ieee80211_hdr *hdr,
2254                                   int is_unicast, u8 std_id)
2255 {
2256         __le16 fc = hdr->frame_control;
2257         __le32 tx_flags = cmd->cmd.tx.tx_flags;
2258         u8 rc_flags = info->control.rates[0].flags;
2259
2260         cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2261         if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
2262                 tx_flags |= TX_CMD_FLG_ACK_MSK;
2263                 if (ieee80211_is_mgmt(fc))
2264                         tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2265                 if (ieee80211_is_probe_resp(fc) &&
2266                     !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2267                         tx_flags |= TX_CMD_FLG_TSF_MSK;
2268         } else {
2269                 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2270                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2271         }
2272
2273         cmd->cmd.tx.sta_id = std_id;
2274         if (ieee80211_has_morefrags(fc))
2275                 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2276
2277         if (ieee80211_is_data_qos(fc)) {
2278                 u8 *qc = ieee80211_get_qos_ctl(hdr);
2279                 cmd->cmd.tx.tid_tspec = qc[0] & 0xf;
2280                 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2281         } else {
2282                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2283         }
2284
2285         if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2286                 tx_flags |= TX_CMD_FLG_RTS_MSK;
2287                 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2288         } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
2289                 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2290                 tx_flags |= TX_CMD_FLG_CTS_MSK;
2291         }
2292
2293         if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2294                 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2295
2296         tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2297         if (ieee80211_is_mgmt(fc)) {
2298                 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
2299                         cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2300                 else
2301                         cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2302         } else {
2303                 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2304 #ifdef CONFIG_IWL3945_LEDS
2305                 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
2306 #endif
2307         }
2308
2309         cmd->cmd.tx.driver_txop = 0;
2310         cmd->cmd.tx.tx_flags = tx_flags;
2311         cmd->cmd.tx.next_frame_len = 0;
2312 }
2313
2314 /**
2315  * iwl3945_get_sta_id - Find station's index within station table
2316  */
2317 static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2318 {
2319         int sta_id;
2320         u16 fc = le16_to_cpu(hdr->frame_control);
2321
2322         /* If this frame is broadcast or management, use broadcast station id */
2323         if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2324             is_multicast_ether_addr(hdr->addr1))
2325                 return priv->hw_params.bcast_sta_id;
2326
2327         switch (priv->iw_mode) {
2328
2329         /* If we are a client station in a BSS network, use the special
2330          * AP station entry (that's the only station we communicate with) */
2331         case NL80211_IFTYPE_STATION:
2332                 return IWL_AP_ID;
2333
2334         /* If we are an AP, then find the station, or use BCAST */
2335         case NL80211_IFTYPE_AP:
2336                 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2337                 if (sta_id != IWL_INVALID_STATION)
2338                         return sta_id;
2339                 return priv->hw_params.bcast_sta_id;
2340
2341         /* If this frame is going out to an IBSS network, find the station,
2342          * or create a new station table entry */
2343         case NL80211_IFTYPE_ADHOC: {
2344                 /* Look up the station; add a new entry if not found */
2345                 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2346                 if (sta_id != IWL_INVALID_STATION)
2347                         return sta_id;
2348
2349                 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2350
2351                 if (sta_id != IWL_INVALID_STATION)
2352                         return sta_id;
2353
2354                 IWL_DEBUG_DROP("Station %pM not in station map. "
2355                                "Defaulting to broadcast...\n",
2356                                hdr->addr1);
2357                 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2358                 return priv->hw_params.bcast_sta_id;
2359         }
2360         /* If we are in monitor mode, use BCAST. This is required for
2361          * packet injection. */
2362         case NL80211_IFTYPE_MONITOR:
2363                 return priv->hw_params.bcast_sta_id;
2364
2365         default:
2366                 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2367                 return priv->hw_params.bcast_sta_id;
2368         }
2369 }
2370
2371 /*
2372  * start REPLY_TX command process
2373  */
2374 static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
2375 {
2376         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2377         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2378         struct iwl3945_tfd_frame *tfd;
2379         u32 *control_flags;
2380         int txq_id = skb_get_queue_mapping(skb);
2381         struct iwl3945_tx_queue *txq = NULL;
2382         struct iwl_queue *q = NULL;
2383         dma_addr_t phys_addr;
2384         dma_addr_t txcmd_phys;
2385         struct iwl3945_cmd *out_cmd = NULL;
2386         u16 len, idx, len_org, hdr_len;
2387         u8 id;
2388         u8 unicast;
2389         u8 sta_id;
2390         u8 tid = 0;
2391         u16 seq_number = 0;
2392         __le16 fc;
2393         u8 wait_write_ptr = 0;
2394         u8 *qc = NULL;
2395         unsigned long flags;
2396         int rc;
2397
2398         spin_lock_irqsave(&priv->lock, flags);
2399         if (iwl3945_is_rfkill(priv)) {
2400                 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2401                 goto drop_unlock;
2402         }
2403
2404         if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
2405                 IWL_ERROR("ERROR: No TX rate available.\n");
2406                 goto drop_unlock;
2407         }
2408
2409         unicast = !is_multicast_ether_addr(hdr->addr1);
2410         id = 0;
2411
2412         fc = hdr->frame_control;
2413
2414 #ifdef CONFIG_IWL3945_DEBUG
2415         if (ieee80211_is_auth(fc))
2416                 IWL_DEBUG_TX("Sending AUTH frame\n");
2417         else if (ieee80211_is_assoc_req(fc))
2418                 IWL_DEBUG_TX("Sending ASSOC frame\n");
2419         else if (ieee80211_is_reassoc_req(fc))
2420                 IWL_DEBUG_TX("Sending REASSOC frame\n");
2421 #endif
2422
2423         /* drop all data frame if we are not associated */
2424         if (ieee80211_is_data(fc) &&
2425             (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
2426             (!iwl3945_is_associated(priv) ||
2427              ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
2428                 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
2429                 goto drop_unlock;
2430         }
2431
2432         spin_unlock_irqrestore(&priv->lock, flags);
2433
2434         hdr_len = ieee80211_hdrlen(fc);
2435
2436         /* Find (or create) index into station table for destination station */
2437         sta_id = iwl3945_get_sta_id(priv, hdr);
2438         if (sta_id == IWL_INVALID_STATION) {
2439                 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
2440                                hdr->addr1);
2441                 goto drop;
2442         }
2443
2444         IWL_DEBUG_RATE("station Id %d\n", sta_id);
2445
2446         if (ieee80211_is_data_qos(fc)) {
2447                 qc = ieee80211_get_qos_ctl(hdr);
2448                 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
2449                 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
2450                                 IEEE80211_SCTL_SEQ;
2451                 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2452                         (hdr->seq_ctrl &
2453                                 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2454                 seq_number += 0x10;
2455         }
2456
2457         /* Descriptor for chosen Tx queue */
2458         txq = &priv->txq39[txq_id];
2459         q = &txq->q;
2460
2461         spin_lock_irqsave(&priv->lock, flags);
2462
2463         /* Set up first empty TFD within this queue's circular TFD buffer */
2464         tfd = &txq->bd[q->write_ptr];
2465         memset(tfd, 0, sizeof(*tfd));
2466         control_flags = (u32 *) tfd;
2467         idx = get_cmd_index(q, q->write_ptr, 0);
2468
2469         /* Set up driver data for this TFD */
2470         memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info));
2471         txq->txb[q->write_ptr].skb[0] = skb;
2472
2473         /* Init first empty entry in queue's array of Tx/cmd buffers */
2474         out_cmd = &txq->cmd[idx];
2475         memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2476         memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2477
2478         /*
2479          * Set up the Tx-command (not MAC!) header.
2480          * Store the chosen Tx queue and TFD index within the sequence field;
2481          * after Tx, uCode's Tx response will return this value so driver can
2482          * locate the frame within the tx queue and do post-tx processing.
2483          */
2484         out_cmd->hdr.cmd = REPLY_TX;
2485         out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2486                                 INDEX_TO_SEQ(q->write_ptr)));
2487
2488         /* Copy MAC header from skb into command buffer */
2489         memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2490
2491         /*
2492          * Use the first empty entry in this queue's command buffer array
2493          * to contain the Tx command and MAC header concatenated together
2494          * (payload data will be in another buffer).
2495          * Size of this varies, due to varying MAC header length.
2496          * If end is not dword aligned, we'll have 2 extra bytes at the end
2497          * of the MAC header (device reads on dword boundaries).
2498          * We'll tell device about this padding later.
2499          */
2500         len = sizeof(struct iwl3945_tx_cmd) +
2501                         sizeof(struct iwl_cmd_header) + hdr_len;
2502
2503         len_org = len;
2504         len = (len + 3) & ~3;
2505
2506         if (len_org != len)
2507                 len_org = 1;
2508         else
2509                 len_org = 0;
2510
2511         /* Physical address of this Tx command's header (not MAC header!),
2512          * within command buffer array. */
2513         txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl3945_cmd) * idx +
2514                      offsetof(struct iwl3945_cmd, hdr);
2515
2516         /* Add buffer containing Tx command and MAC(!) header to TFD's
2517          * first entry */
2518         iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2519
2520         if (info->control.hw_key)
2521                 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2522
2523         /* Set up TFD's 2nd entry to point directly to remainder of skb,
2524          * if any (802.11 null frames have no payload). */
2525         len = skb->len - hdr_len;
2526         if (len) {
2527                 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2528                                            len, PCI_DMA_TODEVICE);
2529                 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2530         }
2531
2532         if (!len)
2533                 /* If there is no payload, then we use only one Tx buffer */
2534                 *control_flags = TFD_CTL_COUNT_SET(1);
2535         else
2536                 /* Else use 2 buffers.
2537                  * Tell 3945 about any padding after MAC header */
2538                 *control_flags = TFD_CTL_COUNT_SET(2) |
2539                         TFD_CTL_PAD_SET(U32_PAD(len));
2540
2541         /* Total # bytes to be transmitted */
2542         len = (u16)skb->len;
2543         out_cmd->cmd.tx.len = cpu_to_le16(len);
2544
2545         /* TODO need this for burst mode later on */
2546         iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id);
2547
2548         /* set is_hcca to 0; it probably will never be implemented */
2549         iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
2550
2551         out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2552         out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2553
2554         if (!ieee80211_has_morefrags(hdr->frame_control)) {
2555                 txq->need_update = 1;
2556                 if (qc)
2557                         priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
2558         } else {
2559                 wait_write_ptr = 1;
2560                 txq->need_update = 0;
2561         }
2562
2563         iwl_print_hex_dump(priv, IWL_DL_TX, out_cmd->cmd.payload,
2564                            sizeof(out_cmd->cmd.tx));
2565
2566         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2567                            ieee80211_hdrlen(fc));
2568
2569         /* Tell device the write index *just past* this latest filled TFD */
2570         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2571         rc = iwl3945_tx_queue_update_write_ptr(priv, txq);
2572         spin_unlock_irqrestore(&priv->lock, flags);
2573
2574         if (rc)
2575                 return rc;
2576
2577         if ((iwl_queue_space(q) < q->high_mark)
2578             && priv->mac80211_registered) {
2579                 if (wait_write_ptr) {
2580                         spin_lock_irqsave(&priv->lock, flags);
2581                         txq->need_update = 1;
2582                         iwl3945_tx_queue_update_write_ptr(priv, txq);
2583                         spin_unlock_irqrestore(&priv->lock, flags);
2584                 }
2585
2586                 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
2587         }
2588
2589         return 0;
2590
2591 drop_unlock:
2592         spin_unlock_irqrestore(&priv->lock, flags);
2593 drop:
2594         return -1;
2595 }
2596
2597 static void iwl3945_set_rate(struct iwl_priv *priv)
2598 {
2599         const struct ieee80211_supported_band *sband = NULL;
2600         struct ieee80211_rate *rate;
2601         int i;
2602
2603         sband = iwl3945_get_band(priv, priv->band);
2604         if (!sband) {
2605                 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2606                 return;
2607         }
2608
2609         priv->active_rate = 0;
2610         priv->active_rate_basic = 0;
2611
2612         IWL_DEBUG_RATE("Setting rates for %s GHz\n",
2613                        sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
2614
2615         for (i = 0; i < sband->n_bitrates; i++) {
2616                 rate = &sband->bitrates[i];
2617                 if ((rate->hw_value < IWL_RATE_COUNT) &&
2618                     !(rate->flags & IEEE80211_CHAN_DISABLED)) {
2619                         IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
2620                                        rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
2621                         priv->active_rate |= (1 << rate->hw_value);
2622                 }
2623         }
2624
2625         IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2626                        priv->active_rate, priv->active_rate_basic);
2627
2628         /*
2629          * If basic rates are configured, use them; otherwise fall back
2630          * to the defaults: all CCK rates, and 6, 12 and 24 Mbps for
2631          * OFDM.
2632          */
2633         if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2634                 priv->staging39_rxon.cck_basic_rates =
2635                     ((priv->active_rate_basic &
2636                       IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2637         else
2638                 priv->staging39_rxon.cck_basic_rates =
2639                     (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2640
2641         if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2642                 priv->staging39_rxon.ofdm_basic_rates =
2643                     ((priv->active_rate_basic &
2644                       (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2645                       IWL_FIRST_OFDM_RATE) & 0xFF;
2646         else
2647                 priv->staging39_rxon.ofdm_basic_rates =
2648                    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2649 }
2650
2651 static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2652 {
2653         unsigned long flags;
2654
2655         if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2656                 return;
2657
2658         IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2659                           disable_radio ? "OFF" : "ON");
2660
2661         if (disable_radio) {
2662                 iwl3945_scan_cancel(priv);
2663                 /* FIXME: This is a workaround for AP */
2664                 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2665                         spin_lock_irqsave(&priv->lock, flags);
2666                         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2667                                     CSR_UCODE_SW_BIT_RFKILL);
2668                         spin_unlock_irqrestore(&priv->lock, flags);
2669                         iwl3945_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
2670                         set_bit(STATUS_RF_KILL_SW, &priv->status);
2671                 }
2672                 return;
2673         }
2674
2675         spin_lock_irqsave(&priv->lock, flags);
2676         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2677
2678         clear_bit(STATUS_RF_KILL_SW, &priv->status);
2679         spin_unlock_irqrestore(&priv->lock, flags);
2680
2681         /* wake up ucode */
2682         msleep(10);
2683
2684         spin_lock_irqsave(&priv->lock, flags);
2685         iwl_read32(priv, CSR_UCODE_DRV_GP1);
2686         if (!iwl_grab_nic_access(priv))
2687                 iwl_release_nic_access(priv);
2688         spin_unlock_irqrestore(&priv->lock, flags);
2689
2690         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2691                 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2692                                   "disabled by HW switch\n");
2693                 return;
2694         }
2695
2696         if (priv->is_open)
2697                 queue_work(priv->workqueue, &priv->restart);
2698         return;
2699 }
2700
2701 void iwl3945_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
2702                             u32 decrypt_res, struct ieee80211_rx_status *stats)
2703 {
2704         u16 fc =
2705             le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2706
2707         if (priv->active39_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2708                 return;
2709
2710         if (!(fc & IEEE80211_FCTL_PROTECTED))
2711                 return;
2712
2713         IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2714         switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2715         case RX_RES_STATUS_SEC_TYPE_TKIP:
2716                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2717                     RX_RES_STATUS_BAD_ICV_MIC)
2718                         stats->flag |= RX_FLAG_MMIC_ERROR;
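                /* fall through - decrypt status is checked below as well */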
2719         case RX_RES_STATUS_SEC_TYPE_WEP:
2720         case RX_RES_STATUS_SEC_TYPE_CCMP:
2721                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2722                     RX_RES_STATUS_DECRYPT_OK) {
2723                         IWL_DEBUG_RX("hw decrypt successful\n");
2724                         stats->flag |= RX_FLAG_DECRYPTED;
2725                 }
2726                 break;
2727
2728         default:
2729                 break;
2730         }
2731 }
2732
2733 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2734
2735 #include "iwl-spectrum.h"
2736
2737 #define BEACON_TIME_MASK_LOW    0x00FFFFFF
2738 #define BEACON_TIME_MASK_HIGH   0xFF000000
2739 #define TIME_UNIT               1024
2740
2741 /*
2742  * Extended beacon time format:
2743  * a time in usec is converted to a 32-bit value in 8:24 format, where
2744  * the high byte is the number of beacon intervals and the low 3 bytes
2745  * are the remaining time in usec within one beacon interval.
2746  */
2747
2748 static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
2749 {
2750         u32 quot;
2751         u32 rem;
2752         u32 interval = beacon_interval * 1024;
2753
2754         if (!interval || !usec)
2755                 return 0;
2756
2757         quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2758         rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2759
2760         return (quot << 24) + rem;
2761 }
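/*
 * Example: with a 100 TU beacon interval (interval = 102400 usec),
 * usec = 250000 gives quot = 2 and rem = 45200, i.e. the packed value
 * 0x0200b090 (two full beacon intervals plus 45200 usec).
 */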
2762
2763 /* base is usually the value the uCode reports with each received frame;
2764  * it mirrors the HW timer counter counting down.
2765  */
2766
2767 static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
2768 {
2769         u32 base_low = base & BEACON_TIME_MASK_LOW;
2770         u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2771         u32 interval = beacon_interval * TIME_UNIT;
2772         u32 res = (base & BEACON_TIME_MASK_HIGH) +
2773             (addon & BEACON_TIME_MASK_HIGH);
2774
2775         if (base_low > addon_low)
2776                 res += base_low - addon_low;
2777         else if (base_low < addon_low) {
2778                 res += interval + base_low - addon_low;
2779                 res += (1 << 24);
2780         } else
2781                 res += (1 << 24);
2782
2783         return cpu_to_le32(res);
2784 }
2785
2786 static int iwl3945_get_measurement(struct iwl_priv *priv,
2787                                struct ieee80211_measurement_params *params,
2788                                u8 type)
2789 {
2790         struct iwl_spectrum_cmd spectrum;
2791         struct iwl_rx_packet *res;
2792         struct iwl3945_host_cmd cmd = {
2793                 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2794                 .data = (void *)&spectrum,
2795                 .meta.flags = CMD_WANT_SKB,
2796         };
2797         u32 add_time = le64_to_cpu(params->start_time);
2798         int rc;
2799         int spectrum_resp_status;
2800         int duration = le16_to_cpu(params->duration);
2801
2802         if (iwl3945_is_associated(priv))
2803                 add_time =
2804                     iwl3945_usecs_to_beacons(
2805                         le64_to_cpu(params->start_time) - priv->last_tsf,
2806                         le16_to_cpu(priv->rxon_timing.beacon_interval));
2807
2808         memset(&spectrum, 0, sizeof(spectrum));
2809
2810         spectrum.channel_count = cpu_to_le16(1);
2811         spectrum.flags =
2812             RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2813         spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2814         cmd.len = sizeof(spectrum);
2815         spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2816
2817         if (iwl3945_is_associated(priv))
2818                 spectrum.start_time =
2819                     iwl3945_add_beacon_time(priv->last_beacon_time,
2820                                 add_time,
2821                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
2822         else
2823                 spectrum.start_time = 0;
2824
2825         spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2826         spectrum.channels[0].channel = params->channel;
2827         spectrum.channels[0].type = type;
2828         if (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK)
2829                 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2830                     RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2831
2832         rc = iwl3945_send_cmd_sync(priv, &cmd);
2833         if (rc)
2834                 return rc;
2835
2836         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2837         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2838                 IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
2839                 rc = -EIO;
2840         }
2841
2842         spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2843         switch (spectrum_resp_status) {
2844         case 0:         /* Command will be handled */
2845                 if (res->u.spectrum.id != 0xff) {
2846                         IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2847                                                 res->u.spectrum.id);
2848                         priv->measurement_status &= ~MEASUREMENT_READY;
2849                 }
2850                 priv->measurement_status |= MEASUREMENT_ACTIVE;
2851                 rc = 0;
2852                 break;
2853
2854         case 1:         /* Command will not be handled */
2855                 rc = -EAGAIN;
2856                 break;
2857         }
2858
2859         dev_kfree_skb_any(cmd.meta.u.skb);
2860
2861         return rc;
2862 }
2863 #endif
2864
2865 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
2866                                struct iwl_rx_mem_buffer *rxb)
2867 {
2868         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2869         struct iwl_alive_resp *palive;
2870         struct delayed_work *pwork;
2871
2872         palive = &pkt->u.alive_frame;
2873
2874         IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2875                        "0x%01X 0x%01X\n",
2876                        palive->is_valid, palive->ver_type,
2877                        palive->ver_subtype);
2878
2879         if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2880                 IWL_DEBUG_INFO("Initialization Alive received.\n");
2881                 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2882                        sizeof(struct iwl_alive_resp));
2883                 pwork = &priv->init_alive_start;
2884         } else {
2885                 IWL_DEBUG_INFO("Runtime Alive received.\n");
2886                 memcpy(&priv->card_alive, &pkt->u.alive_frame,
2887                        sizeof(struct iwl_alive_resp));
2888                 pwork = &priv->alive_start;
2889                 iwl3945_disable_events(priv);
2890         }
2891
2892         /* We delay the ALIVE response by 5ms to
2893          * give the HW RF Kill time to activate... */
2894         if (palive->is_valid == UCODE_VALID_OK)
2895                 queue_delayed_work(priv->workqueue, pwork,
2896                                    msecs_to_jiffies(5));
2897         else
2898                 IWL_WARNING("uCode did not respond OK.\n");
2899 }
2900
2901 static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
2902                                  struct iwl_rx_mem_buffer *rxb)
2903 {
2904         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2905
2906         IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2907         return;
2908 }
2909
2910 static void iwl3945_rx_reply_error(struct iwl_priv *priv,
2911                                struct iwl_rx_mem_buffer *rxb)
2912 {
2913         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2914
2915         IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
2916                 "seq 0x%04X ser 0x%08X\n",
2917                 le32_to_cpu(pkt->u.err_resp.error_type),
2918                 get_cmd_string(pkt->u.err_resp.cmd_id),
2919                 pkt->u.err_resp.cmd_id,
2920                 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2921                 le32_to_cpu(pkt->u.err_resp.error_info));
2922 }
2923
2924 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2925
2926 static void iwl3945_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
2927 {
2928         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2929         struct iwl3945_rxon_cmd *rxon = (void *)&priv->active39_rxon;
2930         struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
2931         IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
2932                       le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
2933         rxon->channel = csa->channel;
2934         priv->staging39_rxon.channel = csa->channel;
2935 }
2936
2937 static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
2938                                           struct iwl_rx_mem_buffer *rxb)
2939 {
2940 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2941         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2942         struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
2943
2944         if (!report->state) {
2945                 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
2946                           "Spectrum Measure Notification: Start\n");
2947                 return;
2948         }
2949
2950         memcpy(&priv->measure_report, report, sizeof(*report));
2951         priv->measurement_status |= MEASUREMENT_READY;
2952 #endif
2953 }
2954
2955 static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
2956                                   struct iwl_rx_mem_buffer *rxb)
2957 {
2958 #ifdef CONFIG_IWL3945_DEBUG
2959         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2960         struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2961         IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
2962                      sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2963 #endif
2964 }
2965
2966 static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2967                                              struct iwl_rx_mem_buffer *rxb)
2968 {
2969         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2970         IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
2971                         "notification for %s:\n",
2972                         le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
2973         iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
2974                            le32_to_cpu(pkt->len));
2975 }
2976
2977 static void iwl3945_bg_beacon_update(struct work_struct *work)
2978 {
2979         struct iwl_priv *priv =
2980                 container_of(work, struct iwl_priv, beacon_update);
2981         struct sk_buff *beacon;
2982
2983         /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
2984         beacon = ieee80211_beacon_get(priv->hw, priv->vif);
2985
2986         if (!beacon) {
2987                 IWL_ERROR("update beacon failed\n");
2988                 return;
2989         }
2990
2991         mutex_lock(&priv->mutex);
2992         /* a new beacon skb is allocated every time; dispose of the previous one */
2993         if (priv->ibss_beacon)
2994                 dev_kfree_skb(priv->ibss_beacon);
2995
2996         priv->ibss_beacon = beacon;
2997         mutex_unlock(&priv->mutex);
2998
2999         iwl3945_send_beacon_cmd(priv);
3000 }
3001
3002 static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
3003                                 struct iwl_rx_mem_buffer *rxb)
3004 {
3005 #ifdef CONFIG_IWL3945_DEBUG
3006         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3007         struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
3008         u8 rate = beacon->beacon_notify_hdr.rate;
3009
3010         IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3011                 "tsf %d %d rate %d\n",
3012                 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3013                 beacon->beacon_notify_hdr.failure_frame,
3014                 le32_to_cpu(beacon->ibss_mgr_status),
3015                 le32_to_cpu(beacon->high_tsf),
3016                 le32_to_cpu(beacon->low_tsf), rate);
3017 #endif
3018
3019         if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
3020             (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3021                 queue_work(priv->workqueue, &priv->beacon_update);
3022 }
3023
3024 /* Service response to REPLY_SCAN_CMD (0x80) */
3025 static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
3026                               struct iwl_rx_mem_buffer *rxb)
3027 {
3028 #ifdef CONFIG_IWL3945_DEBUG
3029         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3030         struct iwl_scanreq_notification *notif =
3031             (struct iwl_scanreq_notification *)pkt->u.raw;
3032
3033         IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3034 #endif
3035 }
3036
3037 /* Service SCAN_START_NOTIFICATION (0x82) */
3038 static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
3039                                     struct iwl_rx_mem_buffer *rxb)
3040 {
3041         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3042         struct iwl_scanstart_notification *notif =
3043             (struct iwl_scanstart_notification *)pkt->u.raw;
3044         priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3045         IWL_DEBUG_SCAN("Scan start: "
3046                        "%d [802.11%s] "
3047                        "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3048                        notif->channel,
3049                        notif->band ? "bg" : "a",
3050                        notif->tsf_high,
3051                        notif->tsf_low, notif->status, notif->beacon_timer);
3052 }
3053
3054 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3055 static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
3056                                       struct iwl_rx_mem_buffer *rxb)
3057 {
3058         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3059         struct iwl_scanresults_notification *notif =
3060             (struct iwl_scanresults_notification *)pkt->u.raw;
3061
3062         IWL_DEBUG_SCAN("Scan ch.res: "
3063                        "%d [802.11%s] "
3064                        "(TSF: 0x%08X:%08X) - %d "
3065                        "elapsed=%lu usec (%dms since last)\n",
3066                        notif->channel,
3067                        notif->band ? "bg" : "a",
3068                        le32_to_cpu(notif->tsf_high),
3069                        le32_to_cpu(notif->tsf_low),
3070                        le32_to_cpu(notif->statistics[0]),
3071                        le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3072                        jiffies_to_msecs(elapsed_jiffies
3073                                         (priv->last_scan_jiffies, jiffies)));
3074
3075         priv->last_scan_jiffies = jiffies;
3076         priv->next_scan_jiffies = 0;
3077 }
3078
3079 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3080 static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
3081                                        struct iwl_rx_mem_buffer *rxb)
3082 {
3083         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3084         struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3085
3086         IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3087                        scan_notif->scanned_channels,
3088                        scan_notif->tsf_low,
3089                        scan_notif->tsf_high, scan_notif->status);
3090
3091         /* The HW is no longer scanning */
3092         clear_bit(STATUS_SCAN_HW, &priv->status);
3093
3094         /* The scan completion notification came in, so kill that timer... */
3095         cancel_delayed_work(&priv->scan_check);
3096
3097         IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3098                        (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
3099                                                         "2.4" : "5.2",
3100                        jiffies_to_msecs(elapsed_jiffies
3101                                         (priv->scan_pass_start, jiffies)));
3102
3103         /* Remove this scanned band from the list of pending
3104          * bands to scan.  Band G precedes band A in the scanning order,
3105          * as set up in iwl3945_bg_request_scan. */
3106         if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
3107                 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
3108         else if (priv->scan_bands &  BIT(IEEE80211_BAND_5GHZ))
3109                 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
3110
3111         /* If a request to abort was given, or the scan did not succeed
3112          * then we reset the scan state machine and terminate,
3113          * re-queuing another scan if one has been requested */
3114         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3115                 IWL_DEBUG_INFO("Aborted scan completed.\n");
3116                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3117         } else {
3118                 /* If there are more bands on this scan pass reschedule */
3119                 if (priv->scan_bands > 0)
3120                         goto reschedule;
3121         }
3122
3123         priv->last_scan_jiffies = jiffies;
3124         priv->next_scan_jiffies = 0;
3125         IWL_DEBUG_INFO("Setting scan to off\n");
3126
3127         clear_bit(STATUS_SCANNING, &priv->status);
3128
3129         IWL_DEBUG_INFO("Scan took %dms\n",
3130                 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3131
3132         queue_work(priv->workqueue, &priv->scan_completed);
3133
3134         return;
3135
3136 reschedule:
3137         priv->scan_pass_start = jiffies;
3138         queue_work(priv->workqueue, &priv->request_scan);
3139 }
3140
3141 /* Handle notification from uCode that card's power state is changing
3142  * due to software, hardware, or critical temperature RFKILL */
3143 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
3144                                     struct iwl_rx_mem_buffer *rxb)
3145 {
3146         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3147         u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3148         unsigned long status = priv->status;
3149
3150         IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
3151                           (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3152                           (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3153
3154         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3155                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3156
3157         if (flags & HW_CARD_DISABLED)
3158                 set_bit(STATUS_RF_KILL_HW, &priv->status);
3159         else
3160                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3161
3162
3163         if (flags & SW_CARD_DISABLED)
3164                 set_bit(STATUS_RF_KILL_SW, &priv->status);
3165         else
3166                 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3167
3168         iwl3945_scan_cancel(priv);
3169
3170         if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3171              test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
3172             (test_bit(STATUS_RF_KILL_SW, &status) !=
3173              test_bit(STATUS_RF_KILL_SW, &priv->status)))
3174                 queue_work(priv->workqueue, &priv->rf_kill);
3175         else
3176                 wake_up_interruptible(&priv->wait_command_queue);
3177 }
3178
3179 /**
3180  * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
3181  *
3182  * Setup the RX handlers for each of the reply types sent from the uCode
3183  * to the host.
3184  *
3185  * This function chains into the hardware specific files for them to setup
3186  * any hardware specific handlers as well.
3187  */
3188 static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
3189 {
3190         priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
3191         priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
3192         priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
3193         priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
3194         priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
3195             iwl3945_rx_spectrum_measure_notif;
3196         priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
3197         priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
3198             iwl3945_rx_pm_debug_statistics_notif;
3199         priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
3200
3201         /*
3202          * The same handler is used both for the REPLY to a discrete
3203          * statistics request from the host and for the periodic
3204          * statistics notifications (after received beacons) from the uCode.
3205          */
3206         priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
3207         priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
3208
3209         priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
3210         priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
3211         priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3212             iwl3945_rx_scan_results_notif;
3213         priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3214             iwl3945_rx_scan_complete_notif;
3215         priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
3216
3217         /* Set up hardware specific Rx handlers */
3218         iwl3945_hw_rx_handler_setup(priv);
3219 }
3220
3221 /**
3222  * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
3223  * When the FW advances the 'R' index, all entries between the old and
3224  * new 'R' index need to be reclaimed.
3225  */
3226 static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
3227                                       int txq_id, int index)
3228 {
3229         struct iwl3945_tx_queue *txq = &priv->txq39[txq_id];
3230         struct iwl_queue *q = &txq->q;
3231         int nfreed = 0;
3232
3233         if ((index >= q->n_bd) || (iwl3945_x2_queue_used(q, index) == 0)) {
3234                 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3235                           "is out of range [0-%d] %d %d.\n", txq_id,
3236                           index, q->n_bd, q->write_ptr, q->read_ptr);
3237                 return;
3238         }
3239
3240         for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
3241                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3242                 if (nfreed > 1) {
3243                         IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3244                                         q->write_ptr, q->read_ptr);
3245                         queue_work(priv->workqueue, &priv->restart);
3246                         break;
3247                 }
3248                 nfreed++;
3249         }
3250 }
3251
3252
3253 /**
3254  * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3255  * @rxb: Rx buffer to reclaim
3256  *
3257  * If an Rx buffer has an async callback associated with it the callback
3258  * will be executed.  The attached skb (if present) will only be freed
3259  * if the callback returns 1
3260  */
3261 static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
3262                                 struct iwl_rx_mem_buffer *rxb)
3263 {
3264         struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3265         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3266         int txq_id = SEQ_TO_QUEUE(sequence);
3267         int index = SEQ_TO_INDEX(sequence);
3268         int huge =  !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3269         int cmd_index;
3270         struct iwl3945_cmd *cmd;
3271
3272         BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3273
3274         cmd_index = get_cmd_index(&priv->txq39[IWL_CMD_QUEUE_NUM].q, index, huge);
3275         cmd = &priv->txq39[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3276
3277         /* Input error checking is done when commands are added to queue. */
3278         if (cmd->meta.flags & CMD_WANT_SKB) {
3279                 cmd->meta.source->u.skb = rxb->skb;
3280                 rxb->skb = NULL;
3281         } else if (cmd->meta.u.callback &&
3282                    !cmd->meta.u.callback(priv, cmd, rxb->skb))
3283                 rxb->skb = NULL;
3284
3285         iwl3945_cmd_queue_reclaim(priv, txq_id, index);
3286
3287         if (!(cmd->meta.flags & CMD_ASYNC)) {
3288                 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3289                 wake_up_interruptible(&priv->wait_command_queue);
3290         }
3291 }
3292
3293 /************************** RX-FUNCTIONS ****************************/
3294 /*
3295  * Rx theory of operation
3296  *
3297  * The host allocates 32 DMA target addresses and passes the host address
3298  * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3299  * 0 to 31
3300  *
3301  * Rx Queue Indexes
3302  * The host/firmware share two index registers for managing the Rx buffers.
3303  *
3304  * The READ index maps to the first position that the firmware may be writing
3305  * to -- the driver can read up to (but not including) this position and get
3306  * good data.
3307  * The READ index is managed by the firmware once the card is enabled.
3308  *
3309  * The WRITE index maps to the last position the driver has read from -- the
3310  * position preceding WRITE is the last slot the firmware can place a packet.
3311  *
3312  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3313  * WRITE = READ.
3314  *
3315  * During initialization, the host sets up the READ queue position to the first
3316  * INDEX position, and WRITE to the last (READ - 1 wrapped)
3317  *
3318  * When the firmware places a packet in a buffer, it will advance the READ index
3319  * and fire the RX interrupt.  The driver can then query the READ index and
3320  * process as many packets as possible, moving the WRITE index forward as it
3321  * resets the Rx queue buffers with new memory.
3322  *
3323  * The management in the driver is as follows:
3324  * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
3325  *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3326  *   to replenish the iwl->rxq->rx_free.
3327  * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
3328  *   iwl->rxq is replenished and the READ INDEX is updated (updating the
3329  *   'processed' and 'read' driver indexes as well)
3330  * + A received packet is processed and handed to the kernel network stack,
3331  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
3332  * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3333  *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3334  *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
3335  *   were enough free buffers and RX_STALLED is set it is cleared.
3336  *
3337  *
3338  * Driver sequence:
3339  *
3340  * iwl3945_rx_queue_alloc()   Allocates rx_free
3341  * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
3342  *                            iwl3945_rx_queue_restock
3343  * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
3344  *                            queue, updates firmware pointers, and updates
3345  *                            the WRITE index.  If insufficient rx_free buffers
3346  *                            are available, schedules iwl3945_rx_replenish
3347  *
3348  * -- enable interrupts --
3349  * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
3350  *                            READ INDEX, detaching the SKB from the pool.
3351  *                            Moves the packet buffer from queue to rx_used.
3352  *                            Calls iwl3945_rx_queue_restock to refill any empty
3353  *                            slots.
3354  * ...
3355  *
3356  */
3357
3358 /**
3359  * iwl3945_rx_queue_space - Return number of free slots available in queue.
3360  */
3361 static int iwl3945_rx_queue_space(const struct iwl_rx_queue *q)
3362 {
3363         int s = q->read - q->write;
3364         if (s <= 0)
3365                 s += RX_QUEUE_SIZE;
3366         /* keep two slots unused so a full ring is never mistaken for an empty one */
3367         s -= 2;
3368         if (s < 0)
3369                 s = 0;
3370         return s;
3371 }
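
/*
 * Example: right after iwl3945_rx_queue_alloc() (read == write == 0) the
 * function above reports RX_QUEUE_SIZE - 2 free slots; the two reserved
 * slots keep a completely full ring from looking identical to an empty one.
 */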
3372
3373 /**
3374  * iwl3945_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3375  */
3376 int iwl3945_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
3377 {
3378         u32 reg = 0;
3379         int rc = 0;
3380         unsigned long flags;
3381
3382         spin_lock_irqsave(&q->lock, flags);
3383
3384         if (q->need_update == 0)
3385                 goto exit_unlock;
3386
3387         /* If power-saving is in use, make sure device is awake */
3388         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3389                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3390
3391                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3392                         iwl_set_bit(priv, CSR_GP_CNTRL,
3393                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3394                         goto exit_unlock;
3395                 }
3396
3397                 rc = iwl_grab_nic_access(priv);
3398                 if (rc)
3399                         goto exit_unlock;
3400
3401                 /* Device expects a multiple of 8 */
3402                 iwl_write_direct32(priv, FH39_RSCSR_CHNL0_WPTR,
3403                                      q->write & ~0x7);
3404                 iwl_release_nic_access(priv);
3405
3406         /* Else device is assumed to be awake */
3407         } else
3408                 /* Device expects a multiple of 8 */
3409                 iwl_write32(priv, FH39_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3410
3411
3412         q->need_update = 0;
3413
3414  exit_unlock:
3415         spin_unlock_irqrestore(&q->lock, flags);
3416         return rc;
3417 }
3418
3419 /**
3420  * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
3421  */
3422 static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
3423                                           dma_addr_t dma_addr)
3424 {
3425         return cpu_to_le32((u32)dma_addr);
3426 }
3427
3428 /**
3429  * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
3430  *
3431  * If there are slots in the RX queue that need to be restocked,
3432  * and we have free pre-allocated buffers, fill the ranks as much
3433  * as we can, pulling from rx_free.
3434  *
3435  * This moves the 'write' index forward to catch up with 'processed', and
3436  * also updates the memory address in the firmware to reference the new
3437  * target buffer.
3438  */
3439 static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
3440 {
3441         struct iwl_rx_queue *rxq = &priv->rxq;
3442         struct list_head *element;
3443         struct iwl_rx_mem_buffer *rxb;
3444         unsigned long flags;
3445         int write, rc;
3446
3447         spin_lock_irqsave(&rxq->lock, flags);
3448         write = rxq->write & ~0x7;
3449         while ((iwl3945_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
3450                 /* Get next free Rx buffer, remove from free list */
3451                 element = rxq->rx_free.next;
3452                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
3453                 list_del(element);
3454
3455                 /* Point to Rx buffer via next RBD in circular buffer */
3456                 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
3457                 rxq->queue[rxq->write] = rxb;
3458                 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3459                 rxq->free_count--;
3460         }
3461         spin_unlock_irqrestore(&rxq->lock, flags);
3462         /* If the pre-allocated buffer pool is dropping low, schedule to
3463          * refill it */
3464         if (rxq->free_count <= RX_LOW_WATERMARK)
3465                 queue_work(priv->workqueue, &priv->rx_replenish);
3466
3467
3468         /* If we've added more space for the firmware to place data, tell it.
3469          * Increment device's write pointer in multiples of 8. */
3470         if ((write != (rxq->write & ~0x7))
3471             || (abs(rxq->write - rxq->read) > 7)) {
3472                 spin_lock_irqsave(&rxq->lock, flags);
3473                 rxq->need_update = 1;
3474                 spin_unlock_irqrestore(&rxq->lock, flags);
3475                 rc = iwl3945_rx_queue_update_write_ptr(priv, rxq);
3476                 if (rc)
3477                         return rc;
3478         }
3479
3480         return 0;
3481 }
3482
3483 /**
3484  * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
3485  *
3486  * When moving to rx_free an SKB is allocated for the slot.
3487  *
3488  * Also restock the Rx queue via iwl3945_rx_queue_restock.
3489  * This is called as a scheduled work item (except during initialization).
3490  */
3491 static void iwl3945_rx_allocate(struct iwl_priv *priv)
3492 {
3493         struct iwl_rx_queue *rxq = &priv->rxq;
3494         struct list_head *element;
3495         struct iwl_rx_mem_buffer *rxb;
3496         unsigned long flags;
3497         spin_lock_irqsave(&rxq->lock, flags);
3498         while (!list_empty(&rxq->rx_used)) {
3499                 element = rxq->rx_used.next;
3500                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
3501
3502                 /* Alloc a new receive buffer */
3503                 rxb->skb =
3504                     alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
3505                 if (!rxb->skb) {
3506                         if (net_ratelimit())
3507                                 printk(KERN_CRIT DRV_NAME
3508                                        ": Can not allocate SKB buffers\n");
3509                         /* We don't reschedule replenish work here -- we will
3510                          * call the restock method and if it still needs
3511                          * more buffers it will schedule replenish */
3512                         break;
3513                 }
3514
3515                 /* If a radiotap header is required, reserve some headroom here.
3516                  * The PHY header length is variable (rx_stats->phy_count).
3517                  * We reserve 4 bytes here; together with the space taken by the
3518                  * PHY header, this should be enough headroom for the radiotap
3519                  * header that iwl3945 supports.  See iwl3945_rt.
3520                  */
3521                 skb_reserve(rxb->skb, 4);
3522
3523                 priv->alloc_rxb_skb++;
3524                 list_del(element);
3525
3526                 /* Get physical address of RB/SKB */
3527                 rxb->real_dma_addr =
3528                     pci_map_single(priv->pci_dev, rxb->skb->data,
3529                                    IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3530                 list_add_tail(&rxb->list, &rxq->rx_free);
3531                 rxq->free_count++;
3532         }
3533         spin_unlock_irqrestore(&rxq->lock, flags);
3534 }
3535
3536 /*
3537  * this should be called while priv->lock is locked
3538  */
3539 static void __iwl3945_rx_replenish(void *data)
3540 {
3541         struct iwl_priv *priv = data;
3542
3543         iwl3945_rx_allocate(priv);
3544         iwl3945_rx_queue_restock(priv);
3545 }
3546
3547
3548 void iwl3945_rx_replenish(void *data)
3549 {
3550         struct iwl_priv *priv = data;
3551         unsigned long flags;
3552
3553         iwl3945_rx_allocate(priv);
3554
3555         spin_lock_irqsave(&priv->lock, flags);
3556         iwl3945_rx_queue_restock(priv);
3557         spin_unlock_irqrestore(&priv->lock, flags);
3558 }
3559
3560 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3561  * If an SKB has been detached, the pool entry must have its skb set to NULL.
3562  * This free routine walks the list of pool entries; every entry whose skb
3563  * is non-NULL is unmapped and its skb freed.
3564  */
3565 static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
3566 {
3567         int i;
3568         for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3569                 if (rxq->pool[i].skb != NULL) {
3570                         pci_unmap_single(priv->pci_dev,
3571                                          rxq->pool[i].real_dma_addr,
3572                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3573                         dev_kfree_skb(rxq->pool[i].skb);
3574                 }
3575         }
3576
3577         pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3578                             rxq->dma_addr);
3579         rxq->bd = NULL;
3580 }
3581
3582 int iwl3945_rx_queue_alloc(struct iwl_priv *priv)
3583 {
3584         struct iwl_rx_queue *rxq = &priv->rxq;
3585         struct pci_dev *dev = priv->pci_dev;
3586         int i;
3587
3588         spin_lock_init(&rxq->lock);
3589         INIT_LIST_HEAD(&rxq->rx_free);
3590         INIT_LIST_HEAD(&rxq->rx_used);
3591
3592         /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
3593         rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3594         if (!rxq->bd)
3595                 return -ENOMEM;
3596
3597         /* Fill the rx_used queue with _all_ of the Rx buffers */
3598         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3599                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3600
3601         /* Set us so that we have processed and used all buffers, but have
3602          * not restocked the Rx queue with fresh buffers */
3603         rxq->read = rxq->write = 0;
3604         rxq->free_count = 0;
3605         rxq->need_update = 0;
3606         return 0;
3607 }
3608
3609 void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
3610 {
3611         unsigned long flags;
3612         int i;
3613         spin_lock_irqsave(&rxq->lock, flags);
3614         INIT_LIST_HEAD(&rxq->rx_free);
3615         INIT_LIST_HEAD(&rxq->rx_used);
3616         /* Fill the rx_used queue with _all_ of the Rx buffers */
3617         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3618                 /* In the reset function, these buffers may have been allocated
3619                  * to an SKB, so we need to unmap and free potential storage */
3620                 if (rxq->pool[i].skb != NULL) {
3621                         pci_unmap_single(priv->pci_dev,
3622                                          rxq->pool[i].real_dma_addr,
3623                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3624                         priv->alloc_rxb_skb--;
3625                         dev_kfree_skb(rxq->pool[i].skb);
3626                         rxq->pool[i].skb = NULL;
3627                 }
3628                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3629         }
3630
3631         /* Set us so that we have processed and used all buffers, but have
3632          * not restocked the Rx queue with fresh buffers */
3633         rxq->read = rxq->write = 0;
3634         rxq->free_count = 0;
3635         spin_unlock_irqrestore(&rxq->lock, flags);
3636 }
3637
3638 /* Convert linear signal-to-noise ratio into dB */
3639 static u8 ratio2dB[100] = {
3640 /*       0   1   2   3   4   5   6   7   8   9 */
3641          0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3642         20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3643         26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3644         29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3645         32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3646         34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3647         36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3648         37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3649         38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3650         39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
3651 };
3652
3653 /* Calculates a relative dB value from a ratio of linear
3654  *   (i.e. not dB) signal levels.
3655  * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
3656 int iwl3945_calc_db_from_ratio(int sig_ratio)
3657 {
3658         /* 1000:1 or higher just report as 60 dB */
3659         if (sig_ratio >= 1000)
3660                 return 60;
3661
3662         /* 100:1 or higher, divide by 10 and use table,
3663          *   add 20 dB to make up for divide by 10 */
3664         if (sig_ratio >= 100)
3665                 return 20 + (int)ratio2dB[sig_ratio/10];
3666
3667         /* We shouldn't see this */
3668         if (sig_ratio < 1)
3669                 return 0;
3670
3671         /* Use table for ratios 1:1 - 99:1 */
3672         return (int)ratio2dB[sig_ratio];
3673 }
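
/*
 * Example: a linear ratio of 250:1 takes the second branch above and yields
 * 20 + ratio2dB[25] = 20 + 28 = 48 dB, close to 20*log10(250) ~= 48 dB for
 * voltage-style ratios.
 */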
3674
3675 #define PERFECT_RSSI (-20) /* dBm */
3676 #define WORST_RSSI (-95)   /* dBm */
3677 #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3678
3679 /* Calculate an indication of rx signal quality (a percentage, not dBm!).
3680  * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3681  *   about formulas used below. */
3682 int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
3683 {
3684         int sig_qual;
3685         int degradation = PERFECT_RSSI - rssi_dbm;
3686
3687         /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3688          * as indicator; formula is (signal dbm - noise dbm).
3689          * SNR at or above 40 is a great signal (100%).
3690          * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3691          * Weakest usable signal is usually 10 - 15 dB SNR. */
3692         if (noise_dbm) {
3693                 if (rssi_dbm - noise_dbm >= 40)
3694                         return 100;
3695                 else if (rssi_dbm < noise_dbm)
3696                         return 0;
3697                 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3698
3699         /* Else use just the signal level.
3700          * This formula is a least squares fit of data points collected and
3701          *   compared with a reference system that had a percentage (%) display
3702          *   for signal quality. */
3703         } else
3704                 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3705                             (15 * RSSI_RANGE + 62 * degradation)) /
3706                            (RSSI_RANGE * RSSI_RANGE);
3707
3708         if (sig_qual > 100)
3709                 sig_qual = 100;
3710         else if (sig_qual < 1)
3711                 sig_qual = 0;
3712
3713         return sig_qual;
3714 }
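
/*
 * Example: rssi_dbm = -60 with noise_dbm = -90 uses the SNR branch:
 * ((-60 - -90) * 5) / 2 = 75% quality.  With no noise measurement the same
 * -60 dBm signal gives degradation = 40, so the fitted formula yields
 * (100 * 75 * 75 - 40 * (15 * 75 + 62 * 40)) / (75 * 75) = 74%.
 */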
3715
3716 /**
3717  * iwl3945_rx_handle - Main entry function for receiving responses from uCode
3718  *
3719  * Uses the priv->rx_handlers callback function array to invoke
3720  * the appropriate handlers, including command responses,
3721  * frame-received notifications, and other notifications.
3722  */
3723 static void iwl3945_rx_handle(struct iwl_priv *priv)
3724 {
3725         struct iwl_rx_mem_buffer *rxb;
3726         struct iwl_rx_packet *pkt;
3727         struct iwl_rx_queue *rxq = &priv->rxq;
3728         u32 r, i;
3729         int reclaim;
3730         unsigned long flags;
3731         u8 fill_rx = 0;
3732         u32 count = 8;
3733
3734         /* uCode's read index (stored in shared DRAM) indicates the last Rx
3735          * buffer that the driver may process (last buffer filled by ucode). */
3736         r = iwl3945_hw_get_rx_read(priv);
3737         i = rxq->read;
3738
3739         if (iwl3945_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3740                 fill_rx = 1;
3741         /* Rx interrupt, but nothing sent from uCode */
3742         if (i == r)
3743                 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
3744
3745         while (i != r) {
3746                 rxb = rxq->queue[i];
3747
3748                 /* If an RXB doesn't have a Rx queue slot associated with it,
3749                  * then a bug has been introduced in the queue refilling
3750                  * routines -- catch it here */
3751                 BUG_ON(rxb == NULL);
3752
3753                 rxq->queue[i] = NULL;
3754
3755                 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
3756                                             IWL_RX_BUF_SIZE,
3757                                             PCI_DMA_FROMDEVICE);
3758                 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3759
3760                 /* Reclaim a command buffer only if this packet is a response
3761                  *   to a (driver-originated) command.
3762                  * If the packet (e.g. Rx frame) originated from uCode,
3763                  *   there is no command buffer to reclaim.
3764                  * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3765                  *   but apparently a few don't get set; catch them here. */
3766                 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3767                         (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3768                         (pkt->hdr.cmd != REPLY_TX);
3769
3770                 /* Based on type of command response or notification,
3771                  *   handle those that need handling via function in
3772                  *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
3773                 if (priv->rx_handlers[pkt->hdr.cmd]) {
3774                         IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3775                                 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3776                                 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3777                         priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3778                 } else {
3779                         /* No handling needed */
3780                         IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3781                                 "r %d i %d No handler needed for %s, 0x%02x\n",
3782                                 r, i, get_cmd_string(pkt->hdr.cmd),
3783                                 pkt->hdr.cmd);
3784                 }
3785
3786                 if (reclaim) {
3787                         /* Invoke any callbacks, transfer the skb to caller, and
3788                          * fire off the (possibly) blocking iwl3945_send_cmd()
3789                          * as we reclaim the driver command queue */
3790                         if (rxb && rxb->skb)
3791                                 iwl3945_tx_cmd_complete(priv, rxb);
3792                         else
3793                                 IWL_WARNING("Claim null rxb?\n");
3794                 }
3795
3796                 /* For now we just don't re-use anything.  We can tweak this
3797                  * later to try and re-use notification packets and SKBs that
3798                  * fail to Rx correctly */
3799                 if (rxb->skb != NULL) {
3800                         priv->alloc_rxb_skb--;
3801                         dev_kfree_skb_any(rxb->skb);
3802                         rxb->skb = NULL;
3803                 }
3804
3805                 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
3806                                  IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3807                 spin_lock_irqsave(&rxq->lock, flags);
3808                 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3809                 spin_unlock_irqrestore(&rxq->lock, flags);
3810                 i = (i + 1) & RX_QUEUE_MASK;
3811                 /* If there are a lot of unused frames,
3812                  * restock the Rx queue so ucode won't assert. */
3813                 if (fill_rx) {
3814                         count++;
3815                         if (count >= 8) {
3816                                 priv->rxq.read = i;
3817                                 __iwl3945_rx_replenish(priv);
3818                                 count = 0;
3819                         }
3820                 }
3821         }
3822
3823         /* Update the driver's Rx read index to where we stopped */
3824         priv->rxq.read = i;
3825         iwl3945_rx_queue_restock(priv);
3826 }
3827
3828 /**
3829  * iwl3945_tx_queue_update_write_ptr - Send new write index to hardware
3830  */
3831 static int iwl3945_tx_queue_update_write_ptr(struct iwl_priv *priv,
3832                                   struct iwl3945_tx_queue *txq)
3833 {
3834         u32 reg = 0;
3835         int rc = 0;
3836         int txq_id = txq->q.id;
3837
3838         if (txq->need_update == 0)
3839                 return rc;
3840
3841         /* if we're trying to save power */
3842         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3843                 /* wake up nic if it's powered down ...
3844                  * uCode will wake up, and interrupt us again, so next
3845                  * time we'll skip this part. */
3846                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3847
3848                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3849                         IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
3850                         iwl_set_bit(priv, CSR_GP_CNTRL,
3851                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3852                         return rc;
3853                 }
3854
3855                 /* restore this queue's parameters in nic hardware. */
3856                 rc = iwl_grab_nic_access(priv);
3857                 if (rc)
3858                         return rc;
3859                 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
3860                                      txq->q.write_ptr | (txq_id << 8));
3861                 iwl_release_nic_access(priv);
3862
3863         /* else not in power-save mode, uCode will never sleep when we're
3864          * trying to tx (during RFKILL, we're not trying to tx). */
3865         } else
3866                 iwl_write32(priv, HBUS_TARG_WRPTR,
3867                             txq->q.write_ptr | (txq_id << 8));
3868
3869         txq->need_update = 0;
3870
3871         return rc;
3872 }
3873
3874 #ifdef CONFIG_IWL3945_DEBUG
3875 static void iwl3945_print_rx_config_cmd(struct iwl_priv *priv,
3876                                         struct iwl3945_rxon_cmd *rxon)
3877 {
3878         IWL_DEBUG_RADIO("RX CONFIG:\n");
3879         iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
3880         IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3881         IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3882         IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3883                         le32_to_cpu(rxon->filter_flags));
3884         IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3885         IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3886                         rxon->ofdm_basic_rates);
3887         IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
3888         IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3889         IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
3890         IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3891 }
3892 #endif
3893
3894 static void iwl3945_enable_interrupts(struct iwl_priv *priv)
3895 {
3896         IWL_DEBUG_ISR("Enabling interrupts\n");
3897         set_bit(STATUS_INT_ENABLED, &priv->status);
3898         iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
3899 }
3900
3901
3902 /* call this function to flush any scheduled tasklet */
3903 static inline void iwl_synchronize_irq(struct iwl_priv *priv)
3904 {
3905         /* wait to make sure we flush any pending tasklet */
3906         synchronize_irq(priv->pci_dev->irq);
3907         tasklet_kill(&priv->irq_tasklet);
3908 }
3909
3910
3911 static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
3912 {
3913         clear_bit(STATUS_INT_ENABLED, &priv->status);
3914
3915         /* disable interrupts from uCode/NIC to host */
3916         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
3917
3918         /* acknowledge/clear/reset any interrupts still pending
3919          * from uCode or flow handler (Rx/Tx DMA) */
3920         iwl_write32(priv, CSR_INT, 0xffffffff);
3921         iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
3922         IWL_DEBUG_ISR("Disabled interrupts\n");
3923 }
3924
3925 static const char *desc_lookup(int i)
3926 {
3927         switch (i) {
3928         case 1:
3929                 return "FAIL";
3930         case 2:
3931                 return "BAD_PARAM";
3932         case 3:
3933                 return "BAD_CHECKSUM";
3934         case 4:
3935                 return "NMI_INTERRUPT";
3936         case 5:
3937                 return "SYSASSERT";
3938         case 6:
3939                 return "FATAL_ERROR";
3940         }
3941
3942         return "UNKNOWN";
3943 }
3944
3945 #define ERROR_START_OFFSET  (1 * sizeof(u32))
3946 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
3947
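/*
 * Layout of the on-chip error log, as implied by the reads below: a u32
 * entry count at 'base', followed by 'count' records of ERROR_ELEM_SIZE
 * (7 u32s) each: desc, time, blink1, blink2, ilink1, ilink2, data1.
 */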
3948 static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
3949 {
3950         u32 i;
3951         u32 desc, time, count, base, data1;
3952         u32 blink1, blink2, ilink1, ilink2;
3953         int rc;
3954
3955         base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
3956
3957         if (!iwl3945_hw_valid_rtc_data_addr(base)) {
3958                 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
3959                 return;
3960         }
3961
3962         rc = iwl_grab_nic_access(priv);
3963         if (rc) {
3964                 IWL_WARNING("Can not read from adapter at this time.\n");
3965                 return;
3966         }
3967
3968         count = iwl_read_targ_mem(priv, base);
3969
3970         if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
3971                 IWL_ERROR("Start IWL Error Log Dump:\n");
3972                 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
3973         }
3974
3975         IWL_ERROR("Desc       Time       asrtPC  blink2 "
3976                   "ilink1  nmiPC   Line\n");
3977         for (i = ERROR_START_OFFSET;
3978              i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
3979              i += ERROR_ELEM_SIZE) {
3980                 desc = iwl_read_targ_mem(priv, base + i);
3981                 time =
3982                     iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
3983                 blink1 =
3984                     iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
3985                 blink2 =
3986                     iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
3987                 ilink1 =
3988                     iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
3989                 ilink2 =
3990                     iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
3991                 data1 =
3992                     iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
3993
3994                 IWL_ERROR
3995                     ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
3996                      desc_lookup(desc), desc, time, blink1, blink2,
3997                      ilink1, ilink2, data1);
3998         }
3999
4000         iwl_release_nic_access(priv);
4001
4002 }
4003
4004 #define EVENT_START_OFFSET  (6 * sizeof(u32))
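
/*
 * Event log layout in SRAM, as implied by the reads below: a header of
 * EVENT_START_OFFSET bytes beginning with u32 capacity, mode, num_wraps
 * and next_entry (remaining header words unused here), followed by entries
 * of 2 u32s (event id, data) in mode 0, or 3 u32s (event id, timestamp,
 * data) otherwise.
 */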
4005
4006 /**
4007  * iwl3945_print_event_log - Dump error event log to syslog
4008  *
4009  * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4010  */
4011 static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
4012                                 u32 num_events, u32 mode)
4013 {
4014         u32 i;
4015         u32 base;       /* SRAM byte address of event log header */
4016         u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4017         u32 ptr;        /* SRAM byte address of log data */
4018         u32 ev, time, data; /* event log data */
4019
4020         if (num_events == 0)
4021                 return;
4022
4023         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4024
4025         if (mode == 0)
4026                 event_size = 2 * sizeof(u32);
4027         else
4028                 event_size = 3 * sizeof(u32);
4029
4030         ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4031
4032         /* "time" is actually "data" for mode 0 (no timestamp).
4033          * place event id # at far right for easier visual parsing. */
4034         for (i = 0; i < num_events; i++) {
4035                 ev = iwl_read_targ_mem(priv, ptr);
4036                 ptr += sizeof(u32);
4037                 time = iwl_read_targ_mem(priv, ptr);
4038                 ptr += sizeof(u32);
4039                 if (mode == 0)
4040                         IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4041                 else {
4042                         data = iwl_read_targ_mem(priv, ptr);
4043                         ptr += sizeof(u32);
4044                         IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4045                 }
4046         }
4047 }
4048
4049 static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
4050 {
4051         int rc;
4052         u32 base;       /* SRAM byte address of event log header */
4053         u32 capacity;   /* event log capacity in # entries */
4054         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
4055         u32 num_wraps;  /* # times uCode wrapped to top of log */
4056         u32 next_entry; /* index of next entry to be written by uCode */
4057         u32 size;       /* # entries that we'll print */
4058
4059         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4060         if (!iwl3945_hw_valid_rtc_data_addr(base)) {
4061                 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4062                 return;
4063         }
4064
4065         rc = iwl_grab_nic_access(priv);
4066         if (rc) {
4067                 IWL_WARNING("Can not read from adapter at this time.\n");
4068                 return;
4069         }
4070
4071         /* event log header */
4072         capacity = iwl_read_targ_mem(priv, base);
4073         mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4074         num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4075         next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
4076
4077         size = num_wraps ? capacity : next_entry;
4078
4079         /* bail out if nothing in log */
4080         if (size == 0) {
4081                 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
4082                 iwl_release_nic_access(priv);
4083                 return;
4084         }
4085
4086         IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
4087                   size, num_wraps);
4088
4089         /* if uCode has wrapped back to top of log, start at the oldest entry,
4090  * i.e. the next one that uCode would fill. */
4091         if (num_wraps)
4092                 iwl3945_print_event_log(priv, next_entry,
4093                                     capacity - next_entry, mode);
4094
4095         /* (then/else) start at top of log */
4096         iwl3945_print_event_log(priv, 0, next_entry, mode);
4097
4098         iwl_release_nic_access(priv);
4099 }
4100
4101 /**
4102  * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
4103  */
4104 static void iwl3945_irq_handle_error(struct iwl_priv *priv)
4105 {
4106         /* Set the FW error flag -- cleared on iwl3945_down */
4107         set_bit(STATUS_FW_ERROR, &priv->status);
4108
4109         /* Cancel currently queued command. */
4110         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4111
4112 #ifdef CONFIG_IWL3945_DEBUG
4113         if (priv->debug_level & IWL_DL_FW_ERRORS) {
4114                 iwl3945_dump_nic_error_log(priv);
4115                 iwl3945_dump_nic_event_log(priv);
4116                 iwl3945_print_rx_config_cmd(priv, &priv->staging39_rxon);
4117         }
4118 #endif
4119
4120         wake_up_interruptible(&priv->wait_command_queue);
4121
4122         /* Keep the restart process from trying to send host
4123          * commands by clearing the INIT status bit */
4124         clear_bit(STATUS_READY, &priv->status);
4125
4126         if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4127                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4128                           "Restarting adapter due to uCode error.\n");
4129
4130                 if (iwl3945_is_associated(priv)) {
4131                         memcpy(&priv->recovery39_rxon, &priv->active39_rxon,
4132                                sizeof(priv->recovery39_rxon));
4133                         priv->error_recovering = 1;
4134                 }
4135                 queue_work(priv->workqueue, &priv->restart);
4136         }
4137 }
4138
4139 static void iwl3945_error_recovery(struct iwl_priv *priv)
4140 {
4141         unsigned long flags;
4142
4143         memcpy(&priv->staging39_rxon, &priv->recovery39_rxon,
4144                sizeof(priv->staging39_rxon));
4145         priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4146         iwl3945_commit_rxon(priv);
4147
4148         iwl3945_add_station(priv, priv->bssid, 1, 0);
4149
4150         spin_lock_irqsave(&priv->lock, flags);
4151         priv->assoc_id = le16_to_cpu(priv->staging39_rxon.assoc_id);
4152         priv->error_recovering = 0;
4153         spin_unlock_irqrestore(&priv->lock, flags);
4154 }
4155
4156 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
4157 {
4158         u32 inta, handled = 0;
4159         u32 inta_fh;
4160         unsigned long flags;
4161 #ifdef CONFIG_IWL3945_DEBUG
4162         u32 inta_mask;
4163 #endif
4164
4165         spin_lock_irqsave(&priv->lock, flags);
4166
4167         /* Ack/clear/reset pending uCode interrupts.
4168          * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4169          *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
4170         inta = iwl_read32(priv, CSR_INT);
4171         iwl_write32(priv, CSR_INT, inta);
4172
4173         /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4174          * Any new interrupts that happen after this, either while we're
4175          * in this tasklet, or later, will show up in next ISR/tasklet. */
4176         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4177         iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4178
4179 #ifdef CONFIG_IWL3945_DEBUG
4180         if (priv->debug_level & IWL_DL_ISR) {
4181                 /* just for debug */
4182                 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4183                 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4184                               inta, inta_mask, inta_fh);
4185         }
4186 #endif
4187
4188         /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4189          * atomic, make sure that inta covers all the interrupts that
4190          * we've discovered, even if FH interrupt came in just after
4191          * reading CSR_INT. */
4192         if (inta_fh & CSR39_FH_INT_RX_MASK)
4193                 inta |= CSR_INT_BIT_FH_RX;
4194         if (inta_fh & CSR39_FH_INT_TX_MASK)
4195                 inta |= CSR_INT_BIT_FH_TX;
4196
4197         /* Now service all interrupt bits discovered above. */
4198         if (inta & CSR_INT_BIT_HW_ERR) {
4199                 IWL_ERROR("Microcode HW error detected.  Restarting.\n");
4200
4201                 /* Tell the device to stop sending interrupts */
4202                 iwl3945_disable_interrupts(priv);
4203
4204                 iwl3945_irq_handle_error(priv);
4205
4206                 handled |= CSR_INT_BIT_HW_ERR;
4207
4208                 spin_unlock_irqrestore(&priv->lock, flags);
4209
4210                 return;
4211         }
4212
4213 #ifdef CONFIG_IWL3945_DEBUG
4214         if (priv->debug_level & (IWL_DL_ISR)) {
4215                 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4216                 if (inta & CSR_INT_BIT_SCD)
4217                         IWL_DEBUG_ISR("Scheduler finished transmitting "
4218                                       "the frame(s).\n");
4219
4220                 /* Alive notification via Rx interrupt will do the real work */
4221                 if (inta & CSR_INT_BIT_ALIVE)
4222                         IWL_DEBUG_ISR("Alive interrupt\n");
4223         }
4224 #endif
4225         /* Safely ignore these bits for debug checks below */
4226         inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
4227
4228         /* Error detected by uCode */
4229         if (inta & CSR_INT_BIT_SW_ERR) {
4230                 IWL_ERROR("Microcode SW error detected.  Restarting 0x%X.\n",
4231                           inta);
4232                 iwl3945_irq_handle_error(priv);
4233                 handled |= CSR_INT_BIT_SW_ERR;
4234         }
4235
4236         /* uCode wakes up after power-down sleep */
4237         if (inta & CSR_INT_BIT_WAKEUP) {
4238                 IWL_DEBUG_ISR("Wakeup interrupt\n");
4239                 iwl3945_rx_queue_update_write_ptr(priv, &priv->rxq);
4240                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[0]);
4241                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[1]);
4242                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[2]);
4243                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[3]);
4244                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[4]);
4245                 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq39[5]);
4246
4247                 handled |= CSR_INT_BIT_WAKEUP;
4248         }
4249
4250         /* All uCode command responses, including Tx command responses,
4251          * Rx "responses" (frame-received notification), and other
4252          * notifications from uCode come through here. */
4253         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4254                 iwl3945_rx_handle(priv);
4255                 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4256         }
4257
4258         if (inta & CSR_INT_BIT_FH_TX) {
4259                 IWL_DEBUG_ISR("Tx interrupt\n");
4260
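                /* Editor's note (assumption): bit 6 of CSR_FH_INT_STATUS is
                 * taken here to be the Tx-done bit for the service channel
                 * (FH39_SRVC_CHNL), whose credit register is reset below. */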
4261                 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
4262                 if (!iwl_grab_nic_access(priv)) {
4263                         iwl_write_direct32(priv, FH39_TCSR_CREDIT
4264                                              (FH39_SRVC_CHNL), 0x0);
4265                         iwl_release_nic_access(priv);
4266                 }
4267                 handled |= CSR_INT_BIT_FH_TX;
4268         }
4269
4270         if (inta & ~handled)
4271                 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4272
4273         if (inta & ~CSR_INI_SET_MASK) {
4274                 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
4275                          inta & ~CSR_INI_SET_MASK);
4276                 IWL_WARNING("   with FH_INT = 0x%08x\n", inta_fh);
4277         }
4278
4279         /* Re-enable all interrupts */
4280         /* only re-enable if they were disabled by the irq handler */
4281         if (test_bit(STATUS_INT_ENABLED, &priv->status))
4282                 iwl3945_enable_interrupts(priv);
4283
4284 #ifdef CONFIG_IWL3945_DEBUG
4285         if (priv->debug_level & (IWL_DL_ISR)) {
4286                 inta = iwl_read32(priv, CSR_INT);
4287                 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4288                 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4289                 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4290                         "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4291         }
4292 #endif
4293         spin_unlock_irqrestore(&priv->lock, flags);
4294 }
4295
4296 static irqreturn_t iwl3945_isr(int irq, void *data)
4297 {
4298         struct iwl_priv *priv = data;
4299         u32 inta, inta_mask;
4300         u32 inta_fh;
4301         if (!priv)
4302                 return IRQ_NONE;
4303
4304         spin_lock(&priv->lock);
4305
4306         /* Disable (but don't clear!) interrupts here to avoid
4307          *    back-to-back ISRs and sporadic interrupts from our NIC.
4308          * If we have something to service, the tasklet will re-enable ints.
4309          * If we *don't* have something, we'll re-enable before leaving here. */
4310         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
4311         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4312
4313         /* Discover which interrupts are active/pending */
4314         inta = iwl_read32(priv, CSR_INT);
4315         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4316
4317         /* Ignore interrupt if there's nothing in NIC to service.
4318          * This may be due to IRQ shared with another device,
4319          * or due to sporadic interrupts thrown from our NIC. */
4320         if (!inta && !inta_fh) {
4321                 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
4322                 goto none;
4323         }
4324
4325         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4326                 /* Hardware disappeared */
4327                 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
4328                 goto unplugged;
4329         }
4330
4331         IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4332                       inta, inta_mask, inta_fh);
4333
4334         inta &= ~CSR_INT_BIT_SCD;
4335
4336         /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
4337         if (likely(inta || inta_fh))
4338                 tasklet_schedule(&priv->irq_tasklet);
4339 unplugged:
4340         spin_unlock(&priv->lock);
4341
4342         return IRQ_HANDLED;
4343
4344  none:
4345         /* re-enable interrupts here since we don't have anything to service. */
4346         /* only re-enable if they were disabled by the irq handler */
4347         if (test_bit(STATUS_INT_ENABLED, &priv->status))
4348                 iwl3945_enable_interrupts(priv);
4349         spin_unlock(&priv->lock);
4350         return IRQ_NONE;
4351 }
4352
4353 /************************** EEPROM BANDS ****************************
4354  *
4355  * The iwl3945_eeprom_band definitions below provide the mapping from the
4356  * EEPROM contents to the specific channel number supported for each
4357  * band.
4358  *
4359  * For example, priv->eeprom39.band_3_channels[4] from the band_3
4360  * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4361  * The specific geography and calibration information for that channel
4362  * is contained in the eeprom map itself.
4363  *
4364  * During init, we copy the eeprom information and channel map
4365  * information into priv->channel_info_24/52 and priv->channel_map_24/52
4366  *
4367  * channel_map_24/52 provides the index in the channel_info array for a
4368  * given channel.  We have to have two separate maps as there is channel
4369  * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4370  * band_2
4371  *
4372  * A value of 0xff stored in the channel_map indicates that the channel
4373  * is not supported by the hardware at all.
4374  *
4375  * A value of 0xfe in the channel_map indicates that the channel is not
4376  * valid for Tx with the current hardware.  This means that
4377  * while the system can tune and receive on a given channel, it may not
4378  * be able to associate or transmit any frames on that
4379  * channel.  There is no corresponding channel information for that
4380  * entry.
4381  *
4382  *********************************************************************/
4383
4384 /* 2.4 GHz */
4385 static const u8 iwl3945_eeprom_band_1[14] = {
4386         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4387 };
4388
4389 /* 5.2 GHz bands */
4390 static const u8 iwl3945_eeprom_band_2[] = {     /* 4915-5080MHz */
4391         183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4392 };
4393
4394 static const u8 iwl3945_eeprom_band_3[] = {     /* 5170-5320MHz */
4395         34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4396 };
4397
4398 static const u8 iwl3945_eeprom_band_4[] = {     /* 5500-5700MHz */
4399         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4400 };
4401
4402 static const u8 iwl3945_eeprom_band_5[] = {     /* 5725-5825MHz */
4403         145, 149, 153, 157, 161, 165
4404 };
4405
4406 static void iwl3945_init_band_reference(const struct iwl_priv *priv, int band,
4407                                     int *eeprom_ch_count,
4408                                     const struct iwl_eeprom_channel
4409                                     **eeprom_ch_info,
4410                                     const u8 **eeprom_ch_index)
4411 {
4412         switch (band) {
4413         case 1:         /* 2.4GHz band */
4414                 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
4415                 *eeprom_ch_info = priv->eeprom39.band_1_channels;
4416                 *eeprom_ch_index = iwl3945_eeprom_band_1;
4417                 break;
4418         case 2:         /* 4.9GHz band */
4419                 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
4420                 *eeprom_ch_info = priv->eeprom39.band_2_channels;
4421                 *eeprom_ch_index = iwl3945_eeprom_band_2;
4422                 break;
4423         case 3:         /* 5.2GHz band */
4424                 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
4425                 *eeprom_ch_info = priv->eeprom39.band_3_channels;
4426                 *eeprom_ch_index = iwl3945_eeprom_band_3;
4427                 break;
4428         case 4:         /* 5.5GHz band */
4429                 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
4430                 *eeprom_ch_info = priv->eeprom39.band_4_channels;
4431                 *eeprom_ch_index = iwl3945_eeprom_band_4;
4432                 break;
4433         case 5:         /* 5.7GHz band */
4434                 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
4435                 *eeprom_ch_info = priv->eeprom39.band_5_channels;
4436                 *eeprom_ch_index = iwl3945_eeprom_band_5;
4437                 break;
4438         default:
4439                 BUG();
4440                 return;
4441         }
4442 }
4443
4444 /**
4445  * iwl3945_get_channel_info - Find driver's private channel info
4446  *
4447  * Based on band and channel number.
4448  */
4449 const struct iwl_channel_info *
4450 iwl3945_get_channel_info(const struct iwl_priv *priv,
4451                          enum ieee80211_band band, u16 channel)
4452 {
4453         int i;
4454
4455         switch (band) {
4456         case IEEE80211_BAND_5GHZ:
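                /* The first 14 entries of priv->channel_info are the 2.4 GHz
                 * channels (see iwl3945_init_channel_map), so the 5 GHz
                 * search starts at index 14. */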
4457                 for (i = 14; i < priv->channel_count; i++) {
4458                         if (priv->channel_info[i].channel == channel)
4459                                 return &priv->channel_info[i];
4460                 }
4461                 break;
4462
4463         case IEEE80211_BAND_2GHZ:
4464                 if (channel >= 1 && channel <= 14)
4465                         return &priv->channel_info[channel - 1];
4466                 break;
4467         case IEEE80211_NUM_BANDS:
4468                 WARN_ON(1);
4469         }
4470
4471         return NULL;
4472 }
4473
4474 #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4475                             ? # x " " : "")
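/*
 * Illustrative expansion (editor's addition): CHECK_AND_PRINT(VALID), for
 * example, tests eeprom_ch_info[ch].flags against EEPROM_CHANNEL_VALID and
 * yields the string "VALID " when the flag is set, or "" otherwise, so the
 * channel-map debug printout below lists only the flags actually present.
 */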
4476
4477 /**
4478  * iwl3945_init_channel_map - Set up driver's info for all possible channels
4479  */
4480 static int iwl3945_init_channel_map(struct iwl_priv *priv)
4481 {
4482         int eeprom_ch_count = 0;
4483         const u8 *eeprom_ch_index = NULL;
4484         const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
4485         int band, ch;
4486         struct iwl_channel_info *ch_info;
4487
4488         if (priv->channel_count) {
4489                 IWL_DEBUG_INFO("Channel map already initialized.\n");
4490                 return 0;
4491         }
4492
4493         if (priv->eeprom39.version < 0x2f) {
4494                 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
4495                             priv->eeprom39.version);
4496                 return -EINVAL;
4497         }
4498
4499         IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
4500
4501         priv->channel_count =
4502             ARRAY_SIZE(iwl3945_eeprom_band_1) +
4503             ARRAY_SIZE(iwl3945_eeprom_band_2) +
4504             ARRAY_SIZE(iwl3945_eeprom_band_3) +
4505             ARRAY_SIZE(iwl3945_eeprom_band_4) +
4506             ARRAY_SIZE(iwl3945_eeprom_band_5);
4507
4508         IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
4509
4510         priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
4511                                      priv->channel_count, GFP_KERNEL);
4512         if (!priv->channel_info) {
4513                 IWL_ERROR("Could not allocate channel_info\n");
4514                 priv->channel_count = 0;
4515                 return -ENOMEM;
4516         }
4517
4518         ch_info = priv->channel_info;
4519
4520         /* Loop through the 5 EEPROM bands, adding them in order to the
4521          * channel map we maintain (which contains more information than
4522          * what is in the EEPROM alone). */
4523         for (band = 1; band <= 5; band++) {
4524
4525                 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
4526                                         &eeprom_ch_info, &eeprom_ch_index);
4527
4528                 /* Loop through each band adding each of the channels */
4529                 for (ch = 0; ch < eeprom_ch_count; ch++) {
4530                         ch_info->channel = eeprom_ch_index[ch];
4531                         ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
4532                             IEEE80211_BAND_5GHZ;
4533
4534                         /* permanently store EEPROM's channel regulatory flags
4535                          *   and max power in channel info database. */
4536                         ch_info->eeprom = eeprom_ch_info[ch];
4537
4538                         /* Copy the run-time flags so they are there even on
4539                          * invalid channels */
4540                         ch_info->flags = eeprom_ch_info[ch].flags;
4541
4542                         if (!(is_channel_valid(ch_info))) {
4543                                 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
4544                                                "No traffic\n",
4545                                                ch_info->channel,
4546                                                ch_info->flags,
4547                                                is_channel_a_band(ch_info) ?
4548                                                "5.2" : "2.4");
4549                                 ch_info++;
4550                                 continue;
4551                         }
4552
4553                         /* Initialize regulatory-based run-time data */
4554                         ch_info->max_power_avg = ch_info->curr_txpow =
4555                             eeprom_ch_info[ch].max_power_avg;
4556                         ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4557                         ch_info->min_power = 0;
4558
4559                         IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
4560                                        " %ddBm): Ad-Hoc %ssupported\n",
4561                                        ch_info->channel,
4562                                        is_channel_a_band(ch_info) ?
4563                                        "5.2" : "2.4",
4564                                        CHECK_AND_PRINT(VALID),
4565                                        CHECK_AND_PRINT(IBSS),
4566                                        CHECK_AND_PRINT(ACTIVE),
4567                                        CHECK_AND_PRINT(RADAR),
4568                                        CHECK_AND_PRINT(WIDE),
4569                                        CHECK_AND_PRINT(DFS),
4570                                        eeprom_ch_info[ch].flags,
4571                                        eeprom_ch_info[ch].max_power_avg,
4572                                        ((eeprom_ch_info[ch].
4573                                          flags & EEPROM_CHANNEL_IBSS)
4574                                         && !(eeprom_ch_info[ch].
4575                                              flags & EEPROM_CHANNEL_RADAR))
4576                                        ? "" : "not ");
4577
4578                         /* Set the user_txpower_limit to the highest power
4579                          * supported by any channel */
4580                         if (eeprom_ch_info[ch].max_power_avg >
4581                             priv->user_txpower_limit)
4582                                 priv->user_txpower_limit =
4583                                     eeprom_ch_info[ch].max_power_avg;
4584
4585                         ch_info++;
4586                 }
4587         }
4588
4589         /* Set up txpower settings in driver for all channels */
4590         if (iwl3945_txpower_set_from_eeprom(priv))
4591                 return -EIO;
4592
4593         return 0;
4594 }
4595
4596 /*
4597  * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
4598  */
4599 static void iwl3945_free_channel_map(struct iwl_priv *priv)
4600 {
4601         kfree(priv->channel_info);
4602         priv->channel_count = 0;
4603 }
4604
4605 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4606  * sending probe req.  This should be set long enough to hear probe responses
4607  * from more than one AP.  */
4608 #define IWL_ACTIVE_DWELL_TIME_24    (30)        /* all times in msec */
4609 #define IWL_ACTIVE_DWELL_TIME_52    (20)
4610
4611 #define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4612 #define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
4613
4614 /* For faster active scanning, scan will move to the next channel if fewer than
4615  * PLCP_QUIET_THRESH packets are heard on this channel within
4616  * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
4617  * time if it's a quiet channel (nothing responded to our probe, and there's
4618  * no other traffic).
4619  * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4620 #define IWL_PLCP_QUIET_THRESH       __constant_cpu_to_le16(1)   /* packets */
4621 #define IWL_ACTIVE_QUIET_TIME       __constant_cpu_to_le16(10)  /* msec */
4622
4623 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4624  * Must be set longer than active dwell time.
4625  * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4626 #define IWL_PASSIVE_DWELL_TIME_24   (20)        /* all times in msec */
4627 #define IWL_PASSIVE_DWELL_TIME_52   (10)
4628 #define IWL_PASSIVE_DWELL_BASE      (100)
4629 #define IWL_CHANNEL_TUNE_TIME       5
4630
4631 #define IWL_SCAN_PROBE_MASK(n)   (BIT(n) | (BIT(n) - BIT(1)))
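/*
 * Illustrative arithmetic (editor's addition): BIT(n) - BIT(1) sets bits
 * 1..n-1, so IWL_SCAN_PROBE_MASK(n) sets bits 1..n.  For example,
 * IWL_SCAN_PROBE_MASK(3) = 0x8 | (0x8 - 0x2) = 0x0e, i.e. bits 1, 2 and 3.
 */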
4632
4633 static inline u16 iwl3945_get_active_dwell_time(struct iwl_priv *priv,
4634                                                 enum ieee80211_band band,
4635                                                 u8 n_probes)
4636 {
4637         if (band == IEEE80211_BAND_5GHZ)
4638                 return IWL_ACTIVE_DWELL_TIME_52 +
4639                         IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
4640         else
4641                 return IWL_ACTIVE_DWELL_TIME_24 +
4642                         IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
4643 }
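/*
 * Illustrative arithmetic (editor's addition): with n_probes = 2 the
 * function above returns 30 + 3 * (2 + 1) = 39 ms on 2.4 GHz and
 * 20 + 2 * (2 + 1) = 26 ms on 5 GHz.
 */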
4644
4645 static u16 iwl3945_get_passive_dwell_time(struct iwl_priv *priv,
4646                                           enum ieee80211_band band)
4647 {
4648         u16 passive = (band == IEEE80211_BAND_2GHZ) ?
4649             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4650             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4651
4652         if (iwl3945_is_associated(priv)) {
4653                 /* If we're associated, we clamp the maximum passive
4654                  * dwell time to be 98% of the beacon interval (minus
4655                  * 2 * channel tune time) */
4656                 passive = priv->beacon_int;
4657                 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4658                         passive = IWL_PASSIVE_DWELL_BASE;
4659                 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4660         }
4661
4662         return passive;
4663 }
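/*
 * Illustrative arithmetic (editor's addition): unassociated on 2.4 GHz the
 * passive dwell is 100 + 20 = 120 ms.  When associated with a beacon
 * interval of 200, the value is clamped to IWL_PASSIVE_DWELL_BASE (100)
 * and then reduced to 100 * 98 / 100 - 2 * 5 = 88, keeping the dwell
 * safely inside one beacon period.
 */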
4664
4665 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
4666                                          enum ieee80211_band band,
4667                                      u8 is_active, u8 n_probes,
4668                                      struct iwl3945_scan_channel *scan_ch)
4669 {
4670         const struct ieee80211_channel *channels = NULL;
4671         const struct ieee80211_supported_band *sband;
4672         const struct iwl_channel_info *ch_info;
4673         u16 passive_dwell = 0;
4674         u16 active_dwell = 0;
4675         int added, i;
4676
4677         sband = iwl3945_get_band(priv, band);
4678         if (!sband)
4679                 return 0;
4680
4681         channels = sband->channels;
4682
4683         active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
4684         passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
4685
4686         if (passive_dwell <= active_dwell)
4687                 passive_dwell = active_dwell + 1;
4688
4689         for (i = 0, added = 0; i < sband->n_channels; i++) {
4690                 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4691                         continue;
4692
4693                 scan_ch->channel = channels[i].hw_value;
4694
4695                 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
4696                 if (!is_channel_valid(ch_info)) {
4697                         IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
4698                                        scan_ch->channel);
4699                         continue;
4700                 }
4701
4702                 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4703                 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4704                 /* If passive, set up for auto-switch
4705                  * and use the long active_dwell time. */
4707                 if (!is_active || is_channel_passive(ch_info) ||
4708                     (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
4709                         scan_ch->type = 0;      /* passive */
4710                         if (IWL_UCODE_API(priv->ucode_ver) == 1)
4711                                 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
4712                 } else {
4713                         scan_ch->type = 1;      /* active */
4714                 }
4715
4716                 /* Set direct probe bits. These may be used both for active
4717                  * scan channels (probes get sent right away),
4718                  * and for passive channels (probes get sent only after
4719                  * hearing a clear Rx packet). */
4720                 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4721                         if (n_probes)
4722                                 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4723                 } else {
4724                         /* uCode v1 does not allow setting direct probe bits on
4725                          * passive channel. */
4726                         if ((scan_ch->type & 1) && n_probes)
4727                                 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4728                 }
4729
4730                 /* Set txpower levels to defaults */
4731                 scan_ch->tpc.dsp_atten = 110;
4732                 /* scan_pwr_info->tpc.dsp_atten; */
4733
4734                 /*scan_pwr_info->tpc.tx_gain; */
4735                 if (band == IEEE80211_BAND_5GHZ)
4736                         scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4737                 else {
4738                         scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4739                         /* NOTE: if we were doing 6Mb OFDM for scans we'd use
4740                          * power level:
4741                          * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
4742                          */
4743                 }
4744
4745                 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4746                                scan_ch->channel,
4747                                (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4748                                (scan_ch->type & 1) ?
4749                                active_dwell : passive_dwell);
4750
4751                 scan_ch++;
4752                 added++;
4753         }
4754
4755         IWL_DEBUG_SCAN("total channels to scan %d\n", added);
4756         return added;
4757 }
4758
4759 static void iwl3945_init_hw_rates(struct iwl_priv *priv,
4760                               struct ieee80211_rate *rates)
4761 {
4762         int i;
4763
4764         for (i = 0; i < IWL_RATE_COUNT; i++) {
4765                 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
4766                 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4767                 rates[i].hw_value_short = i;
4768                 rates[i].flags = 0;
4769                 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
4770                         /*
4771                          * If CCK != 1M then set short preamble rate flag.
4772                          */
4773                         rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
4774                                 0 : IEEE80211_RATE_SHORT_PREAMBLE;
4775                 }
4776         }
4777 }
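/*
 * Editor's note: mac80211 expects .bitrate in units of 100 kbps while the
 * .ieee values in iwl3945_rates[] use the 802.11 unit of 500 kbps, hence
 * the "* 5" above -- e.g. the 1 Mbps CCK rate (ieee value 2) becomes
 * bitrate 10.  A PLCP value of 10 is assumed to identify that 1 Mbps rate,
 * the one CCK rate that has no short-preamble variant.
 */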
4778
4779 /**
4780  * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
4781  */
4782 static int iwl3945_init_geos(struct iwl_priv *priv)
4783 {
4784         struct iwl_channel_info *ch;
4785         struct ieee80211_supported_band *sband;
4786         struct ieee80211_channel *channels;
4787         struct ieee80211_channel *geo_ch;
4788         struct ieee80211_rate *rates;
4789         int i = 0;
4790
4791         if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4792             priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
4793                 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4794                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4795                 return 0;
4796         }
4797
4798         channels = kzalloc(sizeof(struct ieee80211_channel) *
4799                            priv->channel_count, GFP_KERNEL);
4800         if (!channels)
4801                 return -ENOMEM;
4802
4803         rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
4804                         GFP_KERNEL);
4805         if (!rates) {
4806                 kfree(channels);
4807                 return -ENOMEM;
4808         }
4809
4810         /* 5.2GHz channels start after the 2.4GHz channels */
4811         sband = &priv->bands[IEEE80211_BAND_5GHZ];
4812         sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
4813         /* just OFDM */
4814         sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4815         sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4816
4817         sband = &priv->bands[IEEE80211_BAND_2GHZ];
4818         sband->channels = channels;
4819         /* OFDM & CCK */
4820         sband->bitrates = rates;
4821         sband->n_bitrates = IWL_RATE_COUNT;
4822
4823         priv->ieee_channels = channels;
4824         priv->ieee_rates = rates;
4825
4826         iwl3945_init_hw_rates(priv, rates);
4827
4828         for (i = 0;  i < priv->channel_count; i++) {
4829                 ch = &priv->channel_info[i];
4830
4831                 /* FIXME: might be removed if scan is OK */
4832                 if (!is_channel_valid(ch))
4833                         continue;
4834
4835                 if (is_channel_a_band(ch))
4836                         sband =  &priv->bands[IEEE80211_BAND_5GHZ];
4837                 else
4838                         sband =  &priv->bands[IEEE80211_BAND_2GHZ];
4839
4840                 geo_ch = &sband->channels[sband->n_channels++];
4841
4842                 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
4843                 geo_ch->max_power = ch->max_power_avg;
4844                 geo_ch->max_antenna_gain = 0xff;
4845                 geo_ch->hw_value = ch->channel;
4846
4847                 if (is_channel_valid(ch)) {
4848                         if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4849                                 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
4850
4851                         if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4852                                 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
4853
4854                         if (ch->flags & EEPROM_CHANNEL_RADAR)
4855                                 geo_ch->flags |= IEEE80211_CHAN_RADAR;
4856
4857                         if (ch->max_power_avg > priv->max_channel_txpower_limit)
4858                                 priv->max_channel_txpower_limit =
4859                                     ch->max_power_avg;
4860                 } else {
4861                         geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4862                 }
4863
4864                 /* Save flags for reg domain usage */
4865                 geo_ch->orig_flags = geo_ch->flags;
4866
4867                 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4868                                 ch->channel, geo_ch->center_freq,
4869                                 is_channel_a_band(ch) ?  "5.2" : "2.4",
4870                                 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4871                                 "restricted" : "valid",
4872                                  geo_ch->flags);
4873         }
4874
4875         if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4876              priv->cfg->sku & IWL_SKU_A) {
4877                 printk(KERN_INFO DRV_NAME
4878                        ": Incorrectly detected BG card as ABG.  Please send "
4879                        "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4880                        priv->pci_dev->device, priv->pci_dev->subsystem_device);
4881                  priv->cfg->sku &= ~IWL_SKU_A;
4882         }
4883
4884         printk(KERN_INFO DRV_NAME
4885                ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4886                priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4887                priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4888
4889         if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4890                 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4891                         &priv->bands[IEEE80211_BAND_2GHZ];
4892         if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4893                 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4894                         &priv->bands[IEEE80211_BAND_5GHZ];
4895
4896         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4897
4898         return 0;
4899 }
4900
4901 /*
4902  * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4903  */
4904 static void iwl3945_free_geos(struct iwl_priv *priv)
4905 {
4906         kfree(priv->ieee_channels);
4907         kfree(priv->ieee_rates);
4908         clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4909 }
4910
4911 /******************************************************************************
4912  *
4913  * uCode download functions
4914  *
4915  ******************************************************************************/
4916
4917 static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
4918 {
4919         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4920         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4921         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4922         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4923         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4924         iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
4925 }
4926
4927 /**
4928  * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
4929  *     looking at all data.
4930  */
4931 static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
4932 {
4933         u32 val;
4934         u32 save_len = len;
4935         int rc = 0;
4936         u32 errcnt;
4937
4938         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4939
4940         rc = iwl_grab_nic_access(priv);
4941         if (rc)
4942                 return rc;
4943
4944         iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4945                                IWL39_RTC_INST_LOWER_BOUND);
4946
4947         errcnt = 0;
4948         for (; len > 0; len -= sizeof(u32), image++) {
4949                 /* read data comes through single port, auto-incr addr */
4950                 /* NOTE: Use the debugless read so we don't flood kernel log
4951                  * if IWL_DL_IO is set */
4952                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4953                 if (val != le32_to_cpu(*image)) {
4954                         IWL_ERROR("uCode INST section is invalid at "
4955                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
4956                                   save_len - len, val, le32_to_cpu(*image));
4957                         rc = -EIO;
4958                         errcnt++;
4959                         if (errcnt >= 20)
4960                                 break;
4961                 }
4962         }
4963
4964         iwl_release_nic_access(priv);
4965
4966         if (!errcnt)
4967                 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
4968
4969         return rc;
4970 }
4971
4972
4973 /**
4974  * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
4975  *   using sample data 100 bytes apart.  If these sample points are good,
4976  *   it's a pretty good bet that everything between them is good, too.
4977  */
4978 static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
4979 {
4980         u32 val;
4981         int rc = 0;
4982         u32 errcnt = 0;
4983         u32 i;
4984
4985         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4986
4987         rc = iwl_grab_nic_access(priv);
4988         if (rc)
4989                 return rc;
4990
4991         for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4992                 /* read data comes through single port, auto-incr addr */
4993                 /* NOTE: Use the debugless read so we don't flood kernel log
4994                  * if IWL_DL_IO is set */
4995                 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4996                         i + IWL39_RTC_INST_LOWER_BOUND);
4997                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4998                 if (val != le32_to_cpu(*image)) {
4999 #if 0 /* Enable this if you want to see details */
5000                         IWL_ERROR("uCode INST section is invalid at "
5001                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
5002                                   i, val, *image);
5003 #endif
5004                         rc = -EIO;
5005                         errcnt++;
5006                         if (errcnt >= 3)
5007                                 break;
5008                 }
5009         }
5010
5011         iwl_release_nic_access(priv);
5012
5013         return rc;
5014 }
5015
5016
5017 /**
5018  * iwl3945_verify_ucode - determine which instruction image is in SRAM,
5019  *    and verify its contents
5020  */
5021 static int iwl3945_verify_ucode(struct iwl_priv *priv)
5022 {
5023         __le32 *image;
5024         u32 len;
5025         int rc = 0;
5026
5027         /* Try bootstrap */
5028         image = (__le32 *)priv->ucode_boot.v_addr;
5029         len = priv->ucode_boot.len;
5030         rc = iwl3945_verify_inst_sparse(priv, image, len);
5031         if (rc == 0) {
5032                 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5033                 return 0;
5034         }
5035
5036         /* Try initialize */
5037         image = (__le32 *)priv->ucode_init.v_addr;
5038         len = priv->ucode_init.len;
5039         rc = iwl3945_verify_inst_sparse(priv, image, len);
5040         if (rc == 0) {
5041                 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5042                 return 0;
5043         }
5044
5045         /* Try runtime/protocol */
5046         image = (__le32 *)priv->ucode_code.v_addr;
5047         len = priv->ucode_code.len;
5048         rc = iwl3945_verify_inst_sparse(priv, image, len);
5049         if (rc == 0) {
5050                 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5051                 return 0;
5052         }
5053
5054         IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5055
5056         /* Since nothing seems to match, show first several data entries in
5057          * instruction SRAM, so maybe visual inspection will give a clue.
5058          * Selection of bootstrap image (vs. other images) is arbitrary. */
5059         image = (__le32 *)priv->ucode_boot.v_addr;
5060         len = priv->ucode_boot.len;
5061         rc = iwl3945_verify_inst_full(priv, image, len);
5062
5063         return rc;
5064 }
5065
5066
5067 /* check contents of special bootstrap uCode SRAM */
5068 static int iwl3945_verify_bsm(struct iwl_priv *priv)
5069 {
5070         __le32 *image = priv->ucode_boot.v_addr;
5071         u32 len = priv->ucode_boot.len;
5072         u32 reg;
5073         u32 val;
5074
5075         IWL_DEBUG_INFO("Begin verify bsm\n");
5076
5077         /* verify BSM SRAM contents */
5078         val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
5079         for (reg = BSM_SRAM_LOWER_BOUND;
5080              reg < BSM_SRAM_LOWER_BOUND + len;
5081              reg += sizeof(u32), image++) {
5082                 val = iwl_read_prph(priv, reg);
5083                 if (val != le32_to_cpu(*image)) {
5084                         IWL_ERROR("BSM uCode verification failed at "
5085                                   "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
5086                                   BSM_SRAM_LOWER_BOUND,
5087                                   reg - BSM_SRAM_LOWER_BOUND, len,
5088                                   val, le32_to_cpu(*image));
5089                         return -EIO;
5090                 }
5091         }
5092
5093         IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
5094
5095         return 0;
5096 }
5097
5098 /**
5099  * iwl3945_load_bsm - Load bootstrap instructions
5100  *
5101  * BSM operation:
5102  *
5103  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5104  * in special SRAM that does not power down during RFKILL.  When powering back
5105  * up after power-saving sleeps (or during initial uCode load), the BSM loads
5106  * the bootstrap program into the on-board processor, and starts it.
5107  *
5108  * The bootstrap program loads (via DMA) instructions and data for a new
5109  * program from host DRAM locations indicated by the host driver in the
5110  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
5111  * automatically.
5112  *
5113  * When initializing the NIC, the host driver points the BSM to the
5114  * "initialize" uCode image.  This uCode sets up some internal data, then
5115  * notifies host via "initialize alive" that it is complete.
5116  *
5117  * The host then replaces the BSM_DRAM_* pointer values to point to the
5118  * normal runtime uCode instructions and a backup uCode data cache buffer
5119  * (filled initially with starting data values for the on-board processor),
5120  * then triggers the "initialize" uCode to load and launch the runtime uCode,
5121  * which begins normal operation.
5122  *
5123  * When doing a power-save shutdown, runtime uCode saves data SRAM into
5124  * the backup data cache in DRAM before SRAM is powered down.
5125  *
5126  * When powering back up, the BSM loads the bootstrap program.  This reloads
5127  * the runtime uCode instructions and the backup data cache into SRAM,
5128  * and re-launches the runtime uCode from where it left off.
5129  */
5130 static int iwl3945_load_bsm(struct iwl_priv *priv)
5131 {
5132         __le32 *image = priv->ucode_boot.v_addr;
5133         u32 len = priv->ucode_boot.len;
5134         dma_addr_t pinst;
5135         dma_addr_t pdata;
5136         u32 inst_len;
5137         u32 data_len;
5138         int rc;
5139         int i;
5140         u32 done;
5141         u32 reg_offset;
5142
5143         IWL_DEBUG_INFO("Begin load bsm\n");
5144
5145         /* make sure bootstrap program is no larger than BSM's SRAM size */
5146         if (len > IWL39_MAX_BSM_SIZE)
5147                 return -EINVAL;
5148
5149         /* Tell bootstrap uCode where to find the "Initialize" uCode
5150          *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
5151          * NOTE:  iwl3945_initialize_alive_start() will replace these values,
5152          *        after the "initialize" uCode has run, to point to
5153          *        runtime/protocol instructions and backup data cache. */
5154         pinst = priv->ucode_init.p_addr;
5155         pdata = priv->ucode_init_data.p_addr;
5156         inst_len = priv->ucode_init.len;
5157         data_len = priv->ucode_init_data.len;
5158
5159         rc = iwl_grab_nic_access(priv);
5160         if (rc)
5161                 return rc;
5162
5163         iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5164         iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5165         iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5166         iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
5167
5168         /* Fill BSM memory with bootstrap instructions */
5169         for (reg_offset = BSM_SRAM_LOWER_BOUND;
5170              reg_offset < BSM_SRAM_LOWER_BOUND + len;
5171              reg_offset += sizeof(u32), image++)
5172                 _iwl_write_prph(priv, reg_offset,
5173                                           le32_to_cpu(*image));
5174
5175         rc = iwl3945_verify_bsm(priv);
5176         if (rc) {
5177                 iwl_release_nic_access(priv);
5178                 return rc;
5179         }
5180
5181         /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
5182         iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
5183         iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
5184                                  IWL39_RTC_INST_LOWER_BOUND);
5185         iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
5186
5187         /* Load bootstrap code into instruction SRAM now,
5188          *   to prepare to load "initialize" uCode */
5189         iwl_write_prph(priv, BSM_WR_CTRL_REG,
5190                 BSM_WR_CTRL_REG_BIT_START);
5191
5192         /* Wait for load of bootstrap uCode to finish */
5193         for (i = 0; i < 100; i++) {
5194                 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
5195                 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5196                         break;
5197                 udelay(10);
5198         }
5199         if (i < 100)
5200                 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5201         else {
5202                 IWL_ERROR("BSM write did not complete!\n");
5203                 IWL_ERROR("BSM write did not complete!\n");
                 iwl_release_nic_access(priv); /* don't leak the NIC access grab */
5204                 return -EIO;
5205
5206         /* Enable future boot loads whenever power management unit triggers it
5207          *   (e.g. when powering back up after power-save shutdown) */
5208         iwl_write_prph(priv, BSM_WR_CTRL_REG,
5209                 BSM_WR_CTRL_REG_BIT_START_EN);
5210
5211         iwl_release_nic_access(priv);
5212
5213         return 0;
5214 }
5215
5216 static void iwl3945_nic_start(struct iwl_priv *priv)
5217 {
5218         /* Remove all resets to allow NIC to operate */
5219         iwl_write32(priv, CSR_RESET, 0);
5220 }
5221
5222 /**
5223  * iwl3945_read_ucode - Read uCode images from disk file.
5224  *
5225  * Copy into buffers for card to fetch via bus-mastering
5226  */
5227 static int iwl3945_read_ucode(struct iwl_priv *priv)
5228 {
5229         struct iwl_ucode *ucode;
5230         int ret = -EINVAL, index;
5231         const struct firmware *ucode_raw;
5232         /* firmware file name contains uCode/driver compatibility version */
5233         const char *name_pre = priv->cfg->fw_name_pre;
5234         const unsigned int api_max = priv->cfg->ucode_api_max;
5235         const unsigned int api_min = priv->cfg->ucode_api_min;
5236         char buf[25];
5237         u8 *src;
5238         size_t len;
5239         u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
5240
5241         /* Ask kernel firmware_class module to get the boot firmware off disk.
5242          * request_firmware() is synchronous, file is in memory on return. */
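        /* Illustrative example (editor's addition, names assumed): with a
         * fw_name_pre such as "iwlwifi-3945-" and api_max = 2, the loop
         * below first requests "iwlwifi-3945-2.ucode" and falls back to
         * older API versions down to api_min if that file is missing. */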
5243         for (index = api_max; index >= api_min; index--) {
5244                 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
5245                 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
5246                 if (ret < 0) {
5247                         IWL_ERROR("%s firmware file req failed: Reason %d\n",
5248                                   buf, ret);
5249                         if (ret == -ENOENT)
5250                                 continue;
5251                         else
5252                                 goto error;
5253                 } else {
5254                         if (index < api_max)
5255                                 IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n",
5256                                           buf, api_max);
5257                         IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5258                                        buf, ucode_raw->size);
5259                         break;
5260                 }
5261         }
5262
5263         if (ret < 0)
5264                 goto error;
5265
5266         /* Make sure that we got at least our header! */
5267         if (ucode_raw->size < sizeof(*ucode)) {
5268                 IWL_ERROR("File size way too small!\n");
5269                 ret = -EINVAL;
5270                 goto err_release;
5271         }
5272
5273         /* Data from ucode file:  header followed by uCode images */
5274         ucode = (void *)ucode_raw->data;
5275
5276         priv->ucode_ver = le32_to_cpu(ucode->ver);
5277         api_ver = IWL_UCODE_API(priv->ucode_ver);
5278         inst_size = le32_to_cpu(ucode->inst_size);
5279         data_size = le32_to_cpu(ucode->data_size);
5280         init_size = le32_to_cpu(ucode->init_size);
5281         init_data_size = le32_to_cpu(ucode->init_data_size);
5282         boot_size = le32_to_cpu(ucode->boot_size);
5283
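        /* Illustrative sketch (editor's addition) of the on-disk image
         * layout implied by the sizes above and the copy offsets below:
         *
         *   struct iwl_ucode header
         *   runtime instructions       (inst_size bytes)
         *   runtime data               (data_size bytes)
         *   "initialize" instructions  (init_size bytes)
         *   "initialize" data          (init_data_size bytes)
         *   bootstrap instructions     (boot_size bytes)
         */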
5284         /* api_ver should match the api version forming part of the
5285          * firmware filename ... but we don't check for that and only rely
5286          * on the API version read from the firmware header from here on */
5287
5288         if (api_ver < api_min || api_ver > api_max) {
5289                 IWL_ERROR("Driver unable to support your firmware API. "
5290                           "Driver supports v%u, firmware is v%u.\n",
5291                           api_max, api_ver);
5292                 priv->ucode_ver = 0;
5293                 ret = -EINVAL;
5294                 goto err_release;
5295         }
5296         if (api_ver != api_max)
5297                 IWL_ERROR("Firmware has old API version. Expected %u, "
5298                           "got %u. New firmware can be obtained "
5299                           "from http://www.intellinuxwireless.org.\n",
5300                           api_max, api_ver);
5301
5302         printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n",
5303                        IWL_UCODE_MAJOR(priv->ucode_ver),
5304                        IWL_UCODE_MINOR(priv->ucode_ver),
5305                        IWL_UCODE_API(priv->ucode_ver),
5306                        IWL_UCODE_SERIAL(priv->ucode_ver));
5307         IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
5308                        priv->ucode_ver);
5309         IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
5310         IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
5311         IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
5312         IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
5313         IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
5314
5315
5316         /* Verify size of file vs. image size info in file's header */
5317         if (ucode_raw->size < sizeof(*ucode) +
5318                 inst_size + data_size + init_size +
5319                 init_data_size + boot_size) {
5320
5321                 IWL_DEBUG_INFO("uCode file size %d too small\n",
5322                                (int)ucode_raw->size);
5323                 ret = -EINVAL;
5324                 goto err_release;
5325         }
5326
5327         /* Verify that uCode images will fit in card's SRAM */
5328         if (inst_size > IWL39_MAX_INST_SIZE) {
5329                 IWL_DEBUG_INFO("uCode instr len %d too large to fit in card's SRAM\n",
5330                                inst_size);
5331                 ret = -EINVAL;
5332                 goto err_release;
5333         }
5334
5335         if (data_size > IWL39_MAX_DATA_SIZE) {
5336                 IWL_DEBUG_INFO("uCode data len %d too large to fit in card's SRAM\n",
5337                                data_size);
5338                 ret = -EINVAL;
5339                 goto err_release;
5340         }
5341         if (init_size > IWL39_MAX_INST_SIZE) {
5342                 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in card's SRAM\n",
5343                                 init_size);
5344                 ret = -EINVAL;
5345                 goto err_release;
5346         }
5347         if (init_data_size > IWL39_MAX_DATA_SIZE) {
5348                 IWL_DEBUG_INFO("uCode init data len %d too large to fit in card's SRAM\n",
5349                                 init_data_size);
5350                 ret = -EINVAL;
5351                 goto err_release;
5352         }
5353         if (boot_size > IWL39_MAX_BSM_SIZE) {
5354                 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in card's SRAM\n",
5355                                 boot_size);
5356                 ret = -EINVAL;
5357                 goto err_release;
5358         }
5359
5360         /* Allocate ucode buffers for card's bus-master loading ... */
5361
5362         /* Runtime instructions and 2 copies of data:
5363          * 1) unmodified from disk
5364          * 2) backup cache for save/restore during power-downs */
5365         priv->ucode_code.len = inst_size;
5366         iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
5367
5368         priv->ucode_data.len = data_size;
5369         iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
5370
5371         priv->ucode_data_backup.len = data_size;
5372         iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
5373
5374         if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
5375             !priv->ucode_data_backup.v_addr)
5376                 goto err_pci_alloc;
5377
5378         /* Initialization instructions and data */
5379         if (init_size && init_data_size) {
5380                 priv->ucode_init.len = init_size;
5381                 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
5382
5383                 priv->ucode_init_data.len = init_data_size;
5384                 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
5385
5386                 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
5387                         goto err_pci_alloc;
5388         }
5389
5390         /* Bootstrap (instructions only, no data) */
5391         if (boot_size) {
5392                 priv->ucode_boot.len = boot_size;
5393                 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
5394
5395                 if (!priv->ucode_boot.v_addr)
5396                         goto err_pci_alloc;
5397         }
5398
5399         /* Copy images into buffers for card's bus-master reads ... */
5400
5401         /* Runtime instructions (first block of data in file) */
5402         src = &ucode->data[0];
5403         len = priv->ucode_code.len;
5404         IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
5405         memcpy(priv->ucode_code.v_addr, src, len);
5406         IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5407                 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5408
5409         /* Runtime data (2nd block)
5410          * NOTE:  Copy into backup buffer will be done in iwl3945_up()  */
5411         src = &ucode->data[inst_size];
5412         len = priv->ucode_data.len;
5413         IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
5414         memcpy(priv->ucode_data.v_addr, src, len);
5415         memcpy(priv->ucode_data_backup.v_addr, src, len);
5416
5417         /* Initialization instructions (3rd block) */
5418         if (init_size) {
5419                 src = &ucode->data[inst_size + data_size];
5420                 len = priv->ucode_init.len;
5421                 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
5422                                len);
5423                 memcpy(priv->ucode_init.v_addr, src, len);
5424         }
5425
5426         /* Initialization data (4th block) */
5427         if (init_data_size) {
5428                 src = &ucode->data[inst_size + data_size + init_size];
5429                 len = priv->ucode_init_data.len;
5430                 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
5431                                (int)len);
5432                 memcpy(priv->ucode_init_data.v_addr, src, len);
5433         }
5434
5435         /* Bootstrap instructions (5th block) */
5436         src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5437         len = priv->ucode_boot.len;
5438         IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
5439                        (int)len);
5440         memcpy(priv->ucode_boot.v_addr, src, len);
5441
5442         /* We have our copies now, allow the OS to release its copy */
5443         release_firmware(ucode_raw);
5444         return 0;
5445
5446  err_pci_alloc:
5447         IWL_ERROR("failed to allocate pci memory\n");
5448         ret = -ENOMEM;
5449         iwl3945_dealloc_ucode_pci(priv);
5450
5451  err_release:
5452         release_firmware(ucode_raw);
5453
5454  error:
5455         return ret;
5456 }
5457
5458
5459 /**
5460  * iwl3945_set_ucode_ptrs - Set uCode address location
5461  *
5462  * Tell initialization uCode where to find runtime uCode.
5463  *
5464  * BSM registers initially contain pointers to initialization uCode.
5465  * We need to replace them to load runtime uCode inst and data,
5466  * and to save runtime data when powering down.
5467  */
5468 static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
5469 {
5470         dma_addr_t pinst;
5471         dma_addr_t pdata;
5472         int rc = 0;
5473         unsigned long flags;
5474
5475         /* bits 31:0 for 3945 */
5476         pinst = priv->ucode_code.p_addr;
5477         pdata = priv->ucode_data_backup.p_addr;
5478
5479         spin_lock_irqsave(&priv->lock, flags);
5480         rc = iwl_grab_nic_access(priv);
5481         if (rc) {
5482                 spin_unlock_irqrestore(&priv->lock, flags);
5483                 return rc;
5484         }
5485
5486         /* Tell bootstrap uCode where to find image to load */
5487         iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5488         iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5489         iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5490                                  priv->ucode_data.len);
5491
5492         /* Inst byte count must be last to set up, bit 31 signals uCode
5493          *   that all new ptr/size info is in place */
5494         iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5495                                  priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5496
5497         iwl_release_nic_access(priv);
5498
5499         spin_unlock_irqrestore(&priv->lock, flags);
5500
5501         IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5502
5503         return rc;
5504 }
5505
5506 /**
5507  * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
5508  *
5509  * Called after REPLY_ALIVE notification received from "initialize" uCode.
5510  *
5511  * Tell "initialize" uCode to go ahead and load the runtime uCode.
5512  */
5513 static void iwl3945_init_alive_start(struct iwl_priv *priv)
5514 {
5515         /* Check alive response for "valid" sign from uCode */
5516         if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5517                 /* We had an error bringing up the hardware, so take it
5518                  * all the way back down so we can try again */
5519                 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5520                 goto restart;
5521         }
5522
5523         /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5524          * This is a paranoid check, because we would not have gotten the
5525          * "initialize" alive if code weren't properly loaded.  */
5526         if (iwl3945_verify_ucode(priv)) {
5527                 /* Runtime instruction load was bad;
5528                  * take it all the way back down so we can try again */
5529                 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5530                 goto restart;
5531         }
5532
5533         /* Send pointers to protocol/runtime uCode image ... init code will
5534          * load and launch runtime uCode, which will send us another "Alive"
5535          * notification. */
5536         IWL_DEBUG_INFO("Initialization Alive received.\n");
5537         if (iwl3945_set_ucode_ptrs(priv)) {
5538                 /* Runtime instruction load won't happen;
5539                  * take it all the way back down so we can try again */
5540                 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5541                 goto restart;
5542         }
5543         return;
5544
5545  restart:
5546         queue_work(priv->workqueue, &priv->restart);
5547 }
5548
5549
5550 /* temporary */
5551 static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
5552                                      struct sk_buff *skb);
5553
5554 /**
5555  * iwl3945_alive_start - called after REPLY_ALIVE notification received
5556  *                   from protocol/runtime uCode (initialization uCode's
5557  *                   Alive gets handled by iwl3945_init_alive_start()).
5558  */
5559 static void iwl3945_alive_start(struct iwl_priv *priv)
5560 {
5561         int rc = 0;
5562         int thermal_spin = 0;
5563         u32 rfkill;
5564
5565         IWL_DEBUG_INFO("Runtime Alive received.\n");
5566
5567         if (priv->card_alive.is_valid != UCODE_VALID_OK) {
5568                 /* We had an error bringing up the hardware, so take it
5569                  * all the way back down so we can try again */
5570                 IWL_DEBUG_INFO("Alive failed.\n");
5571                 goto restart;
5572         }
5573
5574         /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5575          * This is a paranoid check, because we would not have gotten the
5576          * "runtime" alive if code weren't properly loaded.  */
5577         if (iwl3945_verify_ucode(priv)) {
5578                 /* Runtime instruction load was bad;
5579                  * take it all the way back down so we can try again */
5580                 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
5581                 goto restart;
5582         }
5583
5584         iwl3945_clear_stations_table(priv);
5585
5586         rc = iwl_grab_nic_access(priv);
5587         if (rc) {
5588                 IWL_WARNING("Cannot read RFKILL status from adapter\n");
5589                 return;
5590         }
5591
5592         rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
5593         IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
5594         iwl_release_nic_access(priv);
5595
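        /* Bit 0 of APMG_RFKILL_REG reflects the hardware rf-kill line; a set
         * bit is taken here to mean the radio is enabled (HW kill switch
         * not asserted). */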
5596         if (rfkill & 0x1) {
5597                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5598                 /* if RFKILL is not on, then wait for thermal
5599                  * sensor in adapter to kick in */
5600                 while (iwl3945_hw_get_temperature(priv) == 0) {
5601                         thermal_spin++;
5602                         udelay(10);
5603                 }
5604
5605                 if (thermal_spin)
5606                         IWL_DEBUG_INFO("Thermal calibration took %dus\n",
5607                                        thermal_spin * 10);
5608         } else
5609                 set_bit(STATUS_RF_KILL_HW, &priv->status);
5610
5611         /* After the ALIVE response, we can send commands to 3945 uCode */
5612         set_bit(STATUS_ALIVE, &priv->status);
5613
5614         /* Clear out the uCode error bit if it is set */
5615         clear_bit(STATUS_FW_ERROR, &priv->status);
5616
5617         if (iwl3945_is_rfkill(priv))
5618                 return;
5619
5620         ieee80211_wake_queues(priv->hw);
5621
5622         priv->active_rate = priv->rates_mask;
5623         priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5624
5625         iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
5626
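        /* If we were associated before the firmware (re)start, reuse the
         * previous RXON as the staging config but drop the association flag
         * from the active copy; otherwise build a fresh unassociated
         * rx_config below. */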
5627         if (iwl3945_is_associated(priv)) {
5628                 struct iwl3945_rxon_cmd *active_rxon =
5629                                 (struct iwl3945_rxon_cmd *)(&priv->active39_rxon);
5630
5631                 memcpy(&priv->staging39_rxon, &priv->active39_rxon,
5632                        sizeof(priv->staging39_rxon));
5633                 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5634         } else {
5635                 /* Initialize our rx_config data */
5636                 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
5637                 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
5638         }
5639
5640         /* Configure Bluetooth device coexistence support */
5641         iwl3945_send_bt_config(priv);
5642
5643         /* Configure the adapter for unassociated operation */
5644         iwl3945_commit_rxon(priv);
5645
5646         iwl3945_reg_txpower_periodic(priv);
5647
5648         iwl3945_led_register(priv);
5649
5650         IWL_DEBUG_INFO("ALIVE processing complete.\n");
5651         set_bit(STATUS_READY, &priv->status);
5652         wake_up_interruptible(&priv->wait_command_queue);
5653
5654         if (priv->error_recovering)
5655                 iwl3945_error_recovery(priv);
5656
5657         /* reassociate for ADHOC mode */
5658         if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
5659                 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
5660                                                                 priv->vif);
5661                 if (beacon)
5662                         iwl3945_mac_beacon_update(priv->hw, beacon);
5663         }
5664
5665         return;
5666
5667  restart:
5668         queue_work(priv->workqueue, &priv->restart);
5669 }
5670
5671 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
5672
5673 static void __iwl3945_down(struct iwl_priv *priv)
5674 {
5675         unsigned long flags;
5676         int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5677         struct ieee80211_conf *conf = NULL;
5678
5679         IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5680
5681         conf = ieee80211_get_hw_conf(priv->hw);
5682
5683         if (!exit_pending)
5684                 set_bit(STATUS_EXIT_PENDING, &priv->status);
5685
5686         iwl3945_led_unregister(priv);
5687         iwl3945_clear_stations_table(priv);
5688
5689         /* Unblock any waiting calls */
5690         wake_up_interruptible_all(&priv->wait_command_queue);
5691
5692         /* Wipe out the EXIT_PENDING status bit if we are not actually
5693          * exiting the module */
5694         if (!exit_pending)
5695                 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5696
5697         /* stop and reset the on-board processor */
5698         iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5699
5700         /* tell the device to stop sending interrupts */
5701         spin_lock_irqsave(&priv->lock, flags);
5702         iwl3945_disable_interrupts(priv);
5703         spin_unlock_irqrestore(&priv->lock, flags);
5704         iwl_synchronize_irq(priv);
5705
5706         if (priv->mac80211_registered)
5707                 ieee80211_stop_queues(priv->hw);
5708
5709         /* If we have not previously called iwl3945_init() then
5710          * clear all bits but the RF Kill and SUSPEND bits and return */
5711         if (!iwl3945_is_init(priv)) {
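                /* Each test_bit() evaluates to 0 or 1; shifting it back to
                 * its own bit position and OR-ing the results rebuilds a
                 * status word containing only the bits being preserved
                 * (the same idiom is used again below). */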
5712                 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5713                                         STATUS_RF_KILL_HW |
5714                                test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5715                                         STATUS_RF_KILL_SW |
5716                                test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5717                                         STATUS_GEO_CONFIGURED |
5718                                test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5719                                         STATUS_IN_SUSPEND |
5720                                 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5721                                         STATUS_EXIT_PENDING;
5722                 goto exit;
5723         }
5724
5725         /* ...otherwise clear out all the status bits but the RF Kill and
5726          * SUSPEND bits and continue taking the NIC down. */
5727         priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5728                                 STATUS_RF_KILL_HW |
5729                         test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5730                                 STATUS_RF_KILL_SW |
5731                         test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5732                                 STATUS_GEO_CONFIGURED |
5733                         test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5734                                 STATUS_IN_SUSPEND |
5735                         test_bit(STATUS_FW_ERROR, &priv->status) <<
5736                                 STATUS_FW_ERROR |
5737                         test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5738                                 STATUS_EXIT_PENDING;
5739
5740         spin_lock_irqsave(&priv->lock, flags);
5741         iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5742         spin_unlock_irqrestore(&priv->lock, flags);
5743
5744         iwl3945_hw_txq_ctx_stop(priv);
5745         iwl3945_hw_rxq_stop(priv);
5746
5747         spin_lock_irqsave(&priv->lock, flags);
5748         if (!iwl_grab_nic_access(priv)) {
5749                 iwl_write_prph(priv, APMG_CLK_DIS_REG,
5750                                          APMG_CLK_VAL_DMA_CLK_RQT);
5751                 iwl_release_nic_access(priv);
5752         }
5753         spin_unlock_irqrestore(&priv->lock, flags);
5754
5755         udelay(5);
5756
5757         iwl3945_hw_nic_stop_master(priv);
5758         iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
5759         iwl3945_hw_nic_reset(priv);
5760
5761  exit:
5762         memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
5763
5764         if (priv->ibss_beacon)
5765                 dev_kfree_skb(priv->ibss_beacon);
5766         priv->ibss_beacon = NULL;
5767
5768         /* clear out any free frames */
5769         iwl3945_clear_free_frames(priv);
5770 }
5771
5772 static void iwl3945_down(struct iwl_priv *priv)
5773 {
5774         mutex_lock(&priv->mutex);
5775         __iwl3945_down(priv);
5776         mutex_unlock(&priv->mutex);
5777
5778         iwl3945_cancel_deferred_work(priv);
5779 }
5780
5781 #define MAX_HW_RESTARTS 5
5782
5783 static int __iwl3945_up(struct iwl_priv *priv)
5784 {
5785         int rc, i;
5786
5787         if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5788                 IWL_WARNING("Exit pending; will not bring the NIC up\n");
5789                 return -EIO;
5790         }
5791
5792         if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
5793                 IWL_WARNING("Radio disabled by SW RF kill (module "
5794                             "parameter)\n");
5795                 return -ENODEV;
5796         }
5797
5798         if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
5799                 IWL_ERROR("ucode not available for device bring up\n");
5800                 return -EIO;
5801         }
5802
5803         /* If platform's RF_KILL switch is NOT set to KILL */
5804         if (iwl_read32(priv, CSR_GP_CNTRL) &
5805                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5806                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5807         else {
5808                 set_bit(STATUS_RF_KILL_HW, &priv->status);
5809                 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
5810                         IWL_WARNING("Radio disabled by HW RF Kill switch\n");
5811                         return -ENODEV;
5812                 }
5813         }
5814
5815         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5816
5817         rc = iwl3945_hw_nic_init(priv);
5818         if (rc) {
5819                 IWL_ERROR("Unable to init nic\n");
5820                 return rc;
5821         }
5822
5823         /* make sure rfkill handshake bits are cleared */
5824         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5825         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
5826                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5827
5828         /* clear (again), then enable host interrupts */
5829         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5830         iwl3945_enable_interrupts(priv);
5831
5832         /* really make sure rfkill handshake bits are cleared */
5833         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5834         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5835
5836         /* Copy original ucode data image from disk into backup cache.
5837          * This will be used to initialize the on-board processor's
5838          * data SRAM for a clean start when the runtime program first loads. */
5839         memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5840                priv->ucode_data.len);
5841
5842         /* We return success when we resume from suspend and rf_kill is on. */
5843         if (test_bit(STATUS_RF_KILL_HW, &priv->status))
5844                 return 0;
5845
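        /* Try up to MAX_HW_RESTARTS times to load the bootstrap uCode and
         * start the card; if every attempt fails, take the NIC back down
         * and report the error. */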
5846         for (i = 0; i < MAX_HW_RESTARTS; i++) {
5847
5848                 iwl3945_clear_stations_table(priv);
5849
5850                 /* load bootstrap state machine,
5851                  * load bootstrap program into processor's memory,
5852                  * prepare to load the "initialize" uCode */
5853                 rc = iwl3945_load_bsm(priv);
5854
5855                 if (rc) {
5856                         IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
5857                         continue;
5858                 }
5859
5860                 /* start card; "initialize" will load runtime ucode */
5861                 iwl3945_nic_start(priv);
5862
5863                 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
5864
5865                 return 0;
5866         }
5867
5868         set_bit(STATUS_EXIT_PENDING, &priv->status);
5869         __iwl3945_down(priv);
5870         clear_bit(STATUS_EXIT_PENDING, &priv->status);
5871
5872         /* Tried to restart and configure the device for as long as our
5873          * patience could withstand */
5874         IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
5875         return -EIO;
5876 }
5877
5878
5879 /*****************************************************************************
5880  *
5881  * Workqueue callbacks
5882  *
5883  *****************************************************************************/
5884
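/*
 * Each of these handlers runs from priv->workqueue.  The common pattern
 * is to bail out early if the driver is tearing down (STATUS_EXIT_PENDING)
 * and to take priv->mutex around the actual work.
 */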
5885 static void iwl3945_bg_init_alive_start(struct work_struct *data)
5886 {
5887         struct iwl_priv *priv =
5888             container_of(data, struct iwl_priv, init_alive_start.work);
5889
5890         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5891                 return;
5892
5893         mutex_lock(&priv->mutex);
5894         iwl3945_init_alive_start(priv);
5895         mutex_unlock(&priv->mutex);
5896 }
5897
5898 static void iwl3945_bg_alive_start(struct work_struct *data)
5899 {
5900         struct iwl_priv *priv =
5901             container_of(data, struct iwl_priv, alive_start.work);
5902
5903         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5904                 return;
5905
5906         mutex_lock(&priv->mutex);
5907         iwl3945_alive_start(priv);
5908         mutex_unlock(&priv->mutex);
5909 }
5910
5911 static void iwl3945_bg_rf_kill(struct work_struct *work)
5912 {
5913         struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
5914
5915         wake_up_interruptible(&priv->wait_command_queue);
5916
5917         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5918                 return;
5919
5920         mutex_lock(&priv->mutex);
5921
5922         if (!iwl3945_is_rfkill(priv)) {
5923                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
5924                           "HW and/or SW RF Kill no longer active, restarting "
5925                           "device\n");
5926                 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
5927                         queue_work(priv->workqueue, &priv->restart);
5928         } else {
5929
5930                 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
5931                         IWL_DEBUG_RF_KILL("Cannot turn radio back on - "
5932                                           "disabled by SW switch\n");
5933                 else
5934                         IWL_WARNING("Radio Frequency Kill Switch is On:\n"
5935                                     "Kill switch must be turned off for "
5936                                     "wireless networking to work.\n");
5937         }
5938
5939         mutex_unlock(&priv->mutex);
5940         iwl3945_rfkill_set_hw_state(priv);
5941 }
5942
5943 #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5944
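/* Watchdog for scan completion: if a scan is still marked as running (or
 * aborting) when this delayed work fires, ask the uCode to abort it. */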
5945 static void iwl3945_bg_scan_check(struct work_struct *data)
5946 {
5947         struct iwl_priv *priv =
5948             container_of(data, struct iwl_priv, scan_check.work);
5949
5950         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5951                 return;
5952
5953         mutex_lock(&priv->mutex);
5954         if (test_bit(STATUS_SCANNING, &priv->status) ||
5955             test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5956                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
5957                           "Scan completion watchdog resetting adapter (%dms)\n",
5958                           jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
5959
5960                 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
5961                         iwl3945_send_scan_abort(priv);
5962         }
5963         mutex_unlock(&priv->mutex);
5964 }
5965
5966 static void iwl3945_bg_request_scan(struct work_struct *data)
5967 {
5968         struct iwl_priv *priv =
5969             container_of(data, struct iwl_priv, request_scan);
5970         struct iwl3945_host_cmd cmd = {
5971                 .id = REPLY_SCAN_CMD,
5972                 .len = sizeof(struct iwl3945_scan_cmd),
5973                 .meta.flags = CMD_SIZE_HUGE,
5974         };
5975         int rc = 0;
5976         struct iwl3945_scan_cmd *scan;
5977         struct ieee80211_conf *conf = NULL;
5978         u8 n_probes = 2;
5979         enum ieee80211_band band;
5980         DECLARE_SSID_BUF(ssid);
5981
5982         conf = ieee80211_get_hw_conf(priv->hw);
5983
5984         mutex_lock(&priv->mutex);
5985
5986         if (!iwl3945_is_ready(priv)) {
5987                 IWL_WARNING("request scan called when driver not ready.\n");
5988                 goto done;
5989         }
5990
5991         /* Make sure the scan wasn't canceled before this queued work
5992          * was given the chance to run... */
5993         if (!test_bit(STATUS_SCANNING, &priv->status))
5994                 goto done;
5995
5996         /* This should never be called or scheduled if there is currently
5997          * a scan active in the hardware. */
5998         if (test_bit(STATUS_SCAN_HW, &priv->status)) {
5999                 IWL_DEBUG_INFO("Multiple concurrent scan requests. "
6000                                "Ignoring second request.\n");
6001                 rc = -EIO;
6002                 goto done;
6003         }
6004
6005         if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6006                 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6007                 goto done;
6008         }
6009
6010         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6011                 IWL_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6012                 goto done;
6013         }
6014
6015         if (iwl3945_is_rfkill(priv)) {
6016                 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6017                 goto done;
6018         }
6019
6020         if (!test_bit(STATUS_READY, &priv->status)) {
6021                 IWL_DEBUG_HC("Scan request while uninitialized.  Queuing.\n");
6022                 goto done;
6023         }
6024
6025         if (!priv->scan_bands) {
6026                 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6027                 goto done;
6028         }
6029
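        /* The scan command is bigger than a normal host command (note
         * CMD_SIZE_HUGE above), so allocate a dedicated buffer with room
         * for up to IWL_MAX_SCAN_SIZE of additional payload; it is
         * allocated once and reused for later scan requests. */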
6030         if (!priv->scan39) {
6031                 priv->scan39 = kmalloc(sizeof(struct iwl3945_scan_cmd) +
6032                                      IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6033                 if (!priv->scan39) {
6034                         rc = -ENOMEM;
6035                         goto done;
6036                 }
6037         }
6038         scan = priv->scan39;
6039         memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
6040
6041         scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6042         scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6043
6044         if (iwl3945_is_associated(priv)) {
6045                 u16 interval = 0;
6046                 u32 extra;
6047                 u32 suspend_time = 100;
6048                 u32 scan_suspend_time = 100;
6049                 unsigned long flags;
6050
6051                 IWL_DEBUG_INFO("Scanning while associated...\n");
6052
6053                 spin_lock_irqsave(&priv->lock, flags);
6054                 interval = priv->beacon_int;
6055                 spin_unlock_irqrestore(&priv->lock, flags);
6056
6057                 scan->suspend_time = 0;
6058                 scan->max_out_time = cpu_to_le32(200 * 1024);
6059                 if (!interval)
6060                         interval = suspend_time;
6061                 /*
6062                  * suspend time format:
6063