2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2014, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
25 * $Id: dhd_linux.c 457888 2014-02-25 03:34:39Z $
32 #include <linux/init.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/etherdevice.h>
40 #include <linux/random.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/fcntl.h>
46 #include <linux/compat.h>
47 #include <net/addrconf.h>
48 #include <linux/cpufreq.h>
50 #include <asm/uaccess.h>
51 #include <asm/unaligned.h>
55 #include <bcmendian.h>
58 #include <proto/ethernet.h>
59 #include <proto/bcmevent.h>
60 #include <dngl_stats.h>
61 #include <dhd_linux_wq.h>
63 #include <dhd_linux.h>
65 #include <dhd_proto.h>
67 #ifdef CONFIG_HAS_WAKELOCK
68 #include <linux/wakelock.h>
71 #include <wl_cfg80211.h>
77 #ifdef DHDTCPACK_SUPPRESS
79 #endif /* DHDTCPACK_SUPPRESS */
83 #include <linux/time.h>
86 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
87 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
88 #define TSMAX 1000 /* max no. of timing record kept */
91 static uint32 tsidx = 0;
92 static uint32 htsf_seqnum = 0;
95 static uint32 tsport = 5010;
97 typedef struct histo_ {
101 #if !ISPOWEROF2(DHD_SDALIGN)
102 #error DHD_SDALIGN is not a power of 2!
105 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
106 #endif /* WLMEDIA_HTSF */
111 extern bool ap_cfg_running;
112 extern bool ap_fw_loaded;
116 #ifdef ENABLE_ADAPTIVE_SCHED
117 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
118 #ifndef CUSTOM_CPUFREQ_THRESH
119 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
120 #endif /* CUSTOM_CPUFREQ_THRESH */
121 #endif /* ENABLE_ADAPTIVE_SCHED */
123 /* enable HOSTIP cache update from the host side when an eth0:N is up */
124 #define AOE_IP_ALIAS_SUPPORT 1
128 #include <bcm_rpc_tp.h>
131 #include <wlfc_proto.h>
132 #include <dhd_wlfc.h>
135 #include <wl_android.h>
138 #ifdef ARP_OFFLOAD_SUPPORT
139 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
140 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
141 unsigned long event, void *ptr);
142 static struct notifier_block dhd_inetaddr_notifier = {
143 .notifier_call = dhd_inetaddr_notifier_call
145 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
146 * created in kernel notifier link list (with 'next' pointing to itself)
148 static bool dhd_inetaddr_notifier_registered = FALSE;
149 #endif /* ARP_OFFLOAD_SUPPORT */
151 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
152 unsigned long event, void *ptr);
153 static struct notifier_block dhd_inet6addr_notifier = {
154 .notifier_call = dhd_inet6addr_notifier_call
156 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
157 * created in kernel notifier link list (with 'next' pointing to itself)
159 static bool dhd_inet6addr_notifier_registered = FALSE;
161 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
162 #include <linux/suspend.h>
163 volatile bool dhd_mmc_suspend = FALSE;
164 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
165 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
167 #if defined(OOB_INTR_ONLY)
168 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
170 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
171 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
173 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
174 MODULE_LICENSE("GPL v2");
175 #endif /* LinuxVer */
180 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
182 #ifndef PROP_TXSTATUS
183 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
185 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
187 #endif /* BCM_FD_AGGR */
190 extern bool dhd_wlfc_skip_fc(void);
191 extern void dhd_wlfc_plat_init(void *dhd);
192 extern void dhd_wlfc_plat_deinit(void *dhd);
193 #endif /* PROP_TXSTATUS */
195 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
201 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
203 /* Linux wireless extension support */
204 #if defined(WL_WIRELESS_EXT)
206 extern wl_iw_extra_params_t g_wl_iw_params;
207 #endif /* defined(WL_WIRELESS_EXT) */
209 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
210 #include <linux/earlysuspend.h>
211 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
213 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
215 #ifdef PKT_FILTER_SUPPORT
216 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
217 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
218 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
223 extern int dhd_read_macaddr(struct dhd_info *dhd);
225 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
228 extern int dhd_write_macaddr(struct ether_addr *mac);
230 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
234 typedef struct dhd_if_event {
235 struct list_head list;
236 wl_event_data_if_t event;
237 char name[IFNAMSIZ+1];
238 uint8 mac[ETHER_ADDR_LEN];
241 /* Interface control information */
242 typedef struct dhd_if {
243 struct dhd_info *info; /* back pointer to dhd_info */
244 /* OS/stack specifics */
245 struct net_device *net;
246 struct net_device_stats stats;
247 int idx; /* iface idx in dongle */
248 uint subunit; /* subunit */
249 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
250 bool attached; /* Delayed attachment when unset */
251 bool txflowcontrol; /* Per interface flow control indicator */
252 char name[IFNAMSIZ+1]; /* linux interface name */
253 uint8 bssidx; /* bsscfg index for the interface */
268 uint32 coef; /* scaling factor */
269 uint32 coefdec1; /* first decimal */
270 uint32 coefdec2; /* second decimal */
280 static tstamp_t ts[TSMAX];
281 static tstamp_t maxdelayts;
282 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
284 #endif /* WLMEDIA_HTSF */
286 struct ipv6_work_info_t {
292 /* Local private structure (extension of pub) */
293 typedef struct dhd_info {
294 #if defined(WL_WIRELESS_EXT)
295 wl_iw_t iw; /* wireless extensions state (must be first) */
296 #endif /* defined(WL_WIRELESS_EXT) */
299 void *adapter; /* adapter information, interrupt, fw path etc. */
300 char fw_path[PATH_MAX]; /* path to firmware image */
301 char nv_path[PATH_MAX]; /* path to nvram vars file */
303 /* For supporting multiple interfaces */
304 dhd_if_t *iflist[DHD_MAX_IFS];
306 struct semaphore proto_sem;
308 spinlock_t wlfc_spinlock;
310 #endif /* PROP_TXSTATUS */
314 wait_queue_head_t ioctl_resp_wait;
315 uint32 default_wd_interval;
317 struct timer_list timer;
319 struct tasklet_struct tasklet;
324 struct semaphore sdsem;
325 tsk_ctl_t thr_dpc_ctl;
326 tsk_ctl_t thr_wdt_ctl;
328 tsk_ctl_t thr_rxf_ctl;
330 bool rxthread_enabled;
333 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
334 struct wake_lock wl_wifi; /* Wifi wakelock */
335 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
336 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
337 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
340 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
341 /* net_device interface lock, prevent race conditions among net_dev interface
342 * calls and wifi_on or wifi_off
344 struct mutex dhd_net_if_mutex;
345 struct mutex dhd_suspend_mutex;
347 spinlock_t wakelock_spinlock;
348 uint32 wakelock_counter;
350 uint32 wakelock_before_waive;
351 int wakelock_wd_counter;
352 int wakelock_rx_timeout_enable;
353 int wakelock_ctrl_timeout_enable;
355 /* Thread to issue ioctl for multicast */
356 wait_queue_head_t ctrl_wait;
357 atomic_t pend_8021x_cnt;
358 dhd_attach_states_t dhd_state;
360 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
361 struct early_suspend early_suspend;
362 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
364 #ifdef ARP_OFFLOAD_SUPPORT
366 #endif /* ARP_OFFLOAD_SUPPORT */
370 struct timer_list rpcth_timer;
371 bool rpcth_timer_active;
374 #ifdef DHDTCPACK_SUPPRESS
375 spinlock_t tcpack_lock;
376 #endif /* DHDTCPACK_SUPPRESS */
377 void *dhd_deferred_wq;
378 #ifdef DEBUG_CPU_FREQ
379 struct notifier_block freq_trans;
380 int __percpu *new_freq;
383 struct notifier_block pm_notifier;
386 /* Flag to indicate if we should download firmware on driver load */
387 uint dhd_download_fw_on_driverload = TRUE;
389 /* Definitions to provide path to the firmware and nvram
390 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
392 char firmware_path[MOD_PARAM_PATHLEN];
393 char nvram_path[MOD_PARAM_PATHLEN];
395 /* information string to keep firmware, chip and chip-revision version info visible in the log */
396 char info_string[MOD_PARAM_INFOLEN];
397 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
399 int disable_proptx = 0;
400 module_param(op_mode, int, 0644);
401 extern int wl_control_wl_start(struct net_device *dev);
402 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
403 struct semaphore dhd_registration_sem;
404 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
406 /* deferred handlers */
407 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
408 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
409 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
410 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
411 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
414 module_param(dhd_msg_level, int, 0);
416 #ifdef ARP_OFFLOAD_SUPPORT
417 /* ARP offload enable */
418 uint dhd_arp_enable = TRUE;
419 module_param(dhd_arp_enable, uint, 0);
421 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
423 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
425 module_param(dhd_arp_mode, uint, 0);
426 #endif /* ARP_OFFLOAD_SUPPORT */
428 /* Disable Prop tx */
429 module_param(disable_proptx, int, 0644);
430 /* load firmware and/or nvram values from the filesystem */
431 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
432 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
434 /* Watchdog interval */
436 /* extend watchdog expiration to 2 seconds when DPC is running */
437 #define WATCHDOG_EXTEND_INTERVAL (2000)
439 uint dhd_watchdog_ms = 10;
440 module_param(dhd_watchdog_ms, uint, 0);
442 #if defined(DHD_DEBUG)
443 /* Console poll interval */
444 uint dhd_console_ms = 0;
445 module_param(dhd_console_ms, uint, 0644);
446 #endif /* defined(DHD_DEBUG) */
449 uint dhd_slpauto = TRUE;
450 module_param(dhd_slpauto, uint, 0);
452 #ifdef PKT_FILTER_SUPPORT
453 /* Global Pkt filter enable control */
454 uint dhd_pkt_filter_enable = TRUE;
455 module_param(dhd_pkt_filter_enable, uint, 0);
458 /* Pkt filter init setup */
459 uint dhd_pkt_filter_init = 0;
460 module_param(dhd_pkt_filter_init, uint, 0);
462 /* Pkt filter mode control */
463 uint dhd_master_mode = TRUE;
464 module_param(dhd_master_mode, uint, 0);
466 int dhd_watchdog_prio = 0;
467 module_param(dhd_watchdog_prio, int, 0);
469 /* DPC thread priority */
470 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
471 module_param(dhd_dpc_prio, int, 0);
473 /* RX frame thread priority */
474 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
475 module_param(dhd_rxf_prio, int, 0);
477 #if !defined(BCMDHDUSB)
478 extern int dhd_dongle_ramsize;
479 module_param(dhd_dongle_ramsize, int, 0);
480 #endif /* BCMDHDUSB */
482 /* Keep track of number of instances */
483 static int dhd_found = 0;
484 static int instance_base = 0; /* Starting instance number */
485 module_param(instance_base, int, 0644);
487 /* Control fw roaming */
488 uint dhd_roam_disable = 0;
490 /* Control radio state */
491 uint dhd_radio_up = 1;
493 /* Network interface name */
494 char iface_name[IFNAMSIZ] = {'\0'};
495 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
497 /* The following are specific to the SDIO dongle */
499 /* IOCTL response timeout */
500 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
502 /* Idle timeout for backplane clock */
503 int dhd_idletime = DHD_IDLETIME_TICKS;
504 module_param(dhd_idletime, int, 0);
507 uint dhd_poll = FALSE;
508 module_param(dhd_poll, uint, 0);
511 uint dhd_intr = TRUE;
512 module_param(dhd_intr, uint, 0);
514 /* SDIO Drive Strength (in milliamps) */
515 uint dhd_sdiod_drive_strength = 6;
516 module_param(dhd_sdiod_drive_strength, uint, 0);
520 extern uint dhd_txbound;
521 extern uint dhd_rxbound;
522 module_param(dhd_txbound, uint, 0);
523 module_param(dhd_rxbound, uint, 0);
525 /* Deferred transmits */
526 extern uint dhd_deferred_tx;
527 module_param(dhd_deferred_tx, uint, 0);
530 extern void dhd_dbg_init(dhd_pub_t *dhdp);
531 extern void dhd_dbg_remove(void);
532 #endif /* BCMDBGFS */
538 /* Echo packet generator (pkts/s) */
540 module_param(dhd_pktgen, uint, 0);
542 /* Echo packet len (0 => sawtooth, max 2040) */
543 uint dhd_pktgen_len = 0;
544 module_param(dhd_pktgen_len, uint, 0);
548 extern char dhd_version[];
550 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
551 static void dhd_net_if_lock_local(dhd_info_t *dhd);
552 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
553 static void dhd_suspend_lock(dhd_pub_t *dhdp);
554 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
557 void htsf_update(dhd_info_t *dhd, void *data);
558 tsf_t prev_tsf, cur_tsf;
560 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
561 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
562 static void dhd_dump_latency(void);
563 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
564 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
565 static void dhd_dump_htsfhisto(histo_t *his, char *s);
566 #endif /* WLMEDIA_HTSF */
568 /* Monitor interface */
569 int dhd_monitor_init(void *dhd_pub);
570 int dhd_monitor_uninit(void);
573 #if defined(WL_WIRELESS_EXT)
574 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
575 #endif /* defined(WL_WIRELESS_EXT) */
577 static void dhd_dpc(ulong data);
579 extern int dhd_wait_pend8021x(struct net_device *dev);
580 void dhd_os_wd_timer_extend(void *bus, bool extend);
584 #error TOE requires BDC
586 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
587 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
590 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
591 wl_event_msg_t *event_ptr, void **data_ptr);
594 static int dhd_wakelock_waive(dhd_info_t *dhdinfo);
595 static int dhd_wakelock_restore(dhd_info_t *dhdinfo);
/*
 * PM (suspend/resume) notifier callback, registered only on kernels
 * 2.6.27..2.6.39 with CONFIG_PM_SLEEP. Sets the module-wide
 * dhd_mmc_suspend flag and suspends/resumes proprietary tx flow control.
 * NOTE(review): this chunk is a sampled extract -- the switch statement
 * around the PM_* case labels and several braces are missing from view.
 */
598 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
599 KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
600 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
602 int ret = NOTIFY_DONE;
603 bool suspend = FALSE;
/* Recover the owning dhd_info from the embedded notifier_block member. */
604 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
606 BCM_REFERENCE(dhdinfo);
/* presumably: switch (action) { ... } -- the switch header is outside this view */
608 case PM_HIBERNATION_PREPARE:
609 case PM_SUSPEND_PREPARE:
612 case PM_POST_HIBERNATION:
613 case PM_POST_SUSPEND:
/* On suspend: waive wakelocks so dhd_wlfc_suspend's ioctls do not
 * re-grab them, then restore the previous wakelock state.
 */
620 dhd_wakelock_waive(dhdinfo);
621 dhd_wlfc_suspend(&dhdinfo->pub);
622 dhd_wakelock_restore(dhdinfo);
/* On resume path: re-enable wl flow control. */
624 dhd_wlfc_resume(&dhdinfo->pub);
629 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
630 KERNEL_VERSION(2, 6, 39))
/* Publish the new suspend state to the SDIO/MMC layer (volatile global). */
631 dhd_mmc_suspend = suspend;
637 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
638 * created in kernel notifier link list (with 'next' pointing to itself)
640 static bool dhd_pm_notifier_registered = FALSE;
642 extern int register_pm_notifier(struct notifier_block *nb);
643 extern int unregister_pm_notifier(struct notifier_block *nb);
644 #endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP) */
646 /* Request scheduling of the bus rx frame */
647 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
648 static void dhd_os_rxflock(dhd_pub_t *pub);
649 static void dhd_os_rxfunlock(dhd_pub_t *pub);
/*
 * dhd_rxf_enqueue: push one received skb into the fixed-size ring
 * (dhdp->skbbuf, MAXSKBPEND slots, power-of-2 mask indexing) that the
 * rx-frame thread drains via dhd_rxf_dequeue. Protected by the rxf lock.
 * NOTE(review): sampled extract -- local declarations of store_idx /
 * sent_idx and the return statements are missing from this view.
 */
651 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
657 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
661 dhd_os_rxflock(dhdp);
662 store_idx = dhdp->store_idx;
663 sent_idx = dhdp->sent_idx;
/* A non-NULL slot at store_idx means the ring is full (oldest packet not
 * yet consumed by the rx thread); unlock and bail without storing.
 */
664 if (dhdp->skbbuf[store_idx] != NULL) {
665 /* Make sure the previous packets are processed */
666 dhd_os_rxfunlock(dhdp);
667 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
668 skb, store_idx, sent_idx));
669 /* removed msleep here, should use wait_event_timeout if we
670 * want to give rx frame thread a chance to run
672 #if defined(WAIT_DEQUEUE)
677 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
678 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
/* Store the skb and advance the producer index modulo MAXSKBPEND. */
679 dhdp->skbbuf[store_idx] = skb;
680 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
681 dhd_os_rxfunlock(dhdp);
/*
 * dhd_rxf_dequeue: pop the oldest skb from the rx ring buffer (consumer
 * side of dhd_rxf_enqueue). Returns the skb, or NULL when the ring is
 * empty. Protected by the same rxf lock as the enqueue path.
 * NOTE(review): sampled extract -- local declarations and the final
 * return statements are missing from this view.
 */
686 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
692 dhd_os_rxflock(dhdp);
694 store_idx = dhdp->store_idx;
695 sent_idx = dhdp->sent_idx;
696 skb = dhdp->skbbuf[sent_idx];
/* Empty slot at sent_idx => nothing to dequeue; unlock and report. */
699 dhd_os_rxfunlock(dhdp);
700 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
701 store_idx, sent_idx));
/* Clear the consumed slot and advance the consumer index (mod MAXSKBPEND). */
705 dhdp->skbbuf[sent_idx] = NULL;
706 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
708 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
711 dhd_os_rxfunlock(dhdp);
716 static int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
718 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
720 if (prepost) { /* pre process */
721 dhd_read_macaddr(dhd);
722 } else { /* post process */
723 dhd_write_macaddr(&dhd->pub.mac);
#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
/*
 * Decide whether the ARP white-list packet filter (DHD_ARP_FILTER_NUM)
 * should be turned on for the current operating mode.
 *
 * Returns TRUE when ARP packets must be passed up to the host:
 *  - IBSS mode, or
 *  - P2P GO/GC when the firmware uses ARP offload version 1
 *    (which cannot auto-reply on behalf of the host in those modes).
 */
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
{
	bool _apply = FALSE;

	/* In case of IBSS mode, apply arp pkt filter */
	if (op_mode & DHD_FLAG_IBSS_MODE) {
		_apply = TRUE;
		goto exit;
	}
	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
	if ((dhd->arp_version == 1) &&
		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
		_apply = TRUE;
		goto exit;
	}

exit:
	return _apply;
}
#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
751 void dhd_set_packet_filter(dhd_pub_t *dhd)
753 #ifdef PKT_FILTER_SUPPORT
756 DHD_TRACE(("%s: enter\n", __FUNCTION__));
757 if (dhd_pkt_filter_enable) {
758 for (i = 0; i < dhd->pktfilter_count; i++) {
759 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
762 #endif /* PKT_FILTER_SUPPORT */
765 void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
767 #ifdef PKT_FILTER_SUPPORT
770 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
771 /* 1 - Enable packet filter, only allow unicast packet to send up */
772 /* 0 - Disable packet filter */
773 if (dhd_pkt_filter_enable && (!value ||
774 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
776 for (i = 0; i < dhd->pktfilter_count; i++) {
777 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
778 if (value && (i == DHD_ARP_FILTER_NUM) &&
779 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
780 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
781 "val %d, cnt %d, op_mode 0x%x\n",
782 value, i, dhd->op_mode));
785 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
786 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
787 value, dhd_master_mode);
790 #endif /* PKT_FILTER_SUPPORT */
/*
 * Apply (value != 0) or remove (value == 0) the power-saving settings
 * used while the host is suspended: PM mode, packet filters, DTIM skip,
 * firmware roaming, IPv6 RA filtering and (optionally) OOB interrupt
 * width. Serialized via dhd_suspend_lock().
 * NOTE(review): sampled extract -- several local declarations (iovbuf,
 * ret, roamvar, intr_width), some braces and the return path are
 * missing from this view.
 */
793 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
795 #ifndef SUPPORT_PM2_ONLY
796 int power_mode = PM_MAX;
797 #endif /* SUPPORT_PM2_ONLY */
798 /* wl_pkt_filter_enable_t enable_parm; */
800 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
801 #ifndef ENABLE_FW_ROAM_SUSPEND
803 #endif /* ENABLE_FW_ROAM_SUSPEND */
804 uint nd_ra_filter = 0;
807 #ifdef DYNAMIC_SWOOB_DURATION
808 #ifndef CUSTOM_INTR_WIDTH
809 #define CUSTOM_INTR_WIDTH 100
810 #endif /* CUSTOM_INTR_WIDTH */
812 #endif /* DYNAMIC_SWOOB_DURATION */
816 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
817 __FUNCTION__, value, dhd->in_suspend));
819 dhd_suspend_lock(dhd);
821 #ifdef CUSTOM_SET_CPUCORE
822 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
823 /* set specific cpucore */
824 dhd_set_cpucore(dhd, TRUE);
825 #endif /* CUSTOM_SET_CPUCORE */
/* ---- suspend path: host is going down and STA is associated ---- */
827 if (value && dhd->in_suspend) {
828 #ifdef PKT_FILTER_SUPPORT
829 dhd->early_suspended = 1;
831 /* Kernel suspended */
832 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
834 #ifndef SUPPORT_PM2_ONLY
/* Force maximum power save while suspended. */
835 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
836 sizeof(power_mode), TRUE, 0);
837 #endif /* SUPPORT_PM2_ONLY */
839 /* Enable packet filter, only allow unicast packet to send up */
840 dhd_enable_packet_filter(1, dhd);
843 /* If DTIM skip is set up as default, force it to wake
844 * each third DTIM for better power savings. Note that
845 * one side effect is a chance to miss BC/MC packet.
847 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
848 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
849 4, iovbuf, sizeof(iovbuf));
850 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
852 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
854 #ifndef ENABLE_FW_ROAM_SUSPEND
855 /* Disable firmware roaming during suspend */
856 bcm_mkiovar("roam_off", (char *)&roamvar, 4,
857 iovbuf, sizeof(iovbuf));
858 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
859 #endif /* ENABLE_FW_ROAM_SUSPEND */
/* ND offload capable firmware: filter IPv6 router advertisements. */
860 if (FW_SUPPORTED(dhd, ndoe)) {
861 /* enable IPv6 RA filter in firmware during suspend */
863 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
864 iovbuf, sizeof(iovbuf));
865 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
866 sizeof(iovbuf), TRUE, 0)) < 0)
867 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
870 #ifdef DYNAMIC_SWOOB_DURATION
871 intr_width = CUSTOM_INTR_WIDTH;
872 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
873 iovbuf, sizeof(iovbuf));
874 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
875 sizeof(iovbuf), TRUE, 0)) < 0)
876 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
877 #endif /* DYNAMIC_SWOOB_DURATION */
/* ---- resume path: undo every suspend-time setting above ---- */
879 #ifdef PKT_FILTER_SUPPORT
880 dhd->early_suspended = 0;
883 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
884 #ifdef DYNAMIC_SWOOB_DURATION
886 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
887 iovbuf, sizeof(iovbuf));
888 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
889 sizeof(iovbuf), TRUE, 0)) < 0)
890 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
891 #endif /* DYNAMIC_SWOOB_DURATION */
893 #ifndef SUPPORT_PM2_ONLY
894 power_mode = PM_FAST;
895 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
896 sizeof(power_mode), TRUE, 0);
897 #endif /* SUPPORT_PM2_ONLY */
898 #ifdef PKT_FILTER_SUPPORT
899 /* disable pkt filter */
900 dhd_enable_packet_filter(0, dhd);
901 #endif /* PKT_FILTER_SUPPORT */
903 /* restore pre-suspend setting for dtim_skip */
904 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
905 4, iovbuf, sizeof(iovbuf));
907 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
908 #ifndef ENABLE_FW_ROAM_SUSPEND
909 roamvar = dhd_roam_disable;
910 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
912 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
913 #endif /* ENABLE_FW_ROAM_SUSPEND */
914 if (FW_SUPPORTED(dhd, ndoe)) {
915 /* disable IPv6 RA filter in firmware during suspend */
917 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
918 iovbuf, sizeof(iovbuf));
919 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
920 sizeof(iovbuf), TRUE, 0)) < 0)
921 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
926 dhd_suspend_unlock(dhd);
931 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
933 dhd_pub_t *dhdp = &dhd->pub;
936 DHD_OS_WAKE_LOCK(dhdp);
937 /* Set flag when early suspend was called */
938 dhdp->in_suspend = val;
939 if ((force || !dhdp->suspend_disable_flag) &&
940 dhd_support_sta_mode(dhdp))
942 ret = dhd_set_suspend(val, dhdp);
945 DHD_OS_WAKE_UNLOCK(dhdp);
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
/*
 * Android early-suspend hook: screen turned off. Recover the owning
 * dhd_info from the embedded early_suspend member and apply the
 * suspend-time power settings (non-forced).
 */
static void dhd_early_suspend(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 1, 0);
}

/*
 * Android late-resume hook: screen turned back on. Undoes the settings
 * applied by dhd_early_suspend() (non-forced).
 */
static void dhd_late_resume(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 0, 0);
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
970 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
971 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
973 * dhd_timeout_start(&tmo, usec);
974 * while (!dhd_timeout_expired(&tmo))
975 * if (poll_something())
977 * if (dhd_timeout_expired(&tmo))
982 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
987 tmo->tick = jiffies_to_usecs(1);
991 dhd_timeout_expired(dhd_timeout_t *tmo)
993 /* Does nothing the first call */
994 if (tmo->increment == 0) {
999 if (tmo->elapsed >= tmo->limit)
1002 /* Add the delay that's about to take place */
1003 tmo->elapsed += tmo->increment;
1005 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
1006 OSL_DELAY(tmo->increment);
1007 tmo->increment *= 2;
1008 if (tmo->increment > tmo->tick)
1009 tmo->increment = tmo->tick;
1011 wait_queue_head_t delay_wait;
1012 DECLARE_WAITQUEUE(wait, current);
1013 init_waitqueue_head(&delay_wait);
1014 add_wait_queue(&delay_wait, &wait);
1015 set_current_state(TASK_INTERRUPTIBLE);
1016 schedule_timeout(1);
1017 remove_wait_queue(&delay_wait, &wait);
1018 set_current_state(TASK_RUNNING);
1025 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1030 while (i < DHD_MAX_IFS) {
1031 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
1039 struct net_device * dhd_idx2net(void *pub, int ifidx)
1041 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1042 struct dhd_info *dhd_info;
1044 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1046 dhd_info = dhd_pub->info;
1047 if (dhd_info && dhd_info->iflist[ifidx])
1048 return dhd_info->iflist[ifidx]->net;
1053 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1055 int i = DHD_MAX_IFS;
1059 if (name == NULL || *name == '\0')
1063 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1066 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1068 return i; /* default - the primary interface */
1072 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1074 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1078 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1079 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1083 if (dhd->iflist[ifidx] == NULL) {
1084 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1088 if (dhd->iflist[ifidx]->net)
1089 return dhd->iflist[ifidx]->net->name;
1095 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1098 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1101 for (i = 0; i < DHD_MAX_IFS; i++)
1102 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1103 return dhd->iflist[i]->mac_addr;
/*
 * Push the interface's multicast configuration to the dongle in three
 * steps: (1) the explicit "mcast_list" iovar (count + addresses),
 * (2) the "allmulti" iovar (forced on if step 1 failed), and
 * (3) WLC_SET_PROMISC from the IFF_PROMISC flag.
 * NOTE(review): sampled extract -- the return type line, several local
 * declarations (ioc, ret, buf/bufp, buflen) and some braces are missing
 * from this view.
 */
1110 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
1112 struct net_device *dev;
1113 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1114 struct netdev_hw_addr *ha;
1116 struct dev_mc_list *mclist;
1118 uint32 allmulti, cnt;
1125 ASSERT(dhd && dhd->iflist[ifidx]);
1126 dev = dhd->iflist[ifidx]->net;
/* Snapshot the multicast count under the address lock (>= 2.6.27). */
1129 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1130 netif_addr_lock_bh(dev);
1132 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1133 cnt = netdev_mc_count(dev);
1135 cnt = dev->mc_count;
1136 #endif /* LINUX_VERSION_CODE */
1138 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1139 netif_addr_unlock_bh(dev);
1142 /* Determine initial value of allmulti flag */
1143 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
1145 /* Send down the multicast list first. */
/* Buffer layout: "mcast_list\0" + 32-bit count + cnt MAC addresses. */
1148 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
1149 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
1150 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
1151 dhd_ifname(&dhd->pub, ifidx), cnt));
/* NOTE(review): strncpy into a buffer sized to hold "mcast_list" exactly;
 * the explicit NUL store on the next line guarantees termination.
 */
1155 strncpy(bufp, "mcast_list", buflen - 1);
1156 bufp[buflen - 1] = '\0';
1157 bufp += strlen("mcast_list") + 1;
1160 memcpy(bufp, &cnt, sizeof(cnt));
1161 bufp += sizeof(cnt);
/* Copy the addresses themselves, again under the address lock. */
1164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1165 netif_addr_lock_bh(dev);
1167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
1168 netdev_for_each_mc_addr(ha, dev) {
1171 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
1172 bufp += ETHER_ADDR_LEN;
1176 for (mclist = dev->mc_list; (mclist && (cnt > 0));
1177 cnt--, mclist = mclist->next) {
1178 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
1179 bufp += ETHER_ADDR_LEN;
1181 #endif /* LINUX_VERSION_CODE */
1183 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1184 netif_addr_unlock_bh(dev);
1187 memset(&ioc, 0, sizeof(ioc));
1188 ioc.cmd = WLC_SET_VAR;
1193 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
/* Dongle rejected the explicit list: fall back to allmulti. */
1195 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
1196 dhd_ifname(&dhd->pub, ifidx), cnt));
1197 allmulti = cnt ? TRUE : allmulti;
1200 MFREE(dhd->pub.osh, buf, buflen);
1202 /* Now send the allmulti setting. This is based on the setting in the
1203 * net_device flags, but might be modified above to be turned on if we
1204 * were trying to set some addresses and dongle rejected it...
1207 buflen = sizeof("allmulti") + sizeof(allmulti);
1208 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
1209 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
1212 allmulti = htol32(allmulti);
1214 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
1215 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
1216 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
1217 MFREE(dhd->pub.osh, buf, buflen);
1222 memset(&ioc, 0, sizeof(ioc));
1223 ioc.cmd = WLC_SET_VAR;
1228 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1230 DHD_ERROR(("%s: set allmulti %d failed\n",
1231 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
1234 MFREE(dhd->pub.osh, buf, buflen);
1236 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
/* 'allmulti' is reused here as a scratch variable for the promisc value. */
1238 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
1240 allmulti = htol32(allmulti);
1242 memset(&ioc, 0, sizeof(ioc));
1243 ioc.cmd = WLC_SET_PROMISC;
1244 ioc.buf = &allmulti;
1245 ioc.len = sizeof(allmulti);
1248 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1250 DHD_ERROR(("%s: set promisc %d failed\n",
1251 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
/*
 * _dhd_set_mac_address: push a new MAC address ("cur_etheraddr" iovar) to the
 * dongle for interface 'ifidx'; on success mirror it into the net_device's
 * dev_addr and into the cached dhd->pub.mac.
 * NOTE(review): extract is missing lines (source numbering jumps); local
 * declarations, error-path braces and the return are not visible here.
 */
1256 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
/* Build the "cur_etheraddr" iovar into the local buffer (32 bytes). */
1262 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
1263 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
/* Send the iovar down to the dongle as WLC_SET_VAR. */
1266 memset(&ioc, 0, sizeof(ioc));
1267 ioc.cmd = WLC_SET_VAR;
1272 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1274 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
/* Success path: update kernel-visible address and the cached copy. */
1276 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
1278 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
1285 extern struct net_device *ap_net_dev;
1286 extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
/*
 * dhd_ifadd_event_handler: deferred-work handler (DHD_WQ_WORK_IF_ADD) that
 * allocates and registers a new virtual interface described by 'event_info'
 * (a dhd_if_event_t, freed here). Runs under the net-if lock with a wake
 * lock held.
 */
1290 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
1292 dhd_info_t *dhd = handle;
1293 dhd_if_event_t *if_event = event_info;
1294 struct net_device *ndev;
/* Sanity checks: right event type, valid driver context, valid payload. */
1298 if (event != DHD_WQ_WORK_IF_ADD) {
1299 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1304 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
1309 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
1313 dhd_net_if_lock_local(dhd);
1314 DHD_OS_WAKE_LOCK(&dhd->pub);
1316 ifidx = if_event->event.ifidx;
1317 bssidx = if_event->event.bssidx;
1318 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
/* Allocate the net_device for this (ifidx, bssidx) pair. */
1320 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
1321 if_event->mac, bssidx, TRUE);
1323 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
1327 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
1328 if (ret != BCME_OK) {
1329 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
/* Registration failed: undo the allocation before bailing out. */
1330 dhd_remove_if(&dhd->pub, ifidx, TRUE);
/* The event payload is owned by this handler; release it on every exit. */
1333 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
1335 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1336 dhd_net_if_unlock_local(dhd);
/*
 * dhd_ifdel_event_handler: deferred-work handler (DHD_WQ_WORK_IF_DEL) that
 * tears down the interface named by the dhd_if_event_t payload (freed here).
 * Mirrors dhd_ifadd_event_handler's locking: net-if lock + wake lock.
 */
1340 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
1342 dhd_info_t *dhd = handle;
1344 dhd_if_event_t *if_event = event_info;
/* Sanity checks before touching driver state. */
1347 if (event != DHD_WQ_WORK_IF_DEL) {
1348 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1353 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
1358 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
1362 dhd_net_if_lock_local(dhd);
1363 DHD_OS_WAKE_LOCK(&dhd->pub);
1365 ifidx = if_event->event.ifidx;
1366 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
1368 dhd_remove_if(&dhd->pub, ifidx, TRUE);
/* Handler owns the event payload; release it before unlocking. */
1370 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
1372 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1373 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mac_addr_handler: deferred-work handler (DHD_WQ_WORK_SET_MAC) that
 * applies the MAC address staged in ifp->mac_addr by dhd_set_mac_address().
 * Refuses to change the MAC while an AP interface exists (ap_net_dev set).
 */
1377 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
1379 dhd_info_t *dhd = handle;
1380 dhd_if_t *ifp = event_info;
1383 unsigned long flags;
1387 if (event != DHD_WQ_WORK_SET_MAC) {
1388 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1392 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
/* Snapshot AP-mode state under the spinlock; block the change in AP mode. */
1397 flags = dhd_os_spin_lock(&dhd->pub);
1398 in_ap = (ap_net_dev != NULL);
1399 dhd_os_spin_unlock(&dhd->pub, flags);
1402 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
1407 dhd_net_if_lock_local(dhd);
1408 DHD_OS_WAKE_LOCK(&dhd->pub);
1410 if (ifp == NULL || !dhd->pub.up) {
1411 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
1415 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
/* Clear the pending flag first, then push the staged address down. */
1416 ifp->set_macaddress = FALSE;
1417 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
1418 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
1420 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
1423 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1424 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mcast_list_handler: deferred-work handler
 * (DHD_WQ_WORK_SET_MCAST_LIST) that applies the multicast filter list staged
 * by dhd_set_multicast_list(). Blocked while an AP interface exists.
 */
1428 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
1430 dhd_info_t *dhd = handle;
1431 dhd_if_t *ifp = event_info;
1436 unsigned long flags;
1439 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
1440 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
1445 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
/* Snapshot AP-mode state under the spinlock; bail if in AP mode. */
1450 flags = dhd_os_spin_lock(&dhd->pub);
1451 in_ap = (ap_net_dev != NULL);
1452 dhd_os_spin_unlock(&dhd->pub, flags);
1455 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
/* Drop the pending request so it is not retried. */
1457 ifp->set_multicast = FALSE;
1462 dhd_net_if_lock_local(dhd);
1463 DHD_OS_WAKE_LOCK(&dhd->pub);
1465 if (ifp == NULL || !dhd->pub.up) {
1466 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
/* Apply the multicast list to the dongle for this interface. */
1473 _dhd_set_multicast_list(dhd, ifidx);
1474 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
1477 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1478 dhd_net_if_unlock_local(dhd);
/*
 * dhd_set_mac_address: net_device ndo_set_mac_address hook. Stages the
 * requested address in the per-interface struct and defers the actual dongle
 * update to dhd_set_mac_addr_handler() via the deferred work queue.
 */
1482 dhd_set_mac_address(struct net_device *dev, void *addr)
1486 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
1487 struct sockaddr *sa = (struct sockaddr *)addr;
1491 ifidx = dhd_net2idx(dhd, dev);
1492 if (ifidx == DHD_BAD_IF)
1495 dhdif = dhd->iflist[ifidx];
/* Stage the new MAC under the net-if lock; handler consumes it later. */
1497 dhd_net_if_lock_local(dhd);
1498 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
1499 dhdif->set_macaddress = TRUE;
1500 dhd_net_if_unlock_local(dhd);
1501 dhd_deferred_schedule_work((void *)dhdif, DHD_WQ_WORK_SET_MAC,
1502 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
/*
 * dhd_set_multicast_list: net_device ndo_set_rx_mode hook. Marks the
 * interface as having a pending multicast-list update and defers the dongle
 * programming to dhd_set_mcast_list_handler().
 */
1507 dhd_set_multicast_list(struct net_device *dev)
1509 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
1512 ifidx = dhd_net2idx(dhd, dev);
1513 if (ifidx == DHD_BAD_IF)
1516 dhd->iflist[ifidx]->set_multicast = TRUE;
1517 dhd_deferred_schedule_work((void *)dhd->iflist[ifidx], DHD_WQ_WORK_SET_MCAST_LIST,
1518 dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
1521 #ifdef PROP_TXSTATUS
/* Acquire the wlfc (wireless flow control) BH-safe spinlock for 'pub'. */
1523 dhd_os_wlfc_block(dhd_pub_t *pub)
1525 dhd_info_t *di = (dhd_info_t *)(pub->info);
1527 spin_lock_bh(&di->wlfc_spinlock);
/* Release the wlfc spinlock taken by dhd_os_wlfc_block(). */
1532 dhd_os_wlfc_unblock(dhd_pub_t *pub)
1534 dhd_info_t *di = (dhd_info_t *)(pub->info);
1537 spin_unlock_bh(&di->wlfc_spinlock);
/* WME mapping tables: 802.1d priority -> FIFO (prio2fifo), FIFO -> access
 * category (wme_fifo2ac); WME_PRIO2AC composes the two. */
1541 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
1542 uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
1543 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
1545 #endif /* PROP_TXSTATUS */
/*
 * dhd_sendpkt: common TX path. Takes ownership of 'pktbuf' (frees it on any
 * early-exit path), updates multicast/802.1X counters, optionally applies
 * TCP-ACK suppression and packet priority, pushes the protocol header and
 * hands the frame to the bus layer (directly or through proptxstatus).
 * NOTE(review): extract is missing lines; returns/braces not all visible.
 */
1547 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
1550 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
1551 struct ether_header *eh = NULL;
1553 /* Reject if down */
1554 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
1555 /* free the packet here since the caller won't */
1556 PKTFREE(dhdp->osh, pktbuf, TRUE);
1560 /* Update multicast statistic */
1561 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
1562 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
1563 eh = (struct ether_header *)pktdata;
1565 if (ETHER_ISMULTI(eh->ether_dhost))
1566 dhdp->tx_multicast++;
/* Track in-flight EAPOL frames so key-set ioctls can wait for them. */
1567 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
1568 atomic_inc(&dhd->pend_8021x_cnt);
1570 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
1574 #ifdef DHDTCPACK_SUPPRESS
1575 /* If this packet has replaced another packet and got freed, just return */
1576 if (dhd_tcpack_suppress(dhdp, pktbuf))
1578 #endif /* DHDTCPACK_SUPPRESS */
1580 /* Look into the packet and update the packet priority */
1581 #ifndef PKTPRIO_OVERRIDE
1582 if (PKTPRIO(pktbuf) == 0)
1584 pktsetprio(pktbuf, FALSE);
1586 #ifdef PROP_TXSTATUS
1587 if (dhd_wlfc_is_supported(dhdp)) {
1588 /* store the interface ID */
1589 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
1591 /* store destination MAC in the tag as well */
1592 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
1594 /* decide which FIFO this packet belongs to */
1595 if (ETHER_ISMULTI(eh->ether_dhost))
1596 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
1597 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
1599 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
1601 #endif /* PROP_TXSTATUS */
1602 /* If the protocol uses a data header, apply it */
1603 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
1605 /* Use bus module to send data frame */
1607 dhd_htsf_addtxts(dhdp, pktbuf);
1609 #ifdef PROP_TXSTATUS
/* Let wlfc commit the packet; fall back to plain bus TX if unsupported. */
1611 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
1612 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
1613 /* non-proptxstatus way */
1614 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
1619 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
1621 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
1622 #endif /* BCMPCIE */
1623 #endif /* PROP_TXSTATUS */
/*
 * dhd_start_xmit: net_device ndo_start_xmit hook. Rejects frames when the
 * bus is down (stopping the queue and possibly raising a HANG event),
 * re-aligns odd skb data pointers, guarantees headroom for the bus/protocol
 * headers, converts the skb to a native packet and sends it via
 * dhd_sendpkt(). Always consumes the skb on the success path.
 */
1629 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
1634 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
1635 dhd_if_t *ifp = NULL;
1638 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
1640 uint8 htsfdlystat_sz = 0;
1643 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1645 DHD_OS_WAKE_LOCK(&dhd->pub);
1647 /* Reject if down */
1648 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
1649 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
1650 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
1651 netif_stop_queue(net);
1652 /* Send Event when bus down detected during data session */
1654 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
1655 net_os_send_hang_message(net);
1657 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1658 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1661 return NETDEV_TX_BUSY;
1665 ifidx = dhd_net2idx(dhd, net);
1666 if (ifidx == DHD_BAD_IF) {
1667 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
1668 netif_stop_queue(net);
1669 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1670 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1673 return NETDEV_TX_BUSY;
1677 /* re-align socket buffer if "skb->data" is odd address */
1678 if (((unsigned long)(skb->data)) & 0x1) {
1679 unsigned char *data = skb->data;
1680 uint32 length = skb->len;
1681 PKTPUSH(dhd->pub.osh, skb, 1);
1682 memmove(skb->data, data, length);
1683 PKTSETLEN(dhd->pub.osh, skb, length);
1686 ifp = dhd->iflist[ifidx];
1687 datalen = PKTLEN(dhd->pub.osh, skb);
1689 /* Make sure there's enough room for any header */
1691 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
1692 struct sk_buff *skb2;
1694 DHD_INFO(("%s: insufficient headroom\n",
1695 dhd_ifname(&dhd->pub, ifidx)));
1696 dhd->pub.tx_realloc++;
1698 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
1701 if ((skb = skb2) == NULL) {
1702 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
1703 dhd_ifname(&dhd->pub, ifidx)));
1709 /* Convert to packet */
1710 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
1711 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
1712 dhd_ifname(&dhd->pub, ifidx)));
1713 dev_kfree_skb_any(skb);
/* HTSF delay-stat tagging: retag unicast IP frames for latency tracking. */
1718 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
1719 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
1720 struct ether_header *eh = (struct ether_header *)pktdata;
1722 if (!ETHER_ISMULTI(eh->ether_dhost) &&
1723 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
1724 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
1729 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
1733 ifp->stats.tx_dropped++;
1736 dhd->pub.tx_packets++;
1737 ifp->stats.tx_packets++;
1738 ifp->stats.tx_bytes += datalen;
1741 DHD_OS_WAKE_UNLOCK(&dhd->pub);
1743 /* Return ok: we always eat the packet */
1744 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1747 return NETDEV_TX_OK;
/*
 * dhd_txflowcontrol: assert (state=ON) or release TX flow control by
 * stopping/waking the kernel TX queue, either for one interface or for all
 * active interfaces when ifidx == ALL_INTERFACES.
 */
1752 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
1754 struct net_device *net;
1755 dhd_info_t *dhd = dhdp->info;
1758 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1762 if (ifidx == ALL_INTERFACES) {
1763 /* Flow control on all active interfaces */
1764 dhdp->txoff = state;
1765 for (i = 0; i < DHD_MAX_IFS; i++) {
1766 if (dhd->iflist[i]) {
1767 net = dhd->iflist[i]->net;
1769 netif_stop_queue(net);
1771 netif_wake_queue(net);
/* Single-interface case. */
1776 if (dhd->iflist[ifidx]) {
1777 net = dhd->iflist[ifidx]->net;
1779 netif_stop_queue(net);
1781 netif_wake_queue(net);
/* Ethernet type -> printable label table for RX dump logging.
 * NOTE(review): extract gaps suggest a trailing fallback entry (e.g.
 * "UNKNOWN") is not visible here; _get_packet_type_str() uses the table's
 * last entry as the default label. */
1792 static const PKTTYPE_INFO packet_type_info[] =
1794 { ETHER_TYPE_IP, "IP" },
1795 { ETHER_TYPE_ARP, "ARP" },
1796 { ETHER_TYPE_BRCM, "BRCM" },
1797 { ETHER_TYPE_802_1X, "802.1X" },
1798 { ETHER_TYPE_WAI, "WAPI" },
1802 static const char *_get_packet_type_str(uint16 type)
1805 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
1807 for (i = 0; i < n; i++) {
1808 if (packet_type_info[i].type == type)
1809 return packet_type_info[i].str;
1812 return packet_type_info[n].str;
1814 #endif /* DHD_RX_DUMP */
/*
 * dhd_rx_frame: main RX path. Walks the chained packet list ('numpkt'
 * buffers linked via PKTNEXT), validates the target interface, drops data
 * frames that arrive before the net_device is registered, filters wlfc
 * header-only packets, optionally dumps frames, converts each buffer to an
 * skb, processes ETHER_TYPE_BRCM event packets, updates RX statistics and
 * delivers the rest to the stack (via the rxf thread or netif_rx*).
 * NOTE(review): extract is missing many lines (netif_rx calls, braces).
 */
1818 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
1820 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1821 struct sk_buff *skb;
1824 void *data, *pnext = NULL;
1827 wl_event_msg_t event;
1830 void *skbhead = NULL;
1831 void *skbprev = NULL;
1832 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
1835 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
1837 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1839 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
1840 struct ether_header *eh;
/* Detach this buffer from the chain before handing it onward. */
1842 pnext = PKTNEXT(dhdp->osh, pktbuf);
1843 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
1845 ifp = dhd->iflist[ifidx];
1847 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
1849 PKTFREE(dhdp->osh, pktbuf, FALSE);
1852 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
1853 /* Dropping only data packets before registering net device to avoid kernel panic */
1854 #ifndef PROP_TXSTATUS_VSDB
1855 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
1856 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
1858 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
1859 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
1860 #endif /* PROP_TXSTATUS_VSDB */
1861 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
1863 PKTFREE(dhdp->osh, pktbuf, FALSE);
1868 #ifdef PROP_TXSTATUS
1869 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
1870 /* WLFC may send header only packet when
1871 there is an urgent message but no packet to
1874 PKTFREE(dhdp->osh, pktbuf, FALSE);
1878 #ifdef DHDTCPACK_SUPPRESS
1879 dhd_tcpdata_info_get(dhdp, pktbuf);
1881 skb = PKTTONATIVE(dhdp->osh, pktbuf);
1883 ifp = dhd->iflist[ifidx];
1885 ifp = dhd->iflist[0];
1888 skb->dev = ifp->net;
1891 /* Get the protocol, maintain skb around eth_type_trans()
1892 * The main reason for this hack is for the limitation of
1893 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
1894 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
1895 * coping of the packet coming from the network stack to add
1896 * BDC, Hardware header etc, during network interface registration
1897 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
1898 * for BDC, Hardware header etc. and not just the ETH_HLEN
1903 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
1904 dump_data = skb->data;
1905 protocol = (dump_data[12] << 8) | dump_data[13];
1907 if (protocol == ETHER_TYPE_802_1X) {
1908 DHD_ERROR(("ETHER_TYPE_802_1X: "
1909 "ver %d, type %d, replay %d\n",
1910 dump_data[14], dump_data[15],
1913 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
1914 #if defined(DHD_RX_DUMP)
1915 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
1916 if (protocol != ETHER_TYPE_BRCM) {
1917 if (dump_data[0] == 0xFF) {
1918 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
1920 if ((dump_data[12] == 8) &&
1921 (dump_data[13] == 6)) {
1922 DHD_ERROR(("%s: ARP %d\n",
1923 __FUNCTION__, dump_data[0x15]));
1925 } else if (dump_data[0] & 1) {
1926 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
1927 __FUNCTION__, MAC2STRDBG(dump_data)));
1929 #ifdef DHD_RX_FULL_DUMP
1932 for (k = 0; k < skb->len; k++) {
1933 DHD_ERROR(("%02X ", dump_data[k]));
1939 #endif /* DHD_RX_FULL_DUMP */
1941 #endif /* DHD_RX_DUMP */
1943 skb->protocol = eth_type_trans(skb, skb->dev);
1945 if (skb->pkt_type == PACKET_MULTICAST) {
1946 dhd->pub.rx_multicast++;
1953 dhd_htsf_addrxts(dhdp, pktbuf);
1955 /* Strip header, count, deliver upward */
1956 skb_pull(skb, ETH_HLEN);
1958 /* Process special event packets and then discard them */
1959 memset(&event, 0, sizeof(event));
1960 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
1961 dhd_wl_host_event(dhd, &ifidx,
1962 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1963 skb_mac_header(skb),
1966 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
1970 wl_event_to_host_order(&event);
1972 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
1974 #if defined(PNO_SUPPORT)
1975 if (event.event_type == WLC_E_PFN_NET_FOUND) {
1976 /* enforce custom wake lock to guarantee that the kernel is not suspended */
1977 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
1979 #endif /* PNO_SUPPORT */
1981 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
1982 PKTFREE(dhdp->osh, pktbuf, FALSE);
1984 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
1986 tout_rx = DHD_PACKET_TIMEOUT_MS;
/* Normal data packet: account it against the (possibly updated) ifidx. */
1989 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
1990 ifp = dhd->iflist[ifidx];
1993 ifp->net->last_rx = jiffies;
1995 dhdp->dstats.rx_bytes += skb->len;
1996 dhdp->rx_packets++; /* Local count */
1997 ifp->stats.rx_bytes += skb->len;
1998 ifp->stats.rx_packets++;
/* In IRQ context, queue to the rxf thread (chaining skbs) instead of
 * delivering inline. */
2001 if (in_interrupt()) {
2004 if (dhd->rxthread_enabled) {
2008 PKTSETNEXT(dhdp->osh, skbprev, skb);
2012 /* If the receive is not processed inside an ISR,
2013 * the softirqd must be woken explicitly to service
2014 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
2015 * by netif_rx_ni(), but in earlier kernels, we need
2016 * to do it manually.
2018 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2023 local_irq_save(flags);
2025 local_irq_restore(flags);
2026 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
/* Flush the accumulated skb chain to the rxf thread in one shot. */
2031 if (dhd->rxthread_enabled && skbhead)
2032 dhd_sched_rxf(dhdp, skbhead);
2034 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
2035 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
/* dhd_event: OS-abstraction hook for event delivery; a no-op on Linux. */
2039 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
2041 /* Linux version has nothing to do */
/*
 * dhd_txcomplete: TX-completion callback from the bus layer. Strips the
 * protocol header and, for 802.1X (EAPOL) frames, decrements the pending
 * counter incremented in dhd_sendpkt() so key-set ioctls can stop waiting.
 */
2046 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
2048 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2049 struct ether_header *eh;
2052 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
2054 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
2055 type = ntoh16(eh->ether_type);
2057 if (type == ETHER_TYPE_802_1X)
2058 atomic_dec(&dhd->pend_8021x_cnt);
/*
 * dhd_get_stats: net_device ndo_get_stats hook. Pulls fresh counters from
 * the dongle (dhd_prot_dstats) into the per-interface stats struct and
 * returns them; on a bad interface index returns zeroed net->stats.
 */
2062 static struct net_device_stats *
2063 dhd_get_stats(struct net_device *net)
2065 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2069 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2071 ifidx = dhd_net2idx(dhd, net);
2072 if (ifidx == DHD_BAD_IF) {
2073 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
2075 memset(&net->stats, 0, sizeof(net->stats));
2079 ifp = dhd->iflist[ifidx];
2083 /* Use the protocol to get dongle stats */
2084 dhd_prot_dstats(&dhd->pub);
2087 /* Copy dongle stats to net device stats */
2088 ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
2089 ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
2090 ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
2091 ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
2092 ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
2093 ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
2094 ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
2095 ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
2096 ifp->stats.multicast = dhd->pub.dstats.multicast;
/*
 * dhd_watchdog_thread: kernel-thread variant of the watchdog. Optionally
 * raises itself to SCHED_FIFO, then loops on tsk->sema: each wakeup runs
 * dhd_bus_watchdog() under the SD lock and re-arms the watchdog timer,
 * compensating for the time already spent in this pass.
 */
2102 dhd_watchdog_thread(void *data)
2104 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
2105 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
2106 /* This thread doesn't need any user-level access,
2107 * so get rid of all our resources
2109 if (dhd_watchdog_prio > 0) {
2110 struct sched_param param;
2111 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
2112 dhd_watchdog_prio:(MAX_RT_PRIO-1);
/* NOTE(review): "¶m" is HTML-entity mojibake for "&param" — needs fixing. */
2113 setScheduler(current, SCHED_FIFO, ¶m);
2117 if (down_interruptible (&tsk->sema) == 0) {
2118 unsigned long flags;
2119 unsigned long jiffies_at_start = jiffies;
2120 unsigned long time_lapse;
2122 SMP_RD_BARRIER_DEPENDS();
2123 if (tsk->terminated) {
2127 dhd_os_sdlock(&dhd->pub);
2128 if (dhd->pub.dongle_reset == FALSE) {
2129 DHD_TIMER(("%s:\n", __FUNCTION__));
2131 /* Call the bus module watchdog */
2132 dhd_bus_watchdog(&dhd->pub);
2134 flags = dhd_os_spin_lock(&dhd->pub);
2135 /* Count the tick for reference */
2137 time_lapse = jiffies - jiffies_at_start;
2139 /* Reschedule the watchdog */
2140 if (dhd->wd_timer_valid)
2141 mod_timer(&dhd->timer,
/* Subtract elapsed time (clamped) so the next tick lands on schedule. */
2143 msecs_to_jiffies(dhd_watchdog_ms) -
2144 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
2145 dhd_os_spin_unlock(&dhd->pub, flags);
2147 dhd_os_sdunlock(&dhd->pub);
2152 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_watchdog: timer callback. If a watchdog thread exists, just signal it
 * (the thread does the work); otherwise run dhd_bus_watchdog() inline and
 * re-arm the timer. Skips everything while the dongle is in reset.
 */
2155 static void dhd_watchdog(ulong data)
2157 dhd_info_t *dhd = (dhd_info_t *)data;
2158 unsigned long flags;
2160 if (dhd->pub.dongle_reset) {
/* Thread mode: wake the watchdog thread and return. */
2164 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
2165 up(&dhd->thr_wdt_ctl.sema);
2169 dhd_os_sdlock(&dhd->pub);
2170 /* Call the bus module watchdog */
2171 dhd_bus_watchdog(&dhd->pub);
2173 flags = dhd_os_spin_lock(&dhd->pub);
2174 /* Count the tick for reference */
2177 /* Reschedule the watchdog */
2178 if (dhd->wd_timer_valid)
2179 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
2180 dhd_os_spin_unlock(&dhd->pub, flags);
2181 dhd_os_sdunlock(&dhd->pub);
2184 #ifdef ENABLE_ADAPTIVE_SCHED
2186 dhd_sched_policy(int prio)
2188 struct sched_param param;
2189 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
2190 param.sched_priority = 0;
2191 setScheduler(current, SCHED_NORMAL, ¶m);
2193 if (get_scheduler_policy(current) != SCHED_FIFO) {
2194 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
2195 setScheduler(current, SCHED_FIFO, ¶m);
2199 #endif /* ENABLE_ADAPTIVE_SCHED */
2200 #ifdef DEBUG_CPU_FREQ
/*
 * dhd_cpufreq_notifier: cpufreq transition notifier (DEBUG_CPU_FREQ only).
 * On CPUFREQ_POSTCHANGE, logs the new frequency and records it in the
 * per-cpu dhd->new_freq slot for the changed CPU.
 */
2201 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
2203 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
2204 struct cpufreq_freqs *freq = data;
2208 if (val == CPUFREQ_POSTCHANGE) {
2209 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
2210 freq->new, freq->cpu));
2211 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
/*
 * dhd_dpc_thread: kernel-thread variant of the DPC (deferred procedure
 * call). Optionally runs at SCHED_FIFO and pinned to a CPU core; loops on a
 * binary semaphore, draining dhd_bus_dpc() while the bus is up, and cleanly
 * stops the bus when a wakeup arrives with the bus down.
 */
2219 dhd_dpc_thread(void *data)
2221 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
2222 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
2224 /* This thread doesn't need any user-level access,
2225 * so get rid of all our resources
2227 if (dhd_dpc_prio > 0)
2229 struct sched_param param;
2230 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
/* NOTE(review): "¶m" is HTML-entity mojibake for "&param" — needs fixing. */
2231 setScheduler(current, SCHED_FIFO, ¶m);
2234 #ifdef CUSTOM_DPC_CPUCORE
2235 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
2237 #ifdef CUSTOM_SET_CPUCORE
2238 dhd->pub.current_dpc = current;
2239 #endif /* CUSTOM_SET_CPUCORE */
2241 /* Run until signal received */
2243 if (!binary_sema_down(tsk)) {
2244 #ifdef ENABLE_ADAPTIVE_SCHED
2245 dhd_sched_policy(dhd_dpc_prio);
2246 #endif /* ENABLE_ADAPTIVE_SCHED */
2247 SMP_RD_BARRIER_DEPENDS();
2248 if (tsk->terminated) {
2252 /* Call bus dpc unless it indicated down (then clean stop) */
2253 if (dhd->pub.busstate != DHD_BUS_DOWN) {
/* Keep the watchdog extended while draining all pending bus work. */
2254 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
2255 while (dhd_bus_dpc(dhd->pub.bus)) {
2256 /* process all data */
2258 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
2259 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2263 dhd_bus_stop(dhd->pub.bus, TRUE);
2264 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2271 complete_and_exit(&tsk->completed, 0);
/*
 * dhd_rxf_thread: RX-forwarding kernel thread. Optionally runs at
 * SCHED_FIFO; loops on tsk->sema, dequeuing skb chains queued by
 * dhd_sched_rxf() and delivering each skb to the network stack.
 */
2275 dhd_rxf_thread(void *data)
2277 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
2278 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
2279 dhd_pub_t *pub = &dhd->pub;
2280 #if defined(WAIT_DEQUEUE)
2281 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
2282 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
2285 /* This thread doesn't need any user-level access,
2286 * so get rid of all our resources
2288 if (dhd_rxf_prio > 0)
2290 struct sched_param param;
2291 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
/* NOTE(review): "¶m" is HTML-entity mojibake for "&param" — needs fixing. */
2292 setScheduler(current, SCHED_FIFO, ¶m);
2295 DAEMONIZE("dhd_rxf");
2296 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
2298 /* signal: thread has started */
2299 complete(&tsk->completed);
2300 #ifdef CUSTOM_SET_CPUCORE
2301 dhd->pub.current_rxf = current;
2302 #endif /* CUSTOM_SET_CPUCORE */
2304 /* Run until signal received */
2306 if (down_interruptible(&tsk->sema) == 0) {
2308 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
2311 #ifdef ENABLE_ADAPTIVE_SCHED
2312 dhd_sched_policy(dhd_rxf_prio);
2313 #endif /* ENABLE_ADAPTIVE_SCHED */
2315 SMP_RD_BARRIER_DEPENDS();
2317 if (tsk->terminated) {
2320 skb = dhd_rxf_dequeue(pub);
/* Walk the skb chain, detaching each skb before delivery. */
2326 void *skbnext = PKTNEXT(pub->osh, skb);
2327 PKTSETNEXT(pub->osh, skb, NULL);
2329 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2333 local_irq_save(flags);
2335 local_irq_restore(flags);
2340 #if defined(WAIT_DEQUEUE)
2341 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
2343 watchdogTime = OSL_SYSUPTIME();
2347 DHD_OS_WAKE_UNLOCK(pub);
2353 complete_and_exit(&tsk->completed, 0);
/* Tasklet body of the DPC path (the function header falls in an extract
 * gap; presumably dhd_dpc(ulong data) — TODO confirm). Re-schedules itself
 * while dhd_bus_dpc() reports more work; stops the bus if it is down. */
2361 dhd = (dhd_info_t *)data;
2363 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
2364 * down below , wake lock is set,
2365 * the tasklet is initialized in dhd_attach()
2367 /* Call bus dpc unless it indicated down (then clean stop) */
2368 if (dhd->pub.busstate != DHD_BUS_DOWN) {
2369 if (dhd_bus_dpc(dhd->pub.bus))
2370 tasklet_schedule(&dhd->tasklet);
2372 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2374 dhd_bus_stop(dhd->pub.bus, TRUE);
2375 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * dhd_sched_dpc: kick the DPC. Takes a wake lock, then either signals the
 * DPC thread (releasing the lock if the thread was already signalled) or
 * schedules the DPC tasklet when no thread is running.
 */
2380 dhd_sched_dpc(dhd_pub_t *dhdp)
2382 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2384 DHD_OS_WAKE_LOCK(dhdp);
2385 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
2386 /* If the semaphore does not get up,
2387 * wake unlock should be done here
2389 if (!binary_sema_up(&dhd->thr_dpc_ctl))
2390 DHD_OS_WAKE_UNLOCK(dhdp);
2393 tasklet_schedule(&dhd->tasklet);
/*
 * dhd_sched_rxf: hand an skb (chain) to the RX-forwarding thread. Takes a
 * wake lock, enqueues the skb and wakes the rxf thread if one is running.
 */
2398 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
2400 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2402 DHD_OS_WAKE_LOCK(dhdp);
2404 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
2407 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
2410 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
2411 up(&dhd->thr_rxf_ctl.sema);
2417 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
/*
 * dhd_toe_get: read the TOE (TCP offload engine) component bitmap from the
 * dongle via the "toe_ol" iovar into *toe_ol. A BCME_UNSUPPORTED-style
 * failure indicates an older dongle image without TOE support.
 */
2419 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
2425 memset(&ioc, 0, sizeof(ioc));
2427 ioc.cmd = WLC_GET_VAR;
2429 ioc.len = (uint)sizeof(buf);
/* GET_VAR convention: buffer carries the iovar name on input. */
2432 strncpy(buf, "toe_ol", sizeof(buf) - 1);
2433 buf[sizeof(buf) - 1] = '\0';
2434 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2435 /* Check for older dongle image that doesn't support toe_ol */
2437 DHD_ERROR(("%s: toe not supported by device\n",
2438 dhd_ifname(&dhd->pub, ifidx)));
2442 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
/* On success the dongle overwrote the buffer with the 32-bit bitmap. */
2446 memcpy(toe_ol, buf, sizeof(uint32));
2450 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
/*
 * dhd_toe_set: write the TOE component bitmap ("toe_ol" iovar), then set
 * the global "toe" enable iovar — on iff any component bit is set.
 */
2452 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
2458 memset(&ioc, 0, sizeof(ioc));
2460 ioc.cmd = WLC_SET_VAR;
2462 ioc.len = (uint)sizeof(buf);
2465 /* Set toe_ol as requested */
/* SET_VAR convention: name, NUL, then the value, packed in one buffer. */
2467 strncpy(buf, "toe_ol", sizeof(buf) - 1);
2468 buf[sizeof(buf) - 1] = '\0';
2469 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
2471 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2472 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
2473 dhd_ifname(&dhd->pub, ifidx), ret));
2477 /* Enable toe globally only if any components are enabled. */
2479 toe = (toe_ol != 0);
2482 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
2484 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2485 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
2493 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/* ethtool .get_drvinfo: report driver name "wl" and the dongle-provided
 * driver version. */
2495 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
2497 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2499 snprintf(info->driver, sizeof(info->driver), "wl");
2500 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
/* ethtool_ops registered for DHD net devices (>= 2.6.24 only). */
2503 struct ethtool_ops dhd_ethtool_ops = {
2504 .get_drvinfo = dhd_ethtool_get_drvinfo
2506 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
2509 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * dhd_ethtool: handle SIOCETHTOOL requests from user space. Supports
 * ETHTOOL_GDRVINFO (driver identification, with a "?dhd" probe shortcut)
 * and the TOE checksum-offload get/set commands, which are translated to
 * dhd_toe_get()/dhd_toe_set() and mirrored into net->features.
 * 'uaddr' is an untrusted user pointer; all transfers go through
 * copy_from_user/copy_to_user.
 */
2511 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
2513 struct ethtool_drvinfo info;
2514 char drvname[sizeof(info.driver)];
2517 struct ethtool_value edata;
2518 uint32 toe_cmpnt, csum_dir;
2522 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2524 /* all ethtool calls start with a cmd word */
2525 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
2529 case ETHTOOL_GDRVINFO:
2530 /* Copy out any request driver name */
2531 if (copy_from_user(&info, uaddr, sizeof(info)))
2533 strncpy(drvname, info.driver, sizeof(info.driver));
2534 drvname[sizeof(info.driver)-1] = '\0';
2536 /* clear struct for return */
2537 memset(&info, 0, sizeof(info));
2540 /* if dhd requested, identify ourselves */
2541 if (strcmp(drvname, "?dhd") == 0) {
2542 snprintf(info.driver, sizeof(info.driver), "dhd");
2543 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
2544 info.version[sizeof(info.version) - 1] = '\0';
2547 /* otherwise, require dongle to be up */
2548 else if (!dhd->pub.up) {
2549 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
2553 /* finally, report dongle driver type */
2554 else if (dhd->pub.iswl)
2555 snprintf(info.driver, sizeof(info.driver), "wl");
2557 snprintf(info.driver, sizeof(info.driver), "xx");
2559 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
2560 if (copy_to_user(uaddr, &info, sizeof(info)))
2562 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
2563 (int)sizeof(drvname), drvname, info.driver));
2567 /* Get toe offload components from dongle */
2568 case ETHTOOL_GRXCSUM:
2569 case ETHTOOL_GTXCSUM:
2570 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
2573 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
2576 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
2578 if (copy_to_user(uaddr, &edata, sizeof(edata)))
2582 /* Set toe offload components in dongle */
2583 case ETHTOOL_SRXCSUM:
2584 case ETHTOOL_STXCSUM:
2585 if (copy_from_user(&edata, uaddr, sizeof(edata)))
2588 /* Read the current settings, update and write back */
2589 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
2592 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
2594 if (edata.data != 0)
2595 toe_cmpnt |= csum_dir;
2597 toe_cmpnt &= ~csum_dir;
2599 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
2602 /* If setting TX checksum mode, tell Linux the new mode */
2603 if (cmd == ETHTOOL_STXCSUM) {
2605 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
2607 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
/*
 * dhd_check_hang: decide whether an ioctl error indicates a hung dongle and,
 * if so, send the HANG event to user space. Triggers on -ETIMEDOUT,
 * -EREMOTEIO, or a bus-down state without a deliberate dongle reset.
 * Skipped while the DPC thread is gone (driver unloading).
 */
2621 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
2626 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
2633 dhd = (dhd_info_t *)dhdp->info;
2634 #if !defined(BCMPCIE)
2635 if (dhd->thr_dpc_ctl.thr_pid < 0) {
2636 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
2641 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
2642 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
2643 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
2644 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
2645 net_os_send_hang_message(net);
/*
 * dhd_ioctl_process: dispatch an ioctl for interface 'ifidx'. Handles
 * driver-private ioctls (DHD_IOCTL_MAGIC) locally, requires the bus to be
 * up for dongle commands, serializes key-set/disassoc ioctls against
 * pending 802.1X frames, services WLMEDIA_HTSF debug shortcuts, and
 * finally forwards to dhd_wl_ioctl(); errors feed dhd_check_hang().
 */
2651 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
2653 int bcmerror = BCME_OK;
2655 struct net_device *net;
2657 net = dhd_idx2net(pub, ifidx);
2659 bcmerror = BCME_BADARG;
2664 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
2666 /* check for local dhd ioctl and handle it */
2667 if (ioc->driver == DHD_IOCTL_MAGIC) {
2668 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
2670 pub->bcmerror = bcmerror;
2675 /* send to dongle (must be up, and wl). */
2676 if (pub->busstate != DHD_BUS_DATA) {
2677 bcmerror = BCME_DONGLE_DOWN;
2682 bcmerror = BCME_DONGLE_DOWN;
2687 * Flush the TX queue if required for proper message serialization:
2688 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
2689 * prevent M4 encryption and
2690 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
2691 * prevent disassoc frame being sent before WPS-DONE frame.
2693 if (ioc->cmd == WLC_SET_KEY ||
2694 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
2695 strncmp("wsec_key", data_buf, 9) == 0) ||
2696 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
2697 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
2698 ioc->cmd == WLC_DISASSOC)
/* Wait for in-flight EAPOL frames before letting the ioctl proceed. */
2699 dhd_wait_pend8021x(net);
2703 /* short cut wl ioctl calls here */
2704 if (strcmp("htsf", data_buf) == 0) {
2705 dhd_ioctl_htsf_get(dhd, 0);
2709 if (strcmp("htsflate", data_buf) == 0) {
2711 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
2712 memset(&maxdelayts, 0, sizeof(tstamp_t));
2716 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
2717 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
2718 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
2719 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
2725 if (strcmp("htsfclear", data_buf) == 0) {
2726 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
2727 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
2728 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
2729 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
2733 if (strcmp("htsfhis", data_buf) == 0) {
2734 dhd_dump_htsfhisto(&vi_d1, "H to D");
2735 dhd_dump_htsfhisto(&vi_d2, "D to D");
2736 dhd_dump_htsfhisto(&vi_d3, "D to H");
2737 dhd_dump_htsfhisto(&vi_d4, "H to H");
2740 if (strcmp("tsport", data_buf) == 0) {
2742 memcpy(&tsport, data_buf + 7, 4);
2744 DHD_ERROR(("current timestamp port: %d \n", tsport));
2749 #endif /* WLMEDIA_HTSF */
/* BCM FD-aggregation "rpc_" iovars take a separate path when built in. */
2751 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
2752 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
2754 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
2756 bcmerror = BCME_UNSUPPORTED;
2760 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
2763 dhd_check_hang(net, pub, bcmerror);
/*
 * dhd_ioctl_entry: .ndo_do_ioctl handler for DHD net devices.
 * Routing order visible in this listing:
 *   1. refuse everything after a HANG has been sent up;
 *   2. wireless-extensions range -> wl_iw_ioctl() (unlocked, may recurse);
 *   3. SIOCETHTOOL -> dhd_ethtool();
 *   4. SIOCDEVPRIVATE+1 -> Android private command handler;
 *   5. SIOCDEVPRIVATE -> DHD/WL private ioctl: copy the wl_ioctl_t header
 *      (compat-aware for 32-bit user space), read 4 extra bytes into
 *      ioc.driver to distinguish wl vs dhd magic, require CAP_NET_ADMIN,
 *      bounce the payload through a kernel buffer, dispatch to
 *      dhd_ioctl_process(), and copy the result back to user space.
 * A wake lock is held for the duration and released on every visible path.
 * NOTE(review): listing gaps hide several returns, 'done:' label and brace
 * structure — confirm against the full source.
 */
2769 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
2771 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2773 #ifdef CONFIG_COMPAT
2774 dhd_ioctl_compat_t ioc_compat;
2779 void *local_buf = NULL;
2782 DHD_OS_WAKE_LOCK(&dhd->pub);
2784 /* send to dongle only if we are not waiting for reload already */
2785 if (dhd->pub.hang_was_sent) {
2786 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
2787 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
2788 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2789 return OSL_ERROR(BCME_DONGLE_DOWN);
2792 ifidx = dhd_net2idx(dhd, net);
2793 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
2795 if (ifidx == DHD_BAD_IF) {
2796 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
2797 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2801 #if defined(WL_WIRELESS_EXT)
2802 /* linux wireless extensions */
2803 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
2804 /* may recurse, do NOT lock */
2805 ret = wl_iw_ioctl(net, ifr, cmd);
2806 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2809 #endif /* defined(WL_WIRELESS_EXT) */
2811 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
2812 if (cmd == SIOCETHTOOL) {
2813 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
2814 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2817 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
2819 if (cmd == SIOCDEVPRIVATE+1) {
2820 ret = wl_android_priv_cmd(net, ifr, cmd);
2821 dhd_check_hang(net, &dhd->pub, ret);
2822 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2826 if (cmd != SIOCDEVPRIVATE) {
2827 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2831 memset(&ioc, 0, sizeof(ioc));
2832 #ifdef CONFIG_COMPAT
2833 memset(&ioc_compat, 0, sizeof(ioc_compat));
/* 32-bit user space on a 64-bit kernel: translate the compat layout field
 * by field (buf pointer widened via uintptr_t) */
2835 if (is_compat_task()) {
2836 /* Copy the ioc control structure part of ioctl request */
2837 if (copy_from_user(&ioc_compat, ifr->ifr_data, sizeof(dhd_ioctl_compat_t))) {
2838 bcmerror = BCME_BADADDR;
2841 ioc.cmd = ioc_compat.cmd;
2842 ioc.buf = (void *)(uintptr_t) ioc_compat.buf;
2843 ioc.len = ioc_compat.len;
2844 ioc.set = ioc_compat.set;
2845 ioc.used = ioc_compat.used;
2846 ioc.needed = ioc_compat.needed;
2847 ioc.driver = ioc_compat.driver;
2849 /* Copy the ioc control structure part of ioctl request */
2850 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
2851 bcmerror = BCME_BADADDR;
2855 /* To differentiate between wl and dhd read 4 more byes */
2856 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
2857 sizeof(uint)) != 0)) {
2858 bcmerror = BCME_BADADDR;
/* Non-CONFIG_COMPAT build: same header copy without the translation step */
2863 /* Copy the ioc control structure part of ioctl request */
2864 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
2865 bcmerror = BCME_BADADDR;
2869 /* To differentiate between wl and dhd read 4 more byes */
2870 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
2871 sizeof(uint)) != 0)) {
2872 bcmerror = BCME_BADADDR;
2877 if (!capable(CAP_NET_ADMIN)) {
2878 bcmerror = BCME_EPERM;
/* Bounce buffer: +1 byte so the payload can always be NUL-terminated, which
 * lets iovar-name string compares downstream operate safely */
2883 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
2884 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
2885 bcmerror = BCME_NOMEM;
2888 if (copy_from_user(local_buf, ioc.buf, buflen)) {
2889 bcmerror = BCME_BADADDR;
2892 *(char *)(local_buf + buflen) = '\0';
2895 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
2897 if (!bcmerror && buflen && local_buf && ioc.buf) {
2898 if (copy_to_user(ioc.buf, local_buf, buflen))
2904 MFREE(dhd->pub.osh, local_buf, buflen+1);
2906 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2908 return OSL_ERROR(bcmerror);
/*
 * dhd_stop: .ndo_stop handler — bring the interface (and for ifidx 0, the
 * whole dongle) down.  Visible sequence: stop the TX queue, take cfg80211
 * down, tear down any leftover virtual interfaces (non-built-in FW case),
 * clean wlfc state, stop the protocol layer, and on the primary interface
 * power the chip off via wl_android_wifi_off().  Timeout/hang counters are
 * reset so a future dhd_open starts clean.
 * NOTE(review): listing gaps hide the 'exit'-style label, return statement
 * and some conditional braces.
 */
2914 dhd_stop(struct net_device *net)
2917 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2918 DHD_OS_WAKE_LOCK(&dhd->pub);
2919 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
2920 if (dhd->pub.up == 0) {
2925 ifidx = dhd_net2idx(dhd, net);
2926 BCM_REFERENCE(ifidx);
2928 /* Set state and stop OS transmissions */
2929 netif_stop_queue(net);
2934 wl_cfg80211_down(NULL);
2937 * For CFG80211: Clean up all the left over virtual interfaces
2938 * when the primary Interface is brought down. [ifconfig wlan0 down]
2940 if (!dhd_download_fw_on_driverload) {
2941 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
2942 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
/* Remove secondary interfaces only; index 0 is the primary and stays */
2945 dhd_net_if_lock_local(dhd);
2946 for (i = 1; i < DHD_MAX_IFS; i++)
2947 dhd_remove_if(&dhd->pub, i, TRUE);
2948 dhd_net_if_unlock_local(dhd);
2952 #endif /* WL_CFG80211 */
2954 #ifdef PROP_TXSTATUS
2955 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
2957 /* Stop the protocol module */
2958 dhd_prot_stop(&dhd->pub);
2960 OLD_MOD_DEC_USE_COUNT;
2962 #if defined(WL_CFG80211)
2963 if (ifidx == 0 && !dhd_download_fw_on_driverload)
2964 wl_android_wifi_off(net);
/* Reset bookkeeping so the next open is not poisoned by stale hang state */
2966 dhd->pub.rxcnt_timeout = 0;
2967 dhd->pub.txcnt_timeout = 0;
2969 dhd->pub.hang_was_sent = 0;
2971 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2975 #if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
2976 defined(USE_INITIAL_SHORT_DWELL_TIME))
2977 extern bool g_first_broadcast_scan;
2978 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
/*
 * dhd_interworking_enable: turn on 802.11u interworking in the firmware by
 * setting the "interworking" iovar to 1.  Logs and propagates the ioctl
 * error code on failure.
 * NOTE(review): the trailing return and closing brace are not visible in
 * this listing.
 */
2981 static int dhd_interworking_enable(dhd_pub_t *dhd)
2983 char iovbuf[WLC_IOCTL_SMLEN];
2984 uint32 enable = true;
2987 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
2988 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2989 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
/*
 * dhd_open: .ndo_open handler — bring the interface up.  Visible sequence:
 * clear trap/hang flags, (WEXT builds) force-start the WL subsystem,
 * validate the interface index, reset the pending-802.1x counter, power the
 * chip on and (non-built-in FW) download firmware via wl_android_wifi_on(),
 * start the bus if not already in DATA state, copy the firmware MAC into the
 * net_device, sync TOE checksum-offload feature flags, bring cfg80211 up,
 * and finally enable the TX queue.
 * NOTE(review): listing gaps hide the error-exit labels, several closing
 * braces and the return; behavior of hidden branches not confirmed.
 */
2997 dhd_open(struct net_device *net)
2999 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
3008 DHD_OS_WAKE_LOCK(&dhd->pub);
3009 dhd->pub.dongle_trap_occured = 0;
3010 dhd->pub.hang_was_sent = 0;
3012 #if !defined(WL_CFG80211)
3014 * Force start if ifconfig_up gets called before START command
3015 * We keep WEXT's wl_control_wl_start to provide backward compatibility
3016 * This should be removed in the future
3018 ret = wl_control_wl_start(net);
3020 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
3027 ifidx = dhd_net2idx(dhd, net);
3028 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
3031 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
3036 if (!dhd->iflist[ifidx]) {
3037 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
3043 atomic_set(&dhd->pend_8021x_cnt, 0);
3044 #if defined(WL_CFG80211)
3045 if (!dhd_download_fw_on_driverload) {
3046 DHD_ERROR(("\n%s\n", dhd_version));
3047 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
3048 g_first_broadcast_scan = TRUE;
3049 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
3050 ret = wl_android_wifi_on(net);
3052 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
3053 __FUNCTION__, ret));
3060 if (dhd->pub.busstate != DHD_BUS_DATA) {
3062 /* try to bring up bus */
3063 if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
3064 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
3071 /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
3072 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
3075 /* Get current TOE mode from dongle */
3076 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
3077 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
3079 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
3082 #if defined(WL_CFG80211)
3083 if (unlikely(wl_cfg80211_up(NULL))) {
3084 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
3088 #endif /* WL_CFG80211 */
3091 /* Allow transmit calls */
3092 netif_start_queue(net);
3096 dhd_dbg_init(&dhd->pub);
3099 OLD_MOD_INC_USE_COUNT;
3104 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * dhd_do_driver_init: idempotent external entry to initialize the driver on
 * the primary interface.  No-op if the bus is already in DATA state;
 * otherwise delegates to dhd_open().
 * NOTE(review): returns and some braces are hidden by listing gaps.
 */
3110 int dhd_do_driver_init(struct net_device *net)
3112 dhd_info_t *dhd = NULL;
3115 DHD_ERROR(("Primary Interface not initialized \n"));
3120 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
3121 dhd = *(dhd_info_t **)netdev_priv(net);
3123 /* If driver is already initialized, do nothing
3125 if (dhd->pub.busstate == DHD_BUS_DATA) {
3126 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
3130 if (dhd_open(net) < 0) {
3131 DHD_ERROR(("Driver Init Failed \n"));
/*
 * dhd_event_ifadd: handle a firmware IF_ADD event.  cfg80211-created
 * interfaces are acknowledged synchronously via wl_cfg80211_notify_ifadd();
 * all other sources (wl commands, SoftAP, WEXT) are deferred to the
 * workqueue so the DPC thread is never blocked waiting on iovar responses.
 * The event payload, MAC and name are copied into a heap dhd_if_event_t
 * that the deferred handler owns and frees.
 * NOTE(review): no NULL check on the MALLOC result is visible in this
 * listing — confirm against the full source.
 */
3139 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
3143 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
3147 /* handle IF event caused by wl commands, SoftAP, WEXT and
3148 * anything else. This has to be done asynchronously otherwise
3149 * DPC will be blocked (and iovars will timeout as DPC has no chance
3150 * to read the response back)
3152 if (ifevent->ifidx > 0) {
3153 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
3155 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
3156 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
3157 strncpy(if_event->name, name, IFNAMSIZ);
3158 if_event->name[IFNAMSIZ - 1] = '\0';
3159 dhd_deferred_schedule_work((void *)if_event, DHD_WQ_WORK_IF_ADD,
3160 dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
/*
 * dhd_event_ifdel: handle a firmware IF_DEL event.  Notifies cfg80211 first
 * (kernels >= 3.6 ignore the return; older kernels may short-circuit when
 * cfg80211 owns the interface), then defers the actual teardown to the
 * workqueue for the same DPC-blocking reason as dhd_event_ifadd.
 * NOTE(review): no NULL check on the MALLOC result is visible in this
 * listing — confirm against the full source.
 */
3167 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
3169 dhd_if_event_t *if_event;
3172 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
3173 wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx);
3175 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
3177 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
3178 #endif /* WL_CFG80211 */
3180 /* handle IF event caused by wl commands, SoftAP, WEXT and
3183 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
3184 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
3185 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
3186 strncpy(if_event->name, name, IFNAMSIZ);
3187 if_event->name[IFNAMSIZ - 1] = '\0';
3188 dhd_deferred_schedule_work((void *)if_event, DHD_WQ_WORK_IF_DEL,
3189 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
3194 /* unregister and free the existing net_device interface (if any) in iflist and
3195 * allocate a new one. the slot is reused. this function does NOT register the
3196 * new interface to linux kernel. dhd_register_if does the job
/*
 * dhd_allocate_if: (re)populate dhdinfo->iflist[ifidx] with a fresh dhd_if_t
 * plus an allocated (but unregistered) etherdev.  An existing net_device in
 * the slot is unregistered first — free_netdev() directly when it was never
 * registered, otherwise via unregister_netdev[ice]() whose destructor
 * (free_netdev) releases it.  The etherdev priv area stores a pointer to
 * dhdinfo (note alloc size is sizeof(dhdinfo), i.e. pointer-sized).
 * On failure the slot is cleared and any partial allocation freed.
 * NOTE(review): gaps hide the rtnl_lock branch selection, NULL-name default
 * path, and return statements.
 */
3199 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
3200 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
3202 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
3205 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
3206 ifp = dhdinfo->iflist[ifidx];
3209 if (ifp->net != NULL) {
3210 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
3212 /* in unregister_netdev case, the interface gets freed by net->destructor
3213 * (which is set to free_netdev)
3215 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
3216 free_netdev(ifp->net);
3218 netif_stop_queue(ifp->net);
3220 unregister_netdev(ifp->net);
3222 unregister_netdevice(ifp->net);
3227 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
3229 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
3234 memset(ifp, 0, sizeof(dhd_if_t));
3235 ifp->info = dhdinfo;
3237 ifp->bssidx = bssidx;
3239 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
3241 /* Allocate etherdev, including space for private structure */
3242 ifp->net = alloc_etherdev(sizeof(dhdinfo));
3243 if (ifp->net == NULL) {
3244 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
/* priv holds a dhd_info_t* — retrieved elsewhere as *(dhd_info_t **)netdev_priv() */
3247 memcpy(netdev_priv(ifp->net), &dhdinfo, sizeof(dhdinfo));
3248 if (name && name[0]) {
3249 strncpy(ifp->net->name, name, IFNAMSIZ);
3250 ifp->net->name[IFNAMSIZ - 1] = '\0';
3252 ifp->net->destructor = free_netdev;
3253 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
3254 ifp->name[IFNAMSIZ - 1] = '\0';
3255 dhdinfo->iflist[ifidx] = ifp;
/* Error path: undo partial allocations and clear the slot */
3260 if (ifp->net != NULL) {
3261 free_netdev(ifp->net);
3264 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
3267 dhdinfo->iflist[ifidx] = NULL;
3271 /* unregister and free the the net_device interface associated with the indexed
3272 * slot, also free the slot memory and set the slot pointer to NULL
/*
 * dhd_remove_if: mirror of dhd_allocate_if's cleanup — unregister/free the
 * slot's net_device (free_netdev() directly only when never registered; the
 * registered case relies on the free_netdev destructor), then free the
 * dhd_if_t and NULL the iflist slot.
 * NOTE(review): gaps hide the rtnl-lock branch selection and early-return
 * for an empty slot.
 */
3275 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
3277 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
3280 ifp = dhdinfo->iflist[ifidx];
3282 if (ifp->net != NULL) {
3283 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
3285 /* in unregister_netdev case, the interface gets freed by net->destructor
3286 * (which is set to free_netdev)
3288 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
3289 free_netdev(ifp->net);
3291 netif_stop_queue(ifp->net);
3294 unregister_netdev(ifp->net);
3296 unregister_netdevice(ifp->net);
3301 dhdinfo->iflist[ifidx] = NULL;
3302 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
/*
 * net_device_ops tables (kernels >= 2.6.31).
 * dhd_ops_pri: primary interface — includes open/stop handlers.
 * dhd_ops_virt: virtual (secondary) interfaces — no open/stop visible here.
 * .ndo_set_rx_mode replaced .ndo_set_multicast_list in kernel 3.2.
 */
3309 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
3310 static struct net_device_ops dhd_ops_pri = {
3311 .ndo_open = dhd_open,
3312 .ndo_stop = dhd_stop,
3313 .ndo_get_stats = dhd_get_stats,
3314 .ndo_do_ioctl = dhd_ioctl_entry,
3315 .ndo_start_xmit = dhd_start_xmit,
3316 .ndo_set_mac_address = dhd_set_mac_address,
3317 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
3318 .ndo_set_rx_mode = dhd_set_multicast_list,
3320 .ndo_set_multicast_list = dhd_set_multicast_list,
3324 static struct net_device_ops dhd_ops_virt = {
3325 .ndo_get_stats = dhd_get_stats,
3326 .ndo_do_ioctl = dhd_ioctl_entry,
3327 .ndo_start_xmit = dhd_start_xmit,
3328 .ndo_set_mac_address = dhd_set_mac_address,
3329 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
3330 .ndo_set_rx_mode = dhd_set_multicast_list,
3332 .ndo_set_multicast_list = dhd_set_multicast_list,
3335 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
3338 extern void debugger_init(void *bus_handle);
/*
 * dhd_attach: one-shot driver attach — allocate and wire up the dhd_info_t
 * for a newly probed bus.  Visible phases, each recorded in dhd_state so the
 * 'fail' path can unwind exactly what was done:
 *   1. resolve the platform adapter (SDIO ids), prealloc/MALLOC dhd_info;
 *   2. init semaphores, spinlocks, wait queues, wakelocks, mutexes;
 *   3. resolve fw/nvram paths, allocate the primary interface (ifidx 0);
 *   4. attach protocol layer, cfg80211 (or WEXT fallback);
 *   5. set up watchdog timer/thread, DPC thread or tasklet, RXF thread;
 *   6. register PM / early-suspend / inet(6)addr notifiers, deferred
 *      workqueue, optional TCP-ACK suppression.
 * On any failure dhd_detach()/dhd_free() unwind based on dhd_state.
 * NOTE(review): listing has many gaps (returns, some #else arms, 'fail:'
 * label) — structure inferred from visible lines only.
 */
3343 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
3345 dhd_info_t *dhd = NULL;
3346 struct net_device *net = NULL;
3347 char if_name[IFNAMSIZ] = {'\0'};
3348 uint32 bus_type = -1;
3349 uint32 bus_num = -1;
3350 uint32 slot_num = -1;
3351 wifi_adapter_info_t *adapter = NULL;
3353 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
3354 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3356 /* will implement get_ids for DBUS later */
3357 #if defined(BCMSDIO)
3358 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
3360 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
3362 /* Allocate primary dhd_info */
3363 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
3365 dhd = MALLOC(osh, sizeof(dhd_info_t));
3367 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
3371 memset(dhd, 0, sizeof(dhd_info_t));
3372 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
3375 dhd->adapter = adapter;
3377 #ifdef GET_CUSTOM_MAC_ENABLE
3378 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
3379 #endif /* GET_CUSTOM_MAC_ENABLE */
/* Threads not started yet — mark pids invalid so teardown can tell */
3380 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
3381 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
3383 /* Initialize thread based operation and lock */
3384 sema_init(&dhd->sdsem, 1);
3386 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
3387 * This is indeed a hack but we have to make it work properly before we have a better
3390 dhd_update_fw_nv_path(dhd);
3392 /* Link to info module */
3393 dhd->pub.info = dhd;
3394 /* Link to bus module */
3396 dhd->pub.hdrlen = bus_hdrlen;
3398 /* Set network interface name if it was provided as module parameter */
3399 if (iface_name[0]) {
3402 strncpy(if_name, iface_name, IFNAMSIZ);
3403 if_name[IFNAMSIZ - 1] = 0;
3404 len = strlen(if_name);
3405 ch = if_name[len - 1];
/* If the name has no trailing digit, append "%d" so the kernel numbers it */
3406 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
3407 strcat(if_name, "%d");
3409 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
3412 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
3414 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
3417 net->netdev_ops = NULL;
3420 sema_init(&dhd->proto_sem, 1);
3422 #ifdef PROP_TXSTATUS
3423 spin_lock_init(&dhd->wlfc_spinlock);
3425 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
3426 dhd->pub.plat_init = dhd_wlfc_plat_init;
3427 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
3428 #endif /* PROP_TXSTATUS */
3430 /* Initialize other structure content */
3431 init_waitqueue_head(&dhd->ioctl_resp_wait);
3432 init_waitqueue_head(&dhd->ctrl_wait);
3434 /* Initialize the spinlocks */
3435 spin_lock_init(&dhd->sdlock);
3436 spin_lock_init(&dhd->txqlock);
3437 spin_lock_init(&dhd->dhd_lock);
3438 spin_lock_init(&dhd->rxf_lock);
3439 #if defined(RXFRAME_THREAD)
3440 dhd->rxthread_enabled = TRUE;
3441 #endif /* defined(RXFRAME_THREAD) */
3443 #ifdef DHDTCPACK_SUPPRESS
3444 spin_lock_init(&dhd->tcpack_lock);
3445 #endif /* DHDTCPACK_SUPPRESS */
3447 /* Initialize Wakelock stuff */
3448 spin_lock_init(&dhd->wakelock_spinlock);
3449 dhd->wakelock_counter = 0;
3450 dhd->wakelock_wd_counter = 0;
3451 dhd->wakelock_rx_timeout_enable = 0;
3452 dhd->wakelock_ctrl_timeout_enable = 0;
3453 dhd->waive_wakelock = FALSE;
3454 #ifdef CONFIG_HAS_WAKELOCK
3455 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
3456 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
3457 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
3458 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
3459 #endif /* CONFIG_HAS_WAKELOCK */
3460 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
3461 mutex_init(&dhd->dhd_net_if_mutex);
3462 mutex_init(&dhd->dhd_suspend_mutex);
3464 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
3466 /* Attach and link in the protocol */
3467 if (dhd_prot_attach(&dhd->pub) != 0) {
3468 DHD_ERROR(("dhd_prot_attach failed\n"));
3471 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
3474 /* Attach and link in the cfg80211 */
3475 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
3476 DHD_ERROR(("wl_cfg80211_attach failed\n"));
3480 dhd_monitor_init(&dhd->pub);
3481 dhd_state |= DHD_ATTACH_STATE_CFG80211;
3483 #if defined(WL_WIRELESS_EXT)
3484 /* Attach and link in the iw */
3485 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
3486 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
3487 DHD_ERROR(("wl_iw_attach failed\n"));
3490 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
3492 #endif /* defined(WL_WIRELESS_EXT) */
3495 /* Set up the watchdog timer */
3496 init_timer(&dhd->timer);
3497 dhd->timer.data = (ulong)dhd;
3498 dhd->timer.function = dhd_watchdog;
3499 dhd->default_wd_interval = dhd_watchdog_ms;
3501 if (dhd_watchdog_prio >= 0) {
3502 /* Initialize watchdog thread */
3503 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
3506 dhd->thr_wdt_ctl.thr_pid = -1;
3510 debugger_init((void *) bus);
3513 /* Set up the bottom half handler */
3514 if (dhd_dpc_prio >= 0) {
3515 /* Initialize DPC thread */
3516 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
3518 /* use tasklet for dpc */
3519 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
3520 dhd->thr_dpc_ctl.thr_pid = -1;
3523 if (dhd->rxthread_enabled) {
3524 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
3525 /* Initialize RXF thread */
3526 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
3529 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
3532 * Save the dhd_info into the priv
3534 memcpy(netdev_priv(net), &dhd, sizeof(dhd));
3536 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
3537 KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
3538 dhd->pm_notifier.notifier_call = dhd_pm_callback;
3539 dhd->pm_notifier.priority = 10;
3540 if (!dhd_pm_notifier_registered) {
3541 dhd_pm_notifier_registered = TRUE;
3542 register_pm_notifier(&dhd->pm_notifier);
3544 #endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP) */
3546 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3547 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
3548 dhd->early_suspend.suspend = dhd_early_suspend;
3549 dhd->early_suspend.resume = dhd_late_resume;
3550 register_early_suspend(&dhd->early_suspend);
3551 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
3552 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3554 #ifdef ARP_OFFLOAD_SUPPORT
3555 dhd->pend_ipaddr = 0;
3556 if (!dhd_inetaddr_notifier_registered) {
3557 dhd_inetaddr_notifier_registered = TRUE;
3558 register_inetaddr_notifier(&dhd_inetaddr_notifier);
3560 #endif /* ARP_OFFLOAD_SUPPORT */
3561 if (!dhd_inet6addr_notifier_registered) {
3562 dhd_inet6addr_notifier_registered = TRUE;
3563 register_inet6addr_notifier(&dhd_inet6addr_notifier);
3565 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
3566 #ifdef DEBUG_CPU_FREQ
3567 dhd->new_freq = alloc_percpu(int);
3568 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
3569 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
3571 #ifdef DHDTCPACK_SUPPRESS
3573 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
3575 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
3576 #endif /* BCMSDIO */
3577 #endif /* DHDTCPACK_SUPPRESS */
3579 dhd_state |= DHD_ATTACH_STATE_DONE;
3580 dhd->dhd_state = dhd_state;
3582 dhd->unit = dhd_found + instance_base;
/* Failure unwind: dhd_detach/dhd_free consult dhd_state to free only what
 * was actually set up */
3587 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
3588 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
3589 __FUNCTION__, dhd_state, &dhd->pub));
3590 dhd->dhd_state = dhd_state;
3591 dhd_detach(&dhd->pub);
3592 dhd_free(&dhd->pub);
/*
 * dhd_get_fw_mode: infer the driver operating mode from substrings embedded
 * in the firmware file name ("_apsta", "_p2p", "_ibss", "_mfg"); defaults
 * to STA mode when none match.  First match wins, in the order listed.
 */
3598 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
3600 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
3601 return DHD_FLAG_HOSTAP_MODE;
3602 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
3603 return DHD_FLAG_P2P_MODE;
3604 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
3605 return DHD_FLAG_IBSS_MODE;
3606 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
3607 return DHD_FLAG_MFG_MODE;
3609 return DHD_FLAG_STA_MODE;
/*
 * dhd_update_fw_nv_path: resolve the firmware and nvram file paths into
 * dhdinfo->fw_path / nv_path.  Precedence visible here:
 *   1. built-in defaults (CONFIG_BCMDHD_*_PATH) when FW is not downloaded
 *      at driver load;
 *   2. adapter-supplied paths, used only for first-time initialization;
 *   3. the firmware_path/nvram_path module parameters, which always win
 *      when non-empty and are cleared after being consumed so they only
 *      take effect again when user space rewrites them.
 * Trailing '\n' (from `echo > /sys/module/...`) is stripped.  Returns
 * FALSE-ish on over-long or missing paths (returns hidden by listing gaps).
 */
3612 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
3616 const char *fw = NULL;
3617 const char *nv = NULL;
3618 wifi_adapter_info_t *adapter = dhdinfo->adapter;
3621 /* Update firmware and nvram path. The path may be from adapter info or module parameter
3622 * The path from adapter info is used for initialization only (as it won't change).
3624 * The firmware_path/nvram_path module parameter may be changed by the system at run
3625 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
3626 * command may change dhdinfo->fw_path. As such we need to clear the path info in
3627 * module parameter after it is copied. We won't update the path until the module parameter
3628 * is changed again (first character is not '\0')
3631 /* set default firmware and nvram path for built-in type driver */
3632 if (!dhd_download_fw_on_driverload) {
3633 #ifdef CONFIG_BCMDHD_FW_PATH
3634 fw = CONFIG_BCMDHD_FW_PATH;
3635 #endif /* CONFIG_BCMDHD_FW_PATH */
3636 #ifdef CONFIG_BCMDHD_NVRAM_PATH
3637 nv = CONFIG_BCMDHD_NVRAM_PATH;
3638 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
3641 /* check if we need to initialize the path */
3642 if (dhdinfo->fw_path[0] == '\0') {
3643 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
3644 fw = adapter->fw_path;
3647 if (dhdinfo->nv_path[0] == '\0') {
3648 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
3649 nv = adapter->nv_path;
3652 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
3654 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
3656 if (firmware_path[0] != '\0')
3658 if (nvram_path[0] != '\0')
3661 if (fw && fw[0] != '\0') {
3662 fw_len = strlen(fw);
3663 if (fw_len >= sizeof(dhdinfo->fw_path)) {
3664 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
3667 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
/* Strip trailing newline left by `echo` into the module parameter */
3668 if (dhdinfo->fw_path[fw_len-1] == '\n')
3669 dhdinfo->fw_path[fw_len-1] = '\0';
3671 if (nv && nv[0] != '\0') {
3672 nv_len = strlen(nv);
3673 if (nv_len >= sizeof(dhdinfo->nv_path)) {
3674 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
3677 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
3678 if (dhdinfo->nv_path[nv_len-1] == '\n')
3679 dhdinfo->nv_path[nv_len-1] = '\0';
3682 /* clear the path in module parameter */
3683 firmware_path[0] = '\0';
3684 nvram_path[0] = '\0';
3686 if (dhdinfo->fw_path[0] == '\0') {
3687 DHD_ERROR(("firmware path not found\n"));
3690 if (dhdinfo->nv_path[0] == '\0') {
3691 DHD_ERROR(("nvram path not found\n"));
/*
 * dhd_bus_start: take the bus from DOWN/LOAD to DATA.  Visible sequence:
 * download firmware+nvram if the bus is down and paths resolve, start the
 * watchdog timer, dhd_bus_init() the bus, register the OOB interrupt (when
 * built with OOB_INTR_ONLY) and enable it in firmware, verify the bus
 * reached DATA state, then run protocol init (dhd_prot_init) bracketed by
 * dhd_process_cid_mac().  Finally replay any host IP address that arrived
 * while the bus was down into the ARP-offload table.
 * Every visible failure path stops the watchdog timer (under the spinlock,
 * then del_timer_sync) and drops the sdlock before returning.
 * NOTE(review): several returns and the #endif pairing for OOB_INTR_ONLY
 * are hidden by listing gaps.
 */
3700 dhd_bus_start(dhd_pub_t *dhdp)
3703 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
3704 unsigned long flags;
3708 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
3710 /* try to download image and nvram to the dongle */
3711 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
3712 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
3713 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
3714 dhd->fw_path, dhd->nv_path);
3716 DHD_ERROR(("%s: failed to download firmware %s\n",
3717 __FUNCTION__, dhd->fw_path));
3721 if (dhd->pub.busstate != DHD_BUS_LOAD) {
3725 dhd_os_sdlock(dhdp);
3727 /* Start the watchdog timer */
3728 dhd->pub.tickcnt = 0;
3729 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
3731 /* Bring up the bus */
3732 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
3734 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
3735 dhd_os_sdunlock(dhdp);
3738 #if defined(OOB_INTR_ONLY)
3739 /* Host registration for OOB interrupt */
3740 if (dhd_bus_oob_intr_register(dhdp)) {
3741 /* deactivate timer and wait for the handler to finish */
3743 flags = dhd_os_spin_lock(&dhd->pub);
3744 dhd->wd_timer_valid = FALSE;
3745 dhd_os_spin_unlock(&dhd->pub, flags);
3746 del_timer_sync(&dhd->timer);
3748 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
3749 dhd_os_sdunlock(dhdp);
3750 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
3754 /* Enable oob at firmware */
3755 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
3758 /* If bus is not ready, can't come up */
3759 if (dhd->pub.busstate != DHD_BUS_DATA) {
3760 flags = dhd_os_spin_lock(&dhd->pub);
3761 dhd->wd_timer_valid = FALSE;
3762 dhd_os_spin_unlock(&dhd->pub, flags);
3763 del_timer_sync(&dhd->timer);
3764 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
3765 dhd_os_sdunlock(dhdp);
3766 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
3770 dhd_os_sdunlock(dhdp);
3772 dhd_process_cid_mac(dhdp, TRUE);
3774 /* Bus is ready, do any protocol initialization */
3775 if ((ret = dhd_prot_init(&dhd->pub)) < 0)
3778 dhd_process_cid_mac(dhdp, FALSE);
3780 #ifdef ARP_OFFLOAD_SUPPORT
/* Replay an IP address that arrived before the bus was up */
3781 if (dhd->pend_ipaddr) {
3782 #ifdef AOE_IP_ALIAS_SUPPORT
3783 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
3784 #endif /* AOE_IP_ALIAS_SUPPORT */
3785 dhd->pend_ipaddr = 0;
3787 #endif /* ARP_OFFLOAD_SUPPORT */
/*
 * _dhd_tdls_enable: enable/disable TDLS in firmware via the "tdls_enable"
 * iovar (skipped if firmware lacks TDLS support or the state is unchanged),
 * then, for the auto-mode path, program "tdls_auto_op", idle time and the
 * high/low RSSI thresholds from their CUSTOM_* defaults.
 * Each iovar failure is logged with the ioctl error code.
 * NOTE(review): returns, the 'exit' label and some braces are hidden by
 * listing gaps; 'mac' is unused in the visible lines — presumably consumed
 * in hidden code or by the caller, confirm against full source.
 */
3792 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
3794 char iovbuf[WLC_IOCTL_SMLEN];
3795 uint32 tdls = tdls_on;
3797 uint32 tdls_auto_op = 0;
3798 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
3799 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
3800 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
3802 if (!FW_SUPPORTED(dhd, tdls))
3805 if (dhd->tdls_enable == tdls_on)
3807 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
3808 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
3809 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
3812 dhd->tdls_enable = tdls_on;
3815 tdls_auto_op = auto_on;
3816 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
3817 iovbuf, sizeof(iovbuf));
3818 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
3819 sizeof(iovbuf), TRUE, 0)) < 0) {
3820 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
3825 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
3826 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
3827 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
3828 sizeof(iovbuf), TRUE, 0)) < 0) {
3829 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
3832 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
3833 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
3834 sizeof(iovbuf), TRUE, 0)) < 0) {
3835 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
3838 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
3839 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
3840 sizeof(iovbuf), TRUE, 0)) < 0) {
3841 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
/*
 * dhd_tdls_enable: net_device-facing wrapper — resolve dhd_info from the
 * device priv and delegate to _dhd_tdls_enable().
 * NOTE(review): NULL-check and return hidden by listing gaps.
 */
3849 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
3851 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
3854 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
/*
 * dhd_is_concurrent_mode: TRUE when op_mode indicates concurrent operation —
 * either multi-channel, or all bits of the single-channel concurrency mask
 * set (note the ==-mask comparison, not a simple bit test).
 * NOTE(review): return statements hidden by listing gaps.
 */
3861 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
3866 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
3868 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
3869 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
3874 #if !defined(AP) && defined(WLP2P)
3875 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
3876 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
3877 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
3878 * would still be named as fw_bcmdhd_apsta.
/*
 * dhd_get_concurrent_capabilites: probe the firmware for P2P/VSDB support
 * and return the concurrency flag set to apply.  HOSTAP/MFG op modes are
 * left alone; otherwise requires the 'p2p' capability AND a readable "p2p"
 * iovar before granting DHD_FLAG_CONCURR_SINGLE_CHAN_MODE, plus
 * DHD_FLAG_CONCURR_MULTI_CHAN_MODE when vsdb is supported.
 * NOTE(review): early returns and the iovar-result check are hidden by
 * listing gaps; the function body continues past the visible lines.
 */
3881 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
3884 char buf[WLC_IOCTL_SMLEN];
3885 bool mchan_supported = FALSE;
3886 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
3887 * test mode, that means we only will use the mode as it is
3889 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
3891 if (FW_SUPPORTED(dhd, vsdb)) {
3892 mchan_supported = TRUE;
3894 if (!FW_SUPPORTED(dhd, p2p)) {
3895 DHD_TRACE(("Chip does not support p2p\n"));
3899 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
3900 memset(buf, 0, sizeof(buf));
3901 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
3902 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
3904 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
3909 /* By default, chip supports single chan concurrency,
3910 * now lets check for mchan
3912 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
3913 if (mchan_supported)
3914 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
3915 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
3916 /* For customer_hw4, although ICS,
3917 * we still support concurrent mode
3932 dhd_preinit_ioctls(dhd_pub_t *dhd)
3935 char eventmask[WL_EVENTING_MASK_LEN];
3936 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
3937 uint32 buf_key_b4_m4 = 1;
3938 #if defined(CUSTOM_AMPDU_BA_WSIZE)
3939 uint32 ampdu_ba_wsize = 0;
3941 #if defined(CUSTOM_AMPDU_MPDU)
3942 uint32 ampdu_mpdu = 0;
3945 #if defined(BCMSDIO)
3946 #ifdef PROP_TXSTATUS
3947 int wlfc_enable = TRUE;
3949 uint32 hostreorder = 1;
3951 #endif /* DISABLE_11N */
3952 #endif /* PROP_TXSTATUS */
3955 #ifdef DHD_ENABLE_LPC
3957 #endif /* DHD_ENABLE_LPC */
3958 uint power_mode = PM_FAST;
3959 uint32 dongle_align = DHD_SDALIGN;
3960 #if defined(BCMSDIO)
3961 uint32 glom = CUSTOM_GLOM_SETTING;
3962 #endif /* defined(BCMSDIO) */
3963 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
3966 uint bcn_timeout = 4;
3968 #if defined(ARP_OFFLOAD_SUPPORT)
3971 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
3972 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
3973 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
3974 char buf[WLC_IOCTL_SMLEN];
3976 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
3979 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
3980 int roam_scan_period[2] = {10, WLC_BAND_ALL};
3981 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
3982 #ifdef ROAM_AP_ENV_DETECTION
3983 int roam_env_mode = AP_ENV_INDETERMINATE;
3984 #endif /* ROAM_AP_ENV_DETECTION */
3985 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
3986 int roam_fullscan_period = 60;
3987 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
3988 int roam_fullscan_period = 120;
3989 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
3991 #ifdef DISABLE_BUILTIN_ROAM
3993 #endif /* DISABLE_BUILTIN_ROAM */
3994 #endif /* ROAM_ENABLE */
3999 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
4000 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
4001 struct ether_addr p2p_ea;
4004 #if defined(AP) || defined(WLP2P)
4005 uint32 apsta = 1; /* Enable APSTA mode */
4006 #endif /* defined(AP) || defined(WLP2P) */
4007 #ifdef GET_CUSTOM_MAC_ENABLE
4008 struct ether_addr ea_addr;
4009 #endif /* GET_CUSTOM_MAC_ENABLE */
4011 #ifdef CUSTOM_AMPDU_BA_WSIZE
4012 struct ampdu_tid_control atc;
4016 #endif /* DISABLE_11N */
4020 #endif /* USE_WL_TXBF */
4021 #ifdef USE_WL_FRAMEBURST
4022 uint32 frameburst = 1;
4023 #endif /* USE_WL_FRAMEBURST */
4024 #ifdef CUSTOM_PSPRETEND_THR
4025 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
4027 #ifdef PKT_FILTER_SUPPORT
4028 dhd_pkt_filter_enable = TRUE;
4029 #endif /* PKT_FILTER_SUPPORT */
4031 dhd->tdls_enable = FALSE;
4033 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
4034 DHD_TRACE(("Enter %s\n", __FUNCTION__));
4036 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
4037 (op_mode == DHD_FLAG_MFG_MODE)) {
4038 /* Check and adjust IOCTL response timeout for Manufactring firmware */
4039 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
4040 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
4044 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4045 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
4047 #ifdef GET_CUSTOM_MAC_ENABLE
4048 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
4050 memset(buf, 0, sizeof(buf));
4051 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
4052 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
4054 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
4057 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
4059 #endif /* GET_CUSTOM_MAC_ENABLE */
4060 /* Get the default device MAC address directly from firmware */
4061 memset(buf, 0, sizeof(buf));
4062 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
4063 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
4065 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
4068 /* Update public MAC address after reading from Firmware */
4069 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
4071 #ifdef GET_CUSTOM_MAC_ENABLE
4073 #endif /* GET_CUSTOM_MAC_ENABLE */
4074 /* get a capabilities from firmware */
4075 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
4076 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
4077 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
4078 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
4079 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
4080 __FUNCTION__, ret));
4083 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
4084 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
4085 #ifdef SET_RANDOM_MAC_SOFTAP
4088 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
4089 #if defined(ARP_OFFLOAD_SUPPORT)
4092 #ifdef PKT_FILTER_SUPPORT
4093 dhd_pkt_filter_enable = FALSE;
4095 #ifdef SET_RANDOM_MAC_SOFTAP
4096 SRANDOM32((uint)jiffies);
4097 rand_mac = RANDOM32();
4098 iovbuf[0] = 0x02; /* locally administered bit */
4101 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
4102 iovbuf[4] = (unsigned char)(rand_mac >> 8);
4103 iovbuf[5] = (unsigned char)(rand_mac >> 16);
4105 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
4106 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
4108 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
4110 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
4111 #endif /* SET_RANDOM_MAC_SOFTAP */
4112 #if !defined(AP) && defined(WL_CFG80211)
4113 /* Turn off MPC in AP mode */
4114 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
4115 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4116 sizeof(iovbuf), TRUE, 0)) < 0) {
4117 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
4120 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
4121 (op_mode == DHD_FLAG_MFG_MODE)) {
4122 #if defined(ARP_OFFLOAD_SUPPORT)
4124 #endif /* ARP_OFFLOAD_SUPPORT */
4125 #ifdef PKT_FILTER_SUPPORT
4126 dhd_pkt_filter_enable = FALSE;
4127 #endif /* PKT_FILTER_SUPPORT */
4128 dhd->op_mode = DHD_FLAG_MFG_MODE;
4130 uint32 concurrent_mode = 0;
4131 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
4132 (op_mode == DHD_FLAG_P2P_MODE)) {
4133 #if defined(ARP_OFFLOAD_SUPPORT)
4136 #ifdef PKT_FILTER_SUPPORT
4137 dhd_pkt_filter_enable = FALSE;
4139 dhd->op_mode = DHD_FLAG_P2P_MODE;
4140 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
4141 (op_mode == DHD_FLAG_IBSS_MODE)) {
4142 dhd->op_mode = DHD_FLAG_IBSS_MODE;
4144 dhd->op_mode = DHD_FLAG_STA_MODE;
4145 #if !defined(AP) && defined(WLP2P)
4146 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
4147 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
4148 #if defined(ARP_OFFLOAD_SUPPORT)
4151 dhd->op_mode |= concurrent_mode;
4154 /* Check if we are enabling p2p */
4155 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
4156 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
4157 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
4158 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4159 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
4162 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
4163 ETHER_SET_LOCALADDR(&p2p_ea);
4164 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
4165 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
4166 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
4167 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4168 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
4170 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
4174 (void)concurrent_mode;
4178 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
4179 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
4180 /* Set Country code */
4181 if (dhd->dhd_cspec.ccode[0] != 0) {
4182 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
4183 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
4184 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4185 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
4189 /* Set Listen Interval */
4190 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
4191 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4192 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
4194 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
4195 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
4196 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
4197 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4198 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
4199 #if defined(ROAM_ENABLE)
4200 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
4201 sizeof(roam_trigger), TRUE, 0)) < 0)
4202 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
4203 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
4204 sizeof(roam_scan_period), TRUE, 0)) < 0)
4205 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
4206 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
4207 sizeof(roam_delta), TRUE, 0)) < 0)
4208 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
4209 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
4210 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4211 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
4212 #ifdef ROAM_AP_ENV_DETECTION
4213 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
4214 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
4215 4, iovbuf, sizeof(iovbuf));
4216 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) == BCME_OK)
4217 dhd->roam_env_detection = TRUE;
4219 dhd->roam_env_detection = FALSE;
4222 #endif /* ROAM_AP_ENV_DETECTION */
4223 #endif /* ROAM_ENABLE */
4226 /* by default TDLS on and auto mode off */
4227 _dhd_tdls_enable(dhd, true, false, NULL);
4230 #ifdef DHD_ENABLE_LPC
4232 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
4233 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4234 sizeof(iovbuf), TRUE, 0)) < 0) {
4235 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
4237 #endif /* DHD_ENABLE_LPC */
4239 /* Set PowerSave mode */
4240 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
4242 /* Match Host and Dongle rx alignment */
4243 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
4244 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4246 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
4247 /* enable credall to reduce the chance of no bus credit happened. */
4248 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
4249 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4252 #if defined(BCMSDIO)
4253 if (glom != DEFAULT_GLOM_VALUE) {
4254 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
4255 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
4256 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4258 #endif /* defined(BCMSDIO) */
4260 /* Setup timeout if Beacons are lost and roam is off to report link down */
4261 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
4262 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4263 /* Setup assoc_retry_max count to reconnect target AP in dongle */
4264 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
4265 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4266 #if defined(AP) && !defined(WLP2P)
4267 /* Turn off MPC in AP mode */
4268 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
4269 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4270 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
4271 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4272 #endif /* defined(AP) && !defined(WLP2P) */
4276 if (ap_fw_loaded == TRUE) {
4277 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
4281 #if defined(KEEP_ALIVE)
4283 /* Set Keep Alive : be sure to use FW with -keepalive */
4287 if (ap_fw_loaded == FALSE)
4289 if (!(dhd->op_mode &
4290 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
4291 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
4292 DHD_ERROR(("%s set keeplive failed %d\n",
4293 __FUNCTION__, res));
4296 #endif /* defined(KEEP_ALIVE) */
4298 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
4299 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4300 sizeof(iovbuf), TRUE, 0)) < 0) {
4301 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
4303 #endif /* USE_WL_TXBF */
4304 #ifdef USE_WL_FRAMEBURST
4305 /* Set frameburst to value */
4306 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
4307 sizeof(frameburst), TRUE, 0)) < 0) {
4308 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
4310 #endif /* USE_WL_FRAMEBURST */
4311 #if defined(CUSTOM_AMPDU_BA_WSIZE)
4312 /* Set ampdu ba wsize to 64 or 16 */
4313 #ifdef CUSTOM_AMPDU_BA_WSIZE
4314 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
4316 if (ampdu_ba_wsize != 0) {
4317 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
4318 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4319 sizeof(iovbuf), TRUE, 0)) < 0) {
4320 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
4321 __FUNCTION__, ampdu_ba_wsize, ret));
4327 bcm_mkiovar("ampdu_rx_tid", (char *)&atc, sizeof(atc), iovbuf, sizeof(iovbuf));
4328 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4331 #if defined(CUSTOM_AMPDU_MPDU)
4332 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
4333 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
4334 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
4335 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4336 sizeof(iovbuf), TRUE, 0)) < 0) {
4337 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
4338 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
4341 #endif /* CUSTOM_AMPDU_MPDU */
4343 #ifdef CUSTOM_PSPRETEND_THR
4344 /* Turn off MPC in AP mode */
4345 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
4346 iovbuf, sizeof(iovbuf));
4347 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4348 sizeof(iovbuf), TRUE, 0)) < 0) {
4349 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
4350 __FUNCTION__, ret));
4354 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
4355 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4356 sizeof(iovbuf), TRUE, 0)) < 0) {
4357 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
4360 /* Read event_msgs mask */
4361 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
4362 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
4363 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
4366 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
4368 /* Setup event_msgs */
4369 setbit(eventmask, WLC_E_SET_SSID);
4370 setbit(eventmask, WLC_E_PRUNE);
4371 setbit(eventmask, WLC_E_AUTH);
4372 setbit(eventmask, WLC_E_ASSOC);
4373 setbit(eventmask, WLC_E_REASSOC);
4374 setbit(eventmask, WLC_E_REASSOC_IND);
4375 setbit(eventmask, WLC_E_DEAUTH);
4376 setbit(eventmask, WLC_E_DEAUTH_IND);
4377 setbit(eventmask, WLC_E_DISASSOC_IND);
4378 setbit(eventmask, WLC_E_DISASSOC);
4379 setbit(eventmask, WLC_E_JOIN);
4380 setbit(eventmask, WLC_E_START);
4381 setbit(eventmask, WLC_E_ASSOC_IND);
4382 setbit(eventmask, WLC_E_PSK_SUP);
4383 setbit(eventmask, WLC_E_LINK);
4384 setbit(eventmask, WLC_E_NDIS_LINK);
4385 setbit(eventmask, WLC_E_MIC_ERROR);
4386 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
4387 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
4389 setbit(eventmask, WLC_E_PMKID_CACHE);
4390 setbit(eventmask, WLC_E_TXFAIL);
4392 setbit(eventmask, WLC_E_JOIN_START);
4393 setbit(eventmask, WLC_E_SCAN_COMPLETE);
4395 setbit(eventmask, WLC_E_HTSFSYNC);
4396 #endif /* WLMEDIA_HTSF */
4398 setbit(eventmask, WLC_E_PFN_NET_FOUND);
4399 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
4400 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
4401 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
4402 #endif /* PNO_SUPPORT */
4403 /* enable dongle roaming event */
4404 setbit(eventmask, WLC_E_ROAM);
4405 setbit(eventmask, WLC_E_BSSID);
4407 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
4410 setbit(eventmask, WLC_E_ESCAN_RESULT);
4411 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
4412 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
4413 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
4415 #endif /* WL_CFG80211 */
4417 /* Write updated Event mask */
4418 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
4419 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4420 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
4424 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
4425 sizeof(scan_assoc_time), TRUE, 0);
4426 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
4427 sizeof(scan_unassoc_time), TRUE, 0);
4428 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
4429 sizeof(scan_passive_time), TRUE, 0);
4431 #ifdef ARP_OFFLOAD_SUPPORT
4432 /* Set and enable ARP offload feature for STA only */
4434 if (arpoe && !ap_fw_loaded) {
4438 dhd_arp_offload_enable(dhd, TRUE);
4439 dhd_arp_offload_set(dhd, dhd_arp_mode);
4441 dhd_arp_offload_enable(dhd, FALSE);
4442 dhd_arp_offload_set(dhd, 0);
4444 dhd_arp_enable = arpoe;
4445 #endif /* ARP_OFFLOAD_SUPPORT */
4447 #ifdef PKT_FILTER_SUPPORT
4448 /* Setup default defintions for pktfilter , enable in suspend */
4449 dhd->pktfilter_count = 6;
4450 /* Setup filter to allow only unicast */
4451 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
4452 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
4453 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
4454 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
4455 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
4456 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
4457 /* apply APP pktfilter */
4458 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
4463 dhd_enable_packet_filter(0, dhd);
4465 #endif /* defined(SOFTAP) */
4466 dhd_set_packet_filter(dhd);
4467 #endif /* PKT_FILTER_SUPPORT */
4469 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
4470 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4471 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
4472 #endif /* DISABLE_11N */
4475 /* query for 'ver' to get version info from firmware */
4476 memset(buf, 0, sizeof(buf));
4478 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
4479 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
4480 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
4482 bcmstrtok(&ptr, "\n", 0);
4483 /* Print fw version info */
4484 DHD_ERROR(("Firmware version = %s\n", buf));
4485 #if defined(BCMSDIO)
4486 dhd_set_version_info(dhd, buf);
4487 #endif /* defined(BCMSDIO) */
4490 #if defined(BCMSDIO)
4491 dhd_txglom_enable(dhd, TRUE);
4492 #endif /* defined(BCMSDIO) */
4494 #if defined(BCMSDIO)
4495 #ifdef PROP_TXSTATUS
4496 if (disable_proptx ||
4497 #ifdef PROP_TXSTATUS_VSDB
4498 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
4499 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
4500 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
4501 #endif /* PROP_TXSTATUS_VSDB */
4503 wlfc_enable = FALSE;
4507 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
4508 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4509 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
4510 if (ret2 != BCME_OK)
4513 #endif /* DISABLE_11N */
4517 else if (hostreorder)
4518 dhd_wlfc_hostreorder_init(dhd);
4519 #endif /* DISABLE_11N */
4521 #endif /* PROP_TXSTATUS */
4522 #endif /* BCMSDIO || BCMBUS */
4524 if (!dhd->pno_state) {
4529 dhd_interworking_enable(dhd);
/* Program per-TID A-MPDU RX aggregation: each of the 8 bits in
 * 'ampdu_rx_tid' enables/disables one traffic class (CS7 - CS0), pushed to
 * the dongle one "ampdu_rx_tid" iovar at a time.
 * NOTE(review): sampled fragment -- declarations of 'i', 'ret', 'iovbuf'
 * and the atc.tid assignment are outside the visible lines.
 */
4536 void dhd_set_ampdu_rx_tid(struct net_device *dev, int ampdu_rx_tid)
4539 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
4540 dhd_pub_t *pub = &dhd->pub;
4542 for (i = 0; i < 8; i++) { /* One bit each for traffic class CS7 - CS0 */
4543 struct ampdu_tid_control atc;
4545 atc.enable = (ampdu_rx_tid >> i) & 1;
4546 bcm_mkiovar("ampdu_rx_tid", (char *)&atc, sizeof(atc), iovbuf,sizeof(iovbuf));
4547 ret = dhd_wl_ioctl_cmd(pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf),TRUE, 0);
4549 DHD_ERROR(("%s failed %d\n", __func__, ret));
/* Generic iovar get/set helper: packs 'name' + 'cmd_buf' with bcm_mkiovar
 * into a stack VLA, issues WLC_SET_VAR or WLC_GET_VAR on interface 'ifidx',
 * and on a successful GET copies the result back into 'cmd_buf'.
 * NOTE(review): sampled fragment -- the wl_ioctl_t 'ioc' declaration, the
 * remaining ioc field assignments and the return are outside the visible
 * lines.
 */
4554 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
4556 char buf[strlen(name) + 1 + cmd_len]; /* VLA sized for "name\0" + payload */
4557 int len = sizeof(buf);
4561 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
4563 memset(&ioc, 0, sizeof(ioc));
4565 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
4570 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
4571 if (!set && ret >= 0)
4572 memcpy(cmd_buf, buf, cmd_len);
/* Validate and apply a new MTU for interface 'ifidx'.  Rejects the change
 * while the netdev is running (BCME_NOTDOWN) and enforces the
 * [DHD_MIN_MTU, DHD_MAX_MTU] range.
 * NOTE(review): sampled fragment -- the out-of-range return and the final
 * assignment/return are outside the visible lines.
 */
4577 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
4579 struct dhd_info *dhd = dhdp->info;
4580 struct net_device *dev = NULL;
4582 ASSERT(dhd && dhd->iflist[ifidx]);
4583 dev = dhd->iflist[ifidx]->net;
4586 if (netif_running(dev)) {
4587 DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
4588 return BCME_NOTDOWN;
4591 #define DHD_MIN_MTU 1500
4592 #define DHD_MAX_MTU 1752
4594 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
4595 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
4603 #ifdef ARP_OFFLOAD_SUPPORT
4604 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/* Read the dongle's ARP-offload host-IP table into a local copy, clear the
 * dongle-side table, add/remove 'ipa' in the local copy, then write the
 * surviving entries back one by one.
 * NOTE(review): sampled fragment -- declarations of 'i'/'ret', the error
 * path after the clear, and the function's closing brace are outside the
 * visible lines.
 */
4606 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
4608 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
4612 bzero(ipv4_buf, sizeof(ipv4_buf));
4614 /* display what we've got */
4615 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
4616 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
4618 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
4620 /* now we saved hoste_ip table, clr it in the dongle AOE */
4621 dhd_aoe_hostip_clr(dhd_pub, idx);
4624 DHD_ERROR(("%s failed\n", __FUNCTION__));
4628 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
4629 if (add && (ipv4_buf[i] == 0)) {
4631 add = FALSE; /* added ipa to local table */
4632 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
4634 } else if (ipv4_buf[i] == ipa) {
4636 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
4637 __FUNCTION__, ipa, i));
4640 if (ipv4_buf[i] != 0) {
4641 /* add back host_ip entries from our local cache */
4642 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
4643 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
4644 __FUNCTION__, ipv4_buf[i], i));
4648 /* see the resulting hostip table */
4649 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
4650 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
4651 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
4656 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
4657 * whenever there is an event related to an IP address.
4658 * ptr : kernel provided pointer to IP address that has changed
/* inetaddr notifier callback: keeps the dongle's ARP-offload host-IP table
 * in sync with kernel IPv4 address changes.  Ignores events for non-DHD
 * netdevs, maps the netdev to an interface index, and on NETDEV_UP either
 * defers the address (bus not ready -> pend_ipaddr) or programs it; on the
 * down path it clears the offload tables.
 * NOTE(review): sampled fragment -- local declarations, the switch(event)
 * statement and several returns are outside the visible lines.
 */
4660 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
4661 unsigned long event,
4664 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4670 if (!dhd_arp_enable)
4672 if (!ifa || !(ifa->ifa_dev->dev))
4675 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
4676 /* Filter notifications meant for non Broadcom devices */
4677 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
4678 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
4679 #if defined(WL_ENABLE_P2P_IF)
4680 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
4681 #endif /* WL_ENABLE_P2P_IF */
4684 #endif /* LINUX_VERSION_CODE */
4686 dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
4690 dhd_pub = &dhd->pub;
4692 if (dhd_pub->arp_version == 1) {
/* map the notifying netdev to our interface index */
4696 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
4697 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
4700 if (idx < DHD_MAX_IFS)
4701 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
4702 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
4704 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
4711 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
4712 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
4714 if (dhd->pub.busstate != DHD_BUS_DATA) {
4715 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
4716 if (dhd->pend_ipaddr) {
4717 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
4718 __FUNCTION__, dhd->pend_ipaddr));
4720 dhd->pend_ipaddr = ifa->ifa_address;
4724 #ifdef AOE_IP_ALIAS_SUPPORT
4725 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
4727 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
4728 #endif /* AOE_IP_ALIAS_SUPPORT */
4732 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
4733 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
4734 dhd->pend_ipaddr = 0;
4735 #ifdef AOE_IP_ALIAS_SUPPORT
4736 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
4738 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
4740 dhd_aoe_hostip_clr(&dhd->pub, idx);
4741 dhd_aoe_arp_clr(&dhd->pub, idx);
4742 #endif /* AOE_IP_ALIAS_SUPPORT */
4746 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
4747 __func__, ifa->ifa_label, event));
4754 /* Neighbor Discovery Offload: deferred handler */
/* Deferred-work handler for IPv6 address events: enables/disables the
 * dongle's Neighbor Discovery Offload and adds/removes host IPv6 addresses,
 * depending on the event recorded in the queued ipv6_work_info_t.
 * NOTE(review): sampled fragment -- the NULL checks' return statements,
 * the switch/case labels, and the kfree of 'ndo_work' are outside the
 * visible lines (the trailing comment shows the free happens here).
 */
4756 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
4758 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
4759 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
4762 if (event != DHD_WQ_WORK_IPV6_NDO) {
4763 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4768 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
4773 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
4777 if (ndo_work->if_idx) {
4778 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
4782 switch (ndo_work->event) {
4784 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
4785 ret = dhd_ndo_enable(pub, TRUE);
4787 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
4790 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
4792 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
4793 __FUNCTION__, ret));
4797 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
4798 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
4800 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
4801 __FUNCTION__, ret));
4805 ret = dhd_ndo_enable(pub, FALSE);
4807 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
4812 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
4816 /* free ndo_work. alloced while scheduling the work */
4823 * Neighbor Discovery Offload: Called when an interface
4824 * is assigned with ipv6 address.
4825 * Handles only primary interface
/* inet6addr notifier callback: filters for the DHD primary interface,
 * checks firmware 'ndoe' support, snapshots the event + address into a
 * kzalloc'd ipv6_work_info_t and hands it to the deferred work queue
 * (which frees it -- see dhd_inet6_work_handler).
 * NOTE(review): sampled fragment -- local dhd/dhd_pub declarations, the
 * early-return bodies and NOTIFY_* return are outside the visible lines.
 */
4827 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
4828 unsigned long event,
4833 struct inet6_ifaddr *inet6_ifa = ptr;
4834 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
4835 struct ipv6_work_info_t *ndo_info;
4836 int idx = 0; /* REVISIT */
4838 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
4839 /* Filter notifications meant for non Broadcom devices */
4840 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
4843 #endif /* LINUX_VERSION_CODE */
4845 dhd = *(dhd_info_t **)netdev_priv(inet6_ifa->idev->dev);
4849 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
4851 dhd_pub = &dhd->pub;
4852 if (!FW_SUPPORTED(dhd_pub, ndoe))
/* GFP_ATOMIC: notifier may run in atomic context */
4855 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
4857 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
4861 ndo_info->event = event;
4862 ndo_info->if_idx = idx;
4863 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
4865 /* defer the work to thread as it may block kernel */
4866 dhd_deferred_schedule_work((void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
4867 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
/* Finish bringing up net_device 'ifidx': install the dhd net_device_ops
 * (or legacy per-field callbacks on kernels < 2.6.31), pick the MAC
 * address (primary MAC for ifidx 0, per-interface MAC with the locally
 * administered bit for clones of the primary), set header length and
 * ethtool/wireless-ext hooks, then register_netdev/register_netdevice
 * depending on 'need_rtnl_lock'.  On Android-style deferred-download
 * builds it also powers the chip back down after registration.
 * NOTE(review): sampled fragment -- 'err' declaration, several if/else
 * headers, the 'fail:' label and final return are outside the visible
 * lines.
 */
4872 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
4874 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4875 struct net_device *net = NULL;
4877 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
4879 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
4881 ASSERT(dhd && dhd->iflist[ifidx]);
4882 net = dhd->iflist[ifidx]->net;
4885 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4887 net->get_stats = dhd_get_stats;
4888 net->do_ioctl = dhd_ioctl_entry;
4889 net->hard_start_xmit = dhd_start_xmit;
4890 net->set_mac_address = dhd_set_mac_address;
4891 net->set_multicast_list = dhd_set_multicast_list;
4892 net->open = net->stop = NULL;
4894 ASSERT(!net->netdev_ops);
4895 net->netdev_ops = &dhd_ops_virt;
4896 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
4898 /* Ok, link into the network layer... */
4901 * device functions for the primary interface only
4903 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4904 net->open = dhd_open;
4905 net->stop = dhd_stop;
4907 net->netdev_ops = &dhd_ops_pri;
4908 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
4909 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
4910 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
4913 * We have to use the primary MAC for virtual interfaces
4915 memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
4917 * Android sets the locally administered bit to indicate that this is a
4918 * portable hotspot. This will not work in simultaneous AP/STA mode,
4919 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
4921 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
4923 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
4924 __func__, net->name));
4925 temp_addr[0] |= 0x02;
4929 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
4930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
4931 net->ethtool_ops = &dhd_ethtool_ops;
4932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
4934 #if defined(WL_WIRELESS_EXT)
4935 #if WIRELESS_EXT < 19
4936 net->get_wireless_stats = dhd_get_wireless_stats;
4937 #endif /* WIRELESS_EXT < 19 */
4938 #if WIRELESS_EXT > 12
4939 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
4940 #endif /* WIRELESS_EXT > 12 */
4941 #endif /* defined(WL_WIRELESS_EXT) */
4943 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
4945 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
4948 printf("%s\n", dhd_version);
/* register_netdev takes rtnl_lock itself; register_netdevice assumes caller holds it */
4951 err = register_netdev(net);
4953 err = register_netdevice(net);
4956 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
4961 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
4962 MAC2STRDBG(net->dev_addr));
4964 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
4965 wl_iw_iscan_set_scan_broadcast_prep(net, 1);
4968 #if defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
4971 up(&dhd_registration_sem);
4973 if (!dhd_download_fw_on_driverload) {
4975 dhd_net_bus_devreset(net, TRUE);
4976 dhd_net_bus_suspend(net);
4977 #endif /* BCMSDIO */
4978 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
4981 #endif /* OEM_ANDROID && BCMLXSDMMC && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
4985 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
4988 net->netdev_ops = NULL;
/* Stop the protocol and bus modules for this DHD instance. Called during
 * teardown (see dhd_detach()); skips the stop sequence when the bus state is
 * already DHD_BUS_DOWN so we don't touch a dead bus.
 */
4994 dhd_bus_detach(dhd_pub_t *dhdp)
4998 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5001 dhd = (dhd_info_t *)dhdp->info;
5005 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
5006 * calling stop again will cause SD read/write errors.
5008 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5009 /* Stop the protocol module */
5010 dhd_prot_stop(&dhd->pub);
5012 /* Stop the bus module */
5013 dhd_bus_stop(dhd->pub.bus, TRUE);
5016 #if defined(OOB_INTR_ONLY)
/* Also unhook the out-of-band interrupt when OOB interrupts are in use */
5017 dhd_bus_oob_intr_unregister(dhdp)
/* Full driver detach: unwinds everything dhd_attach() set up, in reverse
 * order, guarded by the DHD_ATTACH_STATE_* bits so a partially-failed attach
 * can be torn down safely.
 */
5024 void dhd_detach(dhd_pub_t *dhdp)
5027 unsigned long flags;
5028 int timer_valid = FALSE;
5033 dhd = (dhd_info_t *)dhdp->info;
5037 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
5040 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
5041 /* Give sufficient time for threads to start running in case
5042 * dhd_attach() has failed
5047 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
5048 dhd_bus_detach(dhdp);
5051 dhd_prot_detach(dhdp);
/* Unregister the IPv4/IPv6 address notifiers registered at attach time */
5054 #ifdef ARP_OFFLOAD_SUPPORT
5055 if (dhd_inetaddr_notifier_registered) {
5056 dhd_inetaddr_notifier_registered = FALSE;
5057 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
5059 #endif /* ARP_OFFLOAD_SUPPORT */
5060 if (dhd_inet6addr_notifier_registered) {
5061 dhd_inet6addr_notifier_registered = FALSE;
5062 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
5065 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5066 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
5067 if (dhd->early_suspend.suspend)
5068 unregister_early_suspend(&dhd->early_suspend);
5070 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
5072 #if defined(WL_WIRELESS_EXT)
5073 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
5074 /* Detach and unlink in the iw */
5077 #endif /* defined(WL_WIRELESS_EXT) */
5079 /* delete all interfaces, start with virtual */
5080 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
5084 /* Cleanup virtual interfaces */
5085 dhd_net_if_lock_local(dhd);
5086 for (i = 1; i < DHD_MAX_IFS; i++) {
5088 dhd_remove_if(&dhd->pub, i, TRUE);
5090 dhd_net_if_unlock_local(dhd);
5092 /* delete primary interface 0 */
5093 ifp = dhd->iflist[0];
5096 if (ifp && ifp->net) {
5097 /* in unregister_netdev case, the interface gets freed by net->destructor
5098 * (which is set to free_netdev)
5100 if (ifp->net->reg_state == NETREG_UNINITIALIZED)
5101 free_netdev(ifp->net);
5103 unregister_netdev(ifp->net);
5105 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
5106 dhd->iflist[0] = NULL;
/* Invalidate the watchdog timer under the spinlock before del_timer_sync()
 * so a concurrent rearm path sees wd_timer_valid == FALSE.
 */
5110 /* Clear the watchdog timer */
5111 flags = dhd_os_spin_lock(&dhd->pub);
5112 timer_valid = dhd->wd_timer_valid;
5113 dhd->wd_timer_valid = FALSE;
5114 dhd_os_spin_unlock(&dhd->pub, flags);
5116 del_timer_sync(&dhd->timer);
/* Stop the watchdog / rxf / dpc kernel threads (or kill the tasklet) */
5118 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
5119 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5120 PROC_STOP(&dhd->thr_wdt_ctl);
5123 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
5124 PROC_STOP(&dhd->thr_rxf_ctl);
5127 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5128 PROC_STOP(&dhd->thr_dpc_ctl);
5130 tasklet_kill(&dhd->tasklet);
5133 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
5134 wl_cfg80211_detach(NULL);
5135 dhd_monitor_uninit();
5138 /* free deferred work queue */
5139 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
5140 dhd->dhd_deferred_wq = NULL;
5143 if (dhdp->pno_state)
5144 dhd_pno_deinit(dhdp);
5146 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
5147 KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
5148 if (dhd_pm_notifier_registered) {
5149 unregister_pm_notifier(&dhd->pm_notifier);
5150 dhd_pm_notifier_registered = FALSE;
5152 #endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP) */
5153 #ifdef DEBUG_CPU_FREQ
5155 free_percpu(dhd->new_freq);
5156 dhd->new_freq = NULL;
5157 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
/* Reset all wakelock counters and destroy the wakelocks created at attach */
5159 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
5160 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
5161 #ifdef CONFIG_HAS_WAKELOCK
5162 dhd->wakelock_counter = 0;
5163 dhd->wakelock_wd_counter = 0;
5164 dhd->wakelock_rx_timeout_enable = 0;
5165 dhd->wakelock_ctrl_timeout_enable = 0;
5166 wake_lock_destroy(&dhd->wl_wifi);
5167 wake_lock_destroy(&dhd->wl_rxwake);
5168 wake_lock_destroy(&dhd->wl_ctrlwake);
5169 wake_lock_destroy(&dhd->wl_wdwake);
5170 #endif /* CONFIG_HAS_WAKELOCK */
5174 #ifdef DHDTCPACK_SUPPRESS
5175 /* This will free all MEM allocated for TCPACK SUPPRESS */
5176 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5177 #endif /* DHDTCPACK_SUPPRESS */
/* Free per-device memory: the rx reorder buffers, then the dhd_info_t
 * itself — unless that struct came from the platform preallocation pool,
 * in which case it must not be MFREE'd.
 */
5182 dhd_free(dhd_pub_t *dhdp)
5185 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5189 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
5190 if (dhdp->reorder_bufs[i]) {
5191 reorder_info_t *ptr;
5192 uint32 buf_size = sizeof(struct reorder_info);
5194 ptr = dhdp->reorder_bufs[i];
/* Each reorder buf carries (max_idx + 1) packet pointers after the header */
5196 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
5197 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
5198 i, ptr->max_idx, buf_size));
5200 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
5201 dhdp->reorder_bufs[i] = NULL;
5204 dhd = (dhd_info_t *)dhdp->info;
5205 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
5207 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
5208 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
/* Module exit hook: unregister the bus and the platform driver. */
5214 dhd_module_cleanup(void)
5216 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5218 dhd_bus_unregister();
5222 dhd_wifi_platform_unregister_drv();
/* Module init hook: register the WiFi platform driver; the bus comes up
 * from the platform-driver probe path.
 */
5226 dhd_module_init(void)
5230 DHD_ERROR(("%s in\n", __FUNCTION__));
5231 err = dhd_wifi_platform_register_drv();
/* Initcall selection: deferred / late_initcall_sync / late_initcall on
 * kernels >= 2.6.0, plain module_init otherwise.
 */
5237 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5238 #if defined(CONFIG_DEFERRED_INITCALLS)
5239 deferred_module_init(dhd_module_init);
5240 #elif defined(USE_LATE_INITCALL_SYNC)
5241 late_initcall_sync(dhd_module_init);
5243 late_initcall(dhd_module_init);
5244 #endif /* USE_LATE_INITCALL_SYNC */
5246 module_init(dhd_module_init);
5247 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5249 module_exit(dhd_module_cleanup);
5252 * OS specific functions required to implement DHD driver in OS independent way
/* Serialize protocol-layer access via the per-device proto semaphore. */
5255 dhd_os_proto_block(dhd_pub_t *pub)
5257 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
5260 down(&dhd->proto_sem);
/* Release the proto semaphore taken by dhd_os_proto_block(). */
5268 dhd_os_proto_unblock(dhd_pub_t *pub)
5270 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
5273 up(&dhd->proto_sem);
/* Accessors for the global ioctl response timeout (milliseconds). */
5281 dhd_os_get_ioctl_resp_timeout(void)
5283 return ((unsigned int)dhd_ioctl_timeout_msec);
5287 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
5289 dhd_ioctl_timeout_msec = (int)timeout_msec;
/* Sleep on ioctl_resp_wait until *condition becomes true or the ioctl
 * timeout elapses; returns the wait_event_timeout() result.
 */
5293 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
5295 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
5298 /* Convert timeout in millisecond to jiffies */
5299 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
5300 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
5302 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
5305 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
/* Wake any waiter blocked in dhd_os_ioctl_resp_wait(). */
5310 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
5312 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5314 wake_up(&dhd->ioctl_resp_wait);
/* Switch the watchdog between the extended interval and the default one. */
5319 dhd_os_wd_timer_extend(void *bus, bool extend)
5321 dhd_pub_t *pub = bus;
5322 dhd_info_t *dhd = (dhd_info_t *)pub->info;
5325 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
5327 dhd_os_wd_timer(bus, dhd->default_wd_interval);
/* (Re)arm or stop the DHD watchdog timer. wdtick == 0 stops the timer;
 * otherwise the timer is rearmed wdtick ms from now. The wd_timer_valid
 * flag is manipulated under the dhd spinlock; del_timer_sync() is called
 * only after dropping it to avoid deadlock with the timer callback.
 */
5332 dhd_os_wd_timer(void *bus, uint wdtick)
5334 dhd_pub_t *pub = bus;
5335 dhd_info_t *dhd = (dhd_info_t *)pub->info;
5336 unsigned long flags;
5338 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5341 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
5345 flags = dhd_os_spin_lock(pub);
5347 /* don't start the wd until fw is loaded */
5348 if (pub->busstate == DHD_BUS_DOWN) {
5349 dhd_os_spin_unlock(pub, flags);
5351 DHD_OS_WD_WAKE_UNLOCK(pub);
5355 /* Totally stop the timer */
5356 if (!wdtick && dhd->wd_timer_valid == TRUE) {
5357 dhd->wd_timer_valid = FALSE;
5358 dhd_os_spin_unlock(pub, flags);
5359 del_timer_sync(&dhd->timer);
5360 DHD_OS_WD_WAKE_UNLOCK(pub);
/* Hold the watchdog wakelock while the timer is armed */
5365 DHD_OS_WD_WAKE_LOCK(pub);
5366 dhd_watchdog_ms = (uint)wdtick;
5367 /* Re arm the timer, at last watchdog period */
5368 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
5369 dhd->wd_timer_valid = TRUE;
5371 dhd_os_spin_unlock(pub, flags);
/* Open the firmware/nvram image file for kernel-side reading. */
5375 dhd_os_open_image(char *filename)
5379 fp = filp_open(filename, O_RDONLY, 0);
5381 * 2.6.11 (FC4) supports filp_open() but later revs don't?
5383 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
/* Read up to len bytes from the image at the file's current position. */
5393 dhd_os_get_image_block(char *buf, int len, void *image)
5395 struct file *fp = (struct file *)image;
5401 rdlen = kernel_read(fp, fp->f_pos, buf, len);
/* Close the image handle opened by dhd_os_open_image(). */
5409 dhd_os_close_image(void *image)
5412 filp_close((struct file *)image, NULL);
/* SDIO bus lock: a semaphore when the DPC runs as a real-time thread
 * (dhd_dpc_prio >= 0), a BH spinlock when it runs as a tasklet.
 */
5416 dhd_os_sdlock(dhd_pub_t *pub)
5420 dhd = (dhd_info_t *)(pub->info);
5422 if (dhd_dpc_prio >= 0)
5425 spin_lock_bh(&dhd->sdlock);
5429 dhd_os_sdunlock(dhd_pub_t *pub)
5433 dhd = (dhd_info_t *)(pub->info);
5435 if (dhd_dpc_prio >= 0)
5438 spin_unlock_bh(&dhd->sdlock);
/* TX queue lock (BH spinlock). */
5442 dhd_os_sdlock_txq(dhd_pub_t *pub)
5446 dhd = (dhd_info_t *)(pub->info);
5447 spin_lock_bh(&dhd->txqlock);
5451 dhd_os_sdunlock_txq(dhd_pub_t *pub)
5455 dhd = (dhd_info_t *)(pub->info);
5456 spin_unlock_bh(&dhd->txqlock);
/* RX queue lock hooks — no-ops in this build. */
5460 dhd_os_sdlock_rxq(dhd_pub_t *pub)
5465 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
/* TX lock aliases that reuse the SDIO bus lock. */
5470 dhd_os_sdtxlock(dhd_pub_t *pub)
5476 dhd_os_sdtxunlock(dhd_pub_t *pub)
5478 dhd_os_sdunlock(pub);
/* RX frame thread lock (BH spinlock). */
5482 dhd_os_rxflock(dhd_pub_t *pub)
5486 dhd = (dhd_info_t *)(pub->info);
5487 spin_lock_bh(&dhd->rxf_lock);
5492 dhd_os_rxfunlock(dhd_pub_t *pub)
5496 dhd = (dhd_info_t *)(pub->info);
5497 spin_unlock_bh(&dhd->rxf_lock);
/* TCP-ACK suppression table lock (BH spinlock). */
5500 #ifdef DHDTCPACK_SUPPRESS
5502 dhd_os_tcpacklock(dhd_pub_t *pub)
5506 dhd = (dhd_info_t *)(pub->info);
5507 spin_lock_bh(&dhd->tcpack_lock);
5512 dhd_os_tcpackunlock(dhd_pub_t *pub)
5516 dhd = (dhd_info_t *)(pub->info);
5517 spin_unlock_bh(&dhd->tcpack_lock);
5519 #endif /* DHDTCPACK_SUPPRESS */
/* Get a buffer from the platform preallocation pool for the given section;
 * optionally fall back to kmalloc when the pool has nothing.
 */
5521 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
/* GFP_ATOMIC when we may be in atomic context (CAN_SLEEP() false) */
5524 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
5526 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
5527 if (buf == NULL && kmalloc_if_fail)
5528 buf = kmalloc(size, flags);
/* Release counterpart for dhd_os_prealloc(); pool buffers need no free. */
5533 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
5537 #if defined(WL_WIRELESS_EXT)
/* Wireless-extensions stats callback: fill and return the cached
 * iw_statistics for this net_device via wl_iw_get_wireless_stats().
 */
5538 struct iw_statistics *
5539 dhd_get_wireless_stats(struct net_device *dev)
5542 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5548 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
5551 return &dhd->iw.wstats;
5555 #endif /* defined(WL_WIRELESS_EXT) */
/* Decode a dongle event via wl_host_event() and forward it to the WEXT
 * and/or cfg80211 layers for the interface identified by *ifidx.
 */
5558 dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
5559 wl_event_msg_t *event, void **data)
5562 ASSERT(dhd != NULL);
5564 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
5565 if (bcmerror != BCME_OK)
5568 #if defined(WL_WIRELESS_EXT)
5569 if (event->bsscfgidx == 0) {
5571 * Wireless ext is on primary interface only
5574 ASSERT(dhd->iflist[*ifidx] != NULL);
5575 ASSERT(dhd->iflist[*ifidx]->net != NULL);
5577 if (dhd->iflist[*ifidx]->net) {
5578 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
5581 #endif /* defined(WL_WIRELESS_EXT) */
5584 ASSERT(dhd->iflist[*ifidx] != NULL);
5585 ASSERT(dhd->iflist[*ifidx]->net != NULL);
5586 if (dhd->iflist[*ifidx]->net)
5587 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
5588 #endif /* defined(WL_CFG80211) */
5593 /* send up locally generated event */
/* Dispatch a host-generated event upward, keyed on its event type. */
5595 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
5597 switch (ntoh32(event->event_type)) {
5604 #ifdef LOG_INTO_TCPDUMP
/* Wrap a log buffer in a BRCM-ethertype Ethernet frame addressed to/from
 * our own MAC (source has the locally-administered bit toggled) and inject
 * it into the network stack so tcpdump can capture driver logs.
 *
 * Fix: HTML-entity mojibake had turned the four `&eth` references below
 * into the character `ð` (U+00F0, the entity `&eth;`), which is not
 * valid C. Restored `&eth.ether_dhost`, `&eth.ether_shost` (x2) and
 * `(void *)&eth`. No other token changed.
 */
5606 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
5608 struct sk_buff *p, *skb;
5615 struct ether_header eth;
5617 pktlen = sizeof(eth) + data_len;
5620 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
5621 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
5623 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
5624 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
5625 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
5626 eth.ether_type = hton16(ETHER_TYPE_BRCM);
5628 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
5629 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
5630 skb = PKTTONATIVE(dhdp->osh, p);
5631 skb_data = skb->data;
/* Deliver on "wlan0" when present, else fall back to the primary ifp */
5634 ifidx = dhd_ifname2idx(dhd, "wlan0");
5635 ifp = dhd->iflist[ifidx];
5637 ifp = dhd->iflist[0];
5640 skb->dev = ifp->net;
5641 skb->protocol = eth_type_trans(skb, skb->dev);
5642 skb->data = skb_data;
5645 /* Strip header, count, deliver upward */
5646 skb_pull(skb, ETH_HLEN);
5648 /* Send the packet */
5649 if (in_interrupt()) {
5656 /* Could not allocate a sk_buf */
5657 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
5660 #endif /* LOG_INTO_TCPDUMP */
/* Block the caller (with the SDIO lock dropped) until *lockvar clears or
 * the ioctl-response timeout elapses. BCMSDIO builds only.
 */
5662 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
5664 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
5665 struct dhd_info *dhdinfo = dhd->info;
5667 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
5668 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
5670 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
5671 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
/* Drop the SD lock while sleeping so the bus can make progress */
5673 dhd_os_sdunlock(dhd);
5674 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
5676 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
/* Wake anyone sleeping in dhd_wait_for_event(). */
5680 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
5682 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
5683 struct dhd_info *dhdinfo = dhd->info;
5684 if (waitqueue_active(&dhdinfo->ctrl_wait))
5685 wake_up(&dhdinfo->ctrl_wait);
/* Reset the bus for this net_device. flag == TRUE means going down: issue
 * WLC_DOWN, tear down wlfc/pno state, then dhd_bus_devreset(); on the way
 * up the firmware/nvram paths are refreshed first.
 */
5692 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
5696 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5699 /* Issue wl down command before resetting the chip */
5700 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5701 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
5703 #ifdef PROP_TXSTATUS
5704 if (dhd->pub.wlfc_enabled)
5705 dhd_wlfc_deinit(&dhd->pub);
5706 #endif /* PROP_TXSTATUS */
5708 if (dhd->pub.pno_state)
5709 dhd_pno_deinit(&dhd->pub);
5714 dhd_update_fw_nv_path(dhd);
5715 /* update firmware and nvram path to sdio bus */
5716 dhd_bus_update_fw_nv_path(dhd->pub.bus,
5717 dhd->fw_path, dhd->nv_path);
5720 ret = dhd_bus_devreset(&dhd->pub, flag);
5722 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
/* Thin wrappers forwarding bus suspend/resume to the bus layer. */
5730 dhd_net_bus_suspend(struct net_device *dev)
5732 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
5733 return dhd_bus_suspend(&dhdinfo->pub);
5737 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
5739 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
5740 return dhd_bus_resume(&dhdinfo->pub, stage);
5743 #endif /* BCMSDIO */
/* Set the suspend-disable flag; returns the previous value. */
5745 int net_os_set_suspend_disable(struct net_device *dev, int val)
5747 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5751 ret = dhd->pub.suspend_disable_flag;
5752 dhd->pub.suspend_disable_flag = val;
/* Enter/leave driver suspend mode; with early-suspend builds this calls
 * dhd_set_suspend() directly, otherwise the suspend/resume helper.
 */
5757 int net_os_set_suspend(struct net_device *dev, int val, int force)
5760 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5763 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5764 ret = dhd_set_suspend(val, &dhd->pub);
5766 ret = dhd_suspend_resume_helper(dhd, val, force);
5769 wl_cfg80211_update_power_mode(dev);
/* Configure the beacon listen interval (DTIM multiple) used in suspend. */
5775 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
5777 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5780 dhd->pub.suspend_bcn_li_dtim = val;
5785 #ifdef PKT_FILTER_SUPPORT
/* Add or remove one of the predefined broadcast/multicast packet filters.
 * Unicast and mDNS filter slots are left untouched.
 */
5786 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
5788 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5789 char *filterp = NULL;
5793 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
5794 (num == DHD_MDNS_FILTER_NUM))
5796 if (num >= dhd->pub.pktfilter_count)
/* Filter strings: "<id> <offset> ... <mask> <pattern>" consumed by the
 * pkt_filter iovar machinery.
 */
5799 case DHD_BROADCAST_FILTER_NUM:
5800 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
5803 case DHD_MULTICAST4_FILTER_NUM:
5804 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
5807 case DHD_MULTICAST6_FILTER_NUM:
5808 filterp = "103 0 0 0 0xFFFF 0x3333";
5817 dhd->pub.pktfilter[num] = filterp;
5818 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
5819 } else { /* Delete filter */
5820 if (dhd->pub.pktfilter[num] != NULL) {
5821 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
5822 dhd->pub.pktfilter[num] = NULL;
/* Enable/disable packet filtering, honouring in_suspend and the
 * suspend_disable_flag.
 */
5828 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
5833 /* Packet filtering is set only if we still in early-suspend and
5834 * we need either to turn it ON or turn it OFF
5835 * We can always turn it OFF in case of early-suspend, but we turn it
5836 * back ON only if suspend_disable_flag was not set
5838 if (dhdp && dhdp->up) {
5839 if (dhdp->in_suspend) {
5840 if (!val || (val && !dhdp->suspend_disable_flag))
5841 dhd_enable_packet_filter(val, dhdp);
5847 /* function to enable/disable packet for Network device */
5848 int net_os_enable_packet_filter(struct net_device *dev, int val)
5850 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5852 return dhd_os_enable_packet_filter(&dhd->pub, val);
5854 #endif /* PKT_FILTER_SUPPORT */
/* Re-run protocol init for the device, bracketed by CID/MAC processing. */
5857 dhd_dev_init_ioctl(struct net_device *dev)
5859 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5862 dhd_process_cid_mac(&dhd->pub, TRUE);
5864 if ((ret = dhd_prot_init(&dhd->pub)) < 0)
5867 dhd_process_cid_mac(&dhd->pub, FALSE);
/* net_device-based wrappers around the OS-independent PNO (preferred
 * network offload) API in dhd_pno.c; each just resolves dhd_info_t from
 * the device and forwards.
 */
5874 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
5876 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
5878 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5880 return (dhd_pno_stop_for_ssid(&dhd->pub));
5882 /* Linux wrapper to call common dhd_pno_set_for_ssid */
5884 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
5885 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
5887 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5889 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
5890 pno_repeat, pno_freq_expo_max, channel_list, nchan));
5893 /* Linux wrapper to call common dhd_pno_enable */
5895 dhd_dev_pno_enable(struct net_device *dev, int enable)
5897 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5899 return (dhd_pno_enable(&dhd->pub, enable));
5902 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
5904 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
5905 struct dhd_pno_hotlist_params *hotlist_params)
5907 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5908 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
5910 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
5912 dhd_dev_pno_stop_for_batch(struct net_device *dev)
5914 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5915 return (dhd_pno_stop_for_batch(&dhd->pub));
5917 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
5919 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
5921 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5922 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
5924 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
5926 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
5928 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5929 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
5931 #endif /* PNO_SUPPORT */
5933 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
/* Deferred-work handler: report a firmware HANG upward on the primary
 * interface via WEXT private event and/or cfg80211.
 */
5934 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
5937 struct net_device *dev;
5939 dhd = (dhd_info_t *)dhd_info;
5940 dev = dhd->iflist[0]->net;
5946 #if defined(WL_WIRELESS_EXT)
5947 wl_iw_send_priv_event(dev, "HANG");
5949 #if defined(WL_CFG80211)
5950 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
/* Queue the hang report exactly once (hang_was_sent latch). */
5955 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
5959 if (!dhdp->hang_was_sent) {
5960 dhdp->hang_was_sent = 1;
5961 dhd_deferred_schedule_work((void *)dhdp, DHD_WQ_WORK_HANG_MSG,
5962 dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
/* net_device entry point for hang reporting; when hang_report is disabled
 * the hang is swallowed but the bus is still forced down.
 */
5968 int net_os_send_hang_message(struct net_device *dev)
5970 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5974 /* Report FW problem when enabled */
5975 if (dhd->pub.hang_report) {
5976 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
5977 ret = dhd_os_send_hang_message(&dhd->pub);
5979 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
5982 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
5984 /* Enforce bus down to stop any future traffic */
5985 dhd->pub.busstate = DHD_BUS_DOWN;
5990 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
/* Forward a power on/off request to the platform adapter. */
5993 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
5995 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
5996 return wifi_platform_set_power(dhdinfo->adapter, on, delay_msec);
/* Translate an ISO country code through the platform customization table. */
5999 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
6000 wl_country_t *cspec)
6002 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
6003 get_customized_country_code(dhdinfo->adapter, country_iso_code, cspec);
/* Record the active country spec and refresh wiphy band info. */
6005 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
6007 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6008 if (dhd && dhd->pub.up) {
6009 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
6011 wl_update_wiphybands(NULL, notify);
/* Band change: refresh wiphy band info when the interface is up. */
6016 void dhd_bus_band_set(struct net_device *dev, uint band)
6018 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6019 if (dhd && dhd->pub.up) {
6021 wl_update_wiphybands(NULL, true);
/* Store a new firmware path (NUL-terminated copy into fw_path) and note
 * whether it is an AP/STA-combined ("apsta") image.
 */
6026 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
6028 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6030 if (!fw || fw[0] == '\0')
6033 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
/* strncpy does not guarantee termination — force it here */
6034 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
6037 if (strstr(fw, "apsta") != NULL) {
6038 DHD_INFO(("GOT APSTA FIRMWARE\n"));
6039 ap_fw_loaded = TRUE;
6041 DHD_INFO(("GOT STA FIRMWARE\n"));
6042 ap_fw_loaded = FALSE;
/* Public net-interface mutex wrappers, resolving dhd_info_t from the dev. */
6048 void dhd_net_if_lock(struct net_device *dev)
6050 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6051 dhd_net_if_lock_local(dhd);
6054 void dhd_net_if_unlock(struct net_device *dev)
6056 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6057 dhd_net_if_unlock_local(dhd);
/* Interface-list mutex; mutex API requires kernel >= 2.6.25 here. */
6060 static void dhd_net_if_lock_local(dhd_info_t *dhd)
6062 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
6064 mutex_lock(&dhd->dhd_net_if_mutex);
6068 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
6070 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
6072 mutex_unlock(&dhd->dhd_net_if_mutex);
/* Suspend-path mutex, serializing suspend/resume processing. */
6076 static void dhd_suspend_lock(dhd_pub_t *pub)
6078 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
6079 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6081 mutex_lock(&dhd->dhd_suspend_mutex);
6085 static void dhd_suspend_unlock(dhd_pub_t *pub)
6087 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
6088 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6090 mutex_unlock(&dhd->dhd_suspend_mutex);
/* IRQ-safe driver spinlock; returns the saved flags for the matching
 * dhd_os_spin_unlock() call.
 */
6094 unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
6096 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6097 unsigned long flags = 0;
6100 spin_lock_irqsave(&dhd->dhd_lock, flags);
6105 void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
6107 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6110 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
/* Number of 802.1X (EAPOL) frames still in flight. */
6114 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
6116 return (atomic_read(&dhd->pend_8021x_cnt));
/* Poll (up to MAX_WAIT_FOR_8021X_TX * 10 ms) for all pending 802.1X
 * frames to drain; on timeout the counter is force-cleared and an error
 * is logged.
 */
6119 #define MAX_WAIT_FOR_8021X_TX 50
6122 dhd_wait_pend8021x(struct net_device *dev)
6124 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6125 int timeout = msecs_to_jiffies(10);
6126 int ntimes = MAX_WAIT_FOR_8021X_TX;
6127 int pend = dhd_get_pend_8021x_cnt(dhd);
6129 while (ntimes && pend) {
6131 set_current_state(TASK_INTERRUPTIBLE);
6132 schedule_timeout(timeout);
6133 set_current_state(TASK_RUNNING);
6136 pend = dhd_get_pend_8021x_cnt(dhd);
/* Timed out: reset the counter so later traffic isn't blocked */
6140 atomic_set(&dhd->pend_8021x_cnt, 0);
6141 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
/* Debug helper: dump `buf` (size bytes) to /tmp/mem_dump from kernel
 * context, then free the buffer. Uses the set_fs(KERNEL_DS) idiom so the
 * VFS write accepts a kernel pointer.
 */
6148 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
6152 mm_segment_t old_fs;
6155 /* change to KERNEL_DS address limit */
6159 /* open file to write */
6160 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
6162 printf("%s: open file error\n", __FUNCTION__);
6167 /* Write buf to file */
6168 fp->f_op->write(fp, buf, size, &pos);
6171 /* free buf before return */
6172 MFREE(dhd->osh, buf, size);
6173 /* close file before return */
6175 filp_close(fp, current->files);
6176 /* restore previous address limit */
6181 #endif /* DHD_DEBUG */
/* Arm the rx/ctrl timeout wakelocks for their currently-requested
 * durations, then clear the request fields; returns the larger of the two
 * requested timeouts.
 */
6183 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
6185 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6186 unsigned long flags;
6190 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6191 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
6192 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
6193 #ifdef CONFIG_HAS_WAKELOCK
6194 if (dhd->wakelock_rx_timeout_enable)
6195 wake_lock_timeout(&dhd->wl_rxwake,
6196 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
6197 if (dhd->wakelock_ctrl_timeout_enable)
6198 wake_lock_timeout(&dhd->wl_ctrlwake,
6199 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
6201 dhd->wakelock_rx_timeout_enable = 0;
6202 dhd->wakelock_ctrl_timeout_enable = 0;
6203 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device wrapper for dhd_os_wake_lock_timeout(). */
6208 int net_os_wake_lock_timeout(struct net_device *dev)
6210 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6214 ret = dhd_os_wake_lock_timeout(&dhd->pub);
/* Raise (never lower) the requested rx wakelock-timeout duration. */
6218 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
6220 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6221 unsigned long flags;
6224 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6225 if (val > dhd->wakelock_rx_timeout_enable)
6226 dhd->wakelock_rx_timeout_enable = val;
6227 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Raise (never lower) the requested ctrl wakelock-timeout duration. */
6232 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
6234 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6235 unsigned long flags;
6238 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6239 if (val > dhd->wakelock_ctrl_timeout_enable)
6240 dhd->wakelock_ctrl_timeout_enable = val;
6241 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Cancel any pending ctrl wakelock timeout and drop the lock if held. */
6246 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
6248 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6249 unsigned long flags;
6252 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6253 dhd->wakelock_ctrl_timeout_enable = 0;
6254 #ifdef CONFIG_HAS_WAKELOCK
6255 if (wake_lock_active(&dhd->wl_ctrlwake))
6256 wake_unlock(&dhd->wl_ctrlwake);
6258 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* net_device wrappers for the two timeout-enable setters above. */
6263 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
6265 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6269 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
6273 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
6275 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6279 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
/* Counted wakelock acquire: takes the real wakelock (or pm_stay_awake)
 * only on the 0 -> 1 transition and only when waive_wakelock is off;
 * returns the new counter value.
 */
6283 int dhd_os_wake_lock(dhd_pub_t *pub)
6285 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6286 unsigned long flags;
6290 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6291 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
6292 #ifdef CONFIG_HAS_WAKELOCK
6293 wake_lock(&dhd->wl_wifi);
6294 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
6295 dhd_bus_dev_pm_stay_awake(pub);
6298 dhd->wakelock_counter++;
6299 ret = dhd->wakelock_counter;
6300 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
6305 int net_os_wake_lock(struct net_device *dev)
6307 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6311 ret = dhd_os_wake_lock(&dhd->pub);
/* Counted wakelock release: drops the real wakelock on the 1 -> 0
 * transition; also flushes any pending timeout requests first.
 */
6315 int dhd_os_wake_unlock(dhd_pub_t *pub)
6317 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6318 unsigned long flags;
6321 dhd_os_wake_lock_timeout(pub);
6323 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6324 if (dhd->wakelock_counter > 0) {
6325 dhd->wakelock_counter--;
6326 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
6327 #ifdef CONFIG_HAS_WAKELOCK
6328 wake_unlock(&dhd->wl_wifi);
6329 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
6330 dhd_bus_dev_pm_relax(pub);
6333 ret = dhd->wakelock_counter;
6335 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Report whether any DHD wakelock is currently held (used by the SD host
 * to veto suspend).
 */
6340 int dhd_os_check_wakelock(dhd_pub_t *pub)
6342 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
6343 KERNEL_VERSION(2, 6, 36)))
6348 dhd = (dhd_info_t *)(pub->info);
6349 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
6351 #ifdef CONFIG_HAS_WAKELOCK
6352 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
6353 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
6354 (wake_lock_active(&dhd->wl_wdwake))))
6356 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
6357 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
6362 int net_os_wake_unlock(struct net_device *dev)
6364 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
6368 ret = dhd_os_wake_unlock(&dhd->pub);
/* Watchdog wakelock acquire: counted like dhd_os_wake_lock() but backed by
 * the dedicated wl_wdwake lock.
 */
6372 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
6374 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6375 unsigned long flags;
6379 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6380 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
6381 #ifdef CONFIG_HAS_WAKELOCK
6382 /* if wakelock_wd_counter was never used : lock it at once */
6383 wake_lock(&dhd->wl_wdwake);
6386 dhd->wakelock_wd_counter++;
6387 ret = dhd->wakelock_wd_counter;
6388 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
/* Watchdog wakelock release: note the counter is cleared to zero here,
 * not decremented — a single unlock releases all wd references.
 */
6393 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
6395 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
6396 unsigned long flags;
6400 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
6401 if (dhd->wakelock_wd_counter > 0) {
6402 dhd->wakelock_wd_counter = 0;
6403 if (!dhd->waive_wakelock) {
6404 #ifdef CONFIG_HAS_WAKELOCK
6405 wake_unlock(&dhd->wl_wdwake);
6409 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
6414 #ifdef PROP_TXSTATUS
6415 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
6416 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
/* Begin a waived section: record the current counter and suppress real
 * wakelock operations until dhd_wakelock_restore().
 */
6418 int dhd_wakelock_waive(dhd_info_t *dhdinfo)
6420 unsigned long flags;
6423 spin_lock_irqsave(&dhdinfo->wakelock_spinlock, flags);
6424 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
6425 if (dhdinfo->waive_wakelock)
6427 /* record current lock status */
6428 dhdinfo->wakelock_before_waive = dhdinfo->wakelock_counter;
6429 dhdinfo->waive_wakelock = TRUE;
6432 ret = dhdinfo->wakelock_wd_counter;
6433 spin_unlock_irqrestore(&dhdinfo->wakelock_spinlock, flags);
/* End a waived section and reconcile: apply the wake_lock/wake_unlock that
 * was suppressed if the counter crossed zero while waived.
 */
6437 int dhd_wakelock_restore(dhd_info_t *dhdinfo)
6439 unsigned long flags;
6442 spin_lock_irqsave(&dhdinfo->wakelock_spinlock, flags);
6443 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
6444 if (!dhdinfo->waive_wakelock)
6447 dhdinfo->waive_wakelock = FALSE;
6448 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
6449 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
6450 * the lock in between, do the same by calling wake_unlock or pm_relax
6452 if (dhdinfo->wakelock_before_waive == 0 && dhdinfo->wakelock_counter > 0) {
6453 #ifdef CONFIG_HAS_WAKELOCK
6454 wake_lock(&dhdinfo->wl_wifi);
6455 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
6456 dhd_bus_dev_pm_stay_awake(&dhdinfo->pub);
6458 } else if (dhdinfo->wakelock_before_waive > 0 && dhdinfo->wakelock_counter == 0) {
6459 #ifdef CONFIG_HAS_WAKELOCK
6460 wake_unlock(&dhdinfo->wl_wifi);
6461 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
6462 dhd_bus_dev_pm_relax(&dhdinfo->pub);
6465 dhdinfo->wakelock_before_waive = 0;
6467 ret = dhdinfo->wakelock_wd_counter;
6468 spin_unlock_irqrestore(&dhdinfo->wakelock_spinlock, flags);
6471 #endif /* PROP_TXSTATUS */
6473 bool dhd_os_check_if_up(dhd_pub_t *pub)
6480 #if defined(BCMSDIO)
6481 /* function to collect firmware, chip id and chip version info */
/*
 * Formats driver/firmware version and chip id/rev/package into the global
 * info_string buffer. NOTE(review): intermediate lines (opening brace,
 * declaration of `i`, any truncation check between the two snprintf calls)
 * are elided from this excerpt. If the first snprintf can truncate,
 * `sizeof(info_string) - i` would underflow -- confirm against full source.
 */
6482 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
6486 i = snprintf(info_string, sizeof(info_string),
6487 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
6492 i = snprintf(&info_string[i], sizeof(info_string) - i,
6493 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
6494 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
6496 #endif /* defined(BCMSDIO) */
/*
 * Validated ioctl entry point: resolves the dhd_info from the net_device,
 * maps the device to an interface index, then issues the wl ioctl under a
 * wakelock and runs hang detection on the result.
 * NOTE(review): excerpt is non-contiguous -- braces, `int ret;`/`ifidx`
 * declarations and the error-path returns are elided.
 */
6497 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
6501 dhd_info_t *dhd = NULL;
/* guard against a NULL device or uninitialized private area */
6503 if (!net || !netdev_priv(net)) {
6504 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
6508 dhd = *(dhd_info_t **)netdev_priv(net);
6512 ifidx = dhd_net2idx(dhd, net);
6513 if (ifidx == DHD_BAD_IF) {
6514 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
/* hold a wakelock across the firmware ioctl round-trip */
6518 DHD_OS_WAKE_LOCK(&dhd->pub);
6519 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
/* inspect the ioctl result for firmware-hang indications */
6520 dhd_check_hang(net, &dhd->pub, ret);
6521 DHD_OS_WAKE_UNLOCK(&dhd->pub);
/*
 * Map an interface index back to its net_device and delegate to
 * dhd_check_hang(). NOTE(review): the NULL-check branch body around the
 * error message and the opening/closing braces are elided in this excerpt.
 */
6526 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
6528 struct net_device *net;
6530 net = dhd_idx2net(dhdp, ifidx);
6532 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
6536 return dhd_check_hang(net, dhdp, ret);
6540 #ifdef PROP_TXSTATUS
/* Platform hooks for proptxstatus flow control.
 * NOTE(review): the bodies of all three functions are elided in this
 * excerpt; only the signatures are visible. */
6542 void dhd_wlfc_plat_init(void *dhd)
6547 void dhd_wlfc_plat_deinit(void *dhd)
6552 bool dhd_wlfc_skip_fc(void)
6556 #endif /* PROP_TXSTATUS */
/* debugfs support: raw register window exposed under /sys/kernel/debug */
6560 #include <linux/debugfs.h>
/* bus register accessors implemented elsewhere in the driver */
6562 extern uint32 dhd_readregl(void *bp, uint32 addr);
6563 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
/* NOTE(review): further struct members (at least `dhdp` and `size`, used
 * below via g_dbgfs) are elided from this excerpt. */
6565 typedef struct dhd_dbgfs {
6566 struct dentry *debugfs_dir;
6567 struct dentry *debugfs_mem;
/* single global instance; this debugfs facility supports one device */
6572 dhd_dbgfs_t g_dbgfs;
/* open(): stash the inode's private pointer for later file ops */
6575 dhd_dbg_state_open(struct inode *inode, struct file *file)
6577 file->private_data = inode->i_private;
/*
 * read(): return one 4-byte register value at the (4-byte aligned) file
 * position. NOTE(review): declarations of `pos`/`tmp`/`ret`, the returns on
 * the bounds/copy failure paths, and the closing brace are elided.
 */
6582 dhd_dbg_state_read(struct file *file, char __user *ubuf,
6583 size_t count, loff_t *ppos)
/* reject reads past the exposed window or zero-length reads */
6592 if (pos >= g_dbgfs.size || !count)
6594 if (count > g_dbgfs.size - pos)
6595 count = g_dbgfs.size - pos;
6597 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
6598 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
6600 ret = copy_to_user(ubuf, &tmp, 4);
6605 *ppos = pos + count;
/*
 * write(): write one 4-byte value to the register at the (aligned) file
 * position. NOTE(review): local declarations, failure-path returns and
 * closing braces are elided from this excerpt.
 */
6613 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
6621 if (pos >= g_dbgfs.size || !count)
6623 if (count > g_dbgfs.size - pos)
6624 count = g_dbgfs.size - pos;
/* only a single uint32 is consumed regardless of `count` */
6626 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
6630 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
6631 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
/*
 * llseek(): standard SEEK_SET/SEEK_CUR/SEEK_END handling clamped to the
 * exposed window. NOTE(review): the switch/case lines are elided; only the
 * CUR and END arithmetic plus the final bounds check are visible.
 */
6638 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
6647 pos = file->f_pos + off;
6650 pos = g_dbgfs.size - off;
6652 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
/* file_operations vtable for the debugfs "mem" node */
6655 static const struct file_operations dhd_dbg_state_ops = {
6656 .read = dhd_dbg_state_read,
6657 .write = dhd_debugfs_write,
6658 .open = dhd_dbg_state_open,
6659 .llseek = dhd_debugfs_lseek
/* create the "mem" file (rw-r--r--) inside the previously created dir */
6662 static void dhd_dbg_create(void)
6664 if (g_dbgfs.debugfs_dir) {
6665 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
6666 NULL, &dhd_dbg_state_ops)
/*
 * Initialize the debugfs interface: record the dhd_pub handle, size the
 * accessible register window, and create the "dhd" directory.
 * NOTE(review): `err` declaration, the success path after the error check,
 * and closing braces are elided from this excerpt.
 */
6670 void dhd_dbg_init(dhd_pub_t *dhdp)
6674 g_dbgfs.dhdp = dhdp;
6675 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
6677 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0)
6678 if (IS_ERR(g_dbgfs.debugfs_dir)) {
6679 err = PTR_ERR(g_dbgfs.debugfs_dir);
6680 g_dbgfs.debugfs_dir = NULL;
/* teardown: remove file before directory, then clear all global state */
6689 void dhd_dbg_remove(void)
6691 debugfs_remove(g_dbgfs.debugfs_mem);
6692 debugfs_remove(g_dbgfs.debugfs_dir);
6694 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
6697 #endif /* ifdef BCMDBGFS */
/*
 * Stamp outgoing packets with a high-resolution TSF timestamp for latency
 * measurement (WLMEDIA_HTSF instrumentation).
 * NOTE(review): excerpt is non-contiguous -- declarations of `p1`, `htsf`,
 * `ts` and several braces are elided. The fixed byte offsets (40, 44, 82,
 * 84) presumably address UDP port/checksum and payload fields of a known
 * test-traffic frame layout -- confirm against the full source.
 */
6702 void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
6704 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
6705 struct sk_buff *skb;
6707 uint16 dport = 0, oldmagic = 0xACAC;
6711 /* timestamp packet */
6713 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
6715 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
6716 /* memcpy(&proto, p1+26, 4); */
6717 memcpy(&dport, p1+40, 2);
6718 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
6719 dport = ntoh16(dport);
6722 /* timestamp only if icmp or udp iperf with port 5555 */
6723 /* if (proto == 17 && dport == tsport) { */
/* accept a small range of destination ports starting at tsport */
6724 if (dport >= tsport && dport <= tsport + 20) {
6726 skb = (struct sk_buff *) pktbuf;
6728 htsf = dhd_get_htsf(dhd, 0);
6729 memset(skb->data + 44, 0, 2); /* clear checksum */
6730 memcpy(skb->data+82, &oldmagic, 2);
6731 memcpy(skb->data+84, &htsf, 4);
/* build the full timestamp record appended at HTSF_HOSTOFFSET */
6733 memset(&ts, 0, sizeof(htsfts_t));
6734 ts.magic = HTSFMAGIC;
6735 ts.prio = PKTPRIO(pktbuf);
6736 ts.seqnum = htsf_seqnum++;
6737 ts.c10 = get_cycles();
6739 ts.endmagic = HTSFENDMAGIC;
6741 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
/* Print one latency histogram: per-bin counts, then max / total / negative
 * counters kept in the last two bins. */
6745 static void dhd_dump_htsfhisto(histo_t *his, char *s)
6747 int pktcnt = 0, curval = 0, i;
6748 for (i = 0; i < (NUMBIN-2); i++) {
6750 printf("%d ", his->bin[i]);
6751 pktcnt += his->bin[i];
6753 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
6754 his->bin[NUMBIN-1], s);
/*
 * Classify `value` into the histogram. Bin layout: bins 0..NUMBIN-3 are
 * 500-unit ranges (overflow goes into NUMBIN-3), NUMBIN-2 tracks the max,
 * NUMBIN-1 counts negative samples.
 * NOTE(review): `binval` declaration, the negative-value return path and
 * the in-range increment/return inside the loop are elided.
 */
6758 void sorttobin(int value, histo_t *histo)
6763 histo->bin[NUMBIN-1]++;
6766 if (value > histo->bin[NUMBIN-2]) /* store the max value */
6767 histo->bin[NUMBIN-2] = value;
6769 for (i = 0; i < (NUMBIN-2); i++) {
6770 binval += 500; /* 500m s bins */
6771 if (value <= binval) {
/* value exceeded every range: count it in the overflow bin */
6776 histo->bin[NUMBIN-3]++;
/*
 * On receive, complete the timestamp record started on the TX side:
 * stamp tE0/cE0 in the embedded record, then (for magic-tagged packets)
 * compute per-hop deltas, feed the latency histograms, and track the
 * worst-case end-to-end delay.
 * NOTE(review): excerpt is non-contiguous -- declarations of `p1`, `htsf`,
 * `old_magic`, `htsf_ts` and several braces are elided; byte offsets
 * (78, 80, 92) presumably match the TX-side frame layout -- confirm.
 */
6780 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
6782 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6783 struct sk_buff *skb;
6786 int d1, d2, d3, end2end;
6790 skb = PKTTONATIVE(dhdp->osh, pktbuf);
6791 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
6793 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
6794 memcpy(&old_magic, p1+78, 2);
6795 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
6800 if (htsf_ts->magic == HTSFMAGIC) {
/* record RX-side timestamp and cycle counter in the packet record */
6801 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
6802 htsf_ts->cE0 = get_cycles();
6805 if (old_magic == 0xACAC) {
6808 htsf = dhd_get_htsf(dhd, 0);
6809 memcpy(skb->data+92, &htsf, sizeof(uint32));
/* pull the four t1..t4 timestamps captured along the path */
6811 memcpy(&ts[tsidx].t1, skb->data+80, 16);
6813 d1 = ts[tsidx].t2 - ts[tsidx].t1;
6814 d2 = ts[tsidx].t3 - ts[tsidx].t2;
6815 d3 = ts[tsidx].t4 - ts[tsidx].t3;
6816 end2end = ts[tsidx].t4 - ts[tsidx].t1;
/* accumulate per-segment and end-to-end latency histograms */
6818 sorttobin(d1, &vi_d1);
6819 sorttobin(d2, &vi_d2);
6820 sorttobin(d3, &vi_d3);
6821 sorttobin(end2end, &vi_d4);
6823 if (end2end > 0 && end2end > maxdelay) {
6825 maxdelaypktno = tspktcnt;
6826 memcpy(&maxdelayts, &ts[tsidx], 16);
/* ring-buffer the timestamp records */
6828 if (++tsidx >= TSMAX)
/*
 * Extrapolate the current firmware TSF from the last synchronized
 * (tsf, cycle-counter) pair using the calibrated cycles-per-us coefficient
 * (coef + two decimal digits, maintained by htsf_update()).
 * NOTE(review): the `cur_cycle = get_cycles()` capture, braces, and the
 * final `return htsf;` are elided from this excerpt.
 */
6833 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
6835 uint32 htsf = 0, cur_cycle, delta, delta_us;
6836 uint32 factor, baseval, baseval2;
/* handle 32-bit wrap of the cycle counter since the last sync point */
6842 if (cur_cycle > dhd->htsf.last_cycle)
6843 delta = cur_cycle - dhd->htsf.last_cycle;
6845 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
6850 if (dhd->htsf.coef) {
6851 /* times ten to get the first digit */
6852 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
6853 baseval = (delta*10)/factor;
6854 baseval2 = (delta*10)/(factor+1);
/* linear interpolation using the second decimal digit of the coefficient */
6855 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
6856 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
/* coefficient not calibrated yet: cannot extrapolate */
6859 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
/*
 * Dump the ring buffer of per-packet timestamps plus the worst-case
 * latency record to the console.
 * NOTE(review): declarations of `i`/`max`, the `max = i;` update inside the
 * d4>d5 branch, and the printf delta arguments are elided in this excerpt.
 */
6865 static void dhd_dump_latency(void)
6868 int d1, d2, d3, d4, d5;
6870 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
6871 for (i = 0; i < TSMAX; i++) {
/* per-segment deltas and end-to-end delta for entry i */
6872 d1 = ts[i].t2 - ts[i].t1;
6873 d2 = ts[i].t3 - ts[i].t2;
6874 d3 = ts[i].t4 - ts[i].t3;
6875 d4 = ts[i].t4 - ts[i].t1;
/* d5 is the current maximum end-to-end delta seen so far */
6876 d5 = ts[max].t4-ts[max].t1;
6877 if (d4 > d5 && d4 > 0) {
6880 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
6881 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
6885 printf("current idx = %d \n", tsidx);
6887 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
6888 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
6889 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
6890 maxdelayts.t2 - maxdelayts.t1,
6891 maxdelayts.t3 - maxdelayts.t2,
6892 maxdelayts.t4 - maxdelayts.t3,
6893 maxdelayts.t4 - maxdelayts.t1);
/*
 * Query the firmware "tsf" iovar and print it next to the host-extrapolated
 * HTSF value for calibration comparison.
 * NOTE(review): declarations of `ioc`/`buf`/`tsf_buf`/`s1`/`s2`/`ret`, the
 * iovar-name setup lines and some braces are elided from this excerpt;
 * `s1` is captured but its use is not visible here.
 */
6898 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
6910 memset(&ioc, 0, sizeof(ioc));
6911 memset(&tsf_buf, 0, sizeof(tsf_buf));
6913 ioc.cmd = WLC_GET_VAR;
6915 ioc.len = (uint)sizeof(buf);
/* iovar name goes at the start of the buffer, NUL-terminated */
6918 strncpy(buf, "tsf", sizeof(buf) - 1);
6919 buf[sizeof(buf) - 1] = '\0';
/* host timestamp just before the firmware round-trip */
6920 s1 = dhd_get_htsf(dhd, 0);
6921 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
6923 DHD_ERROR(("%s: tsf is not supported by device\n",
6924 dhd_ifname(&dhd->pub, ifidx)));
/* host timestamp just after, for comparison against the returned TSF */
6929 s2 = dhd_get_htsf(dhd, 0);
6931 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
6932 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
6933 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
6934 dhd->htsf.coefdec2, s2-tsf_buf.low);
6935 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
/*
 * Recalibrate the host-cycles-to-TSF conversion coefficient from a fresh
 * firmware TSF sample (`data` points at a tsf_t). Computes the integer
 * cycles-per-tsf-tick factor plus three decimal digits and stores the
 * result in dhd->htsf for dhd_get_htsf() to use.
 * NOTE(review): the `cur_cycle = get_cycles()` capture, the NULL-data
 * early return, the tsf_delta==0 guard and several braces are elided.
 */
6939 void htsf_update(dhd_info_t *dhd, void *data)
6941 static ulong cur_cycle = 0, prev_cycle = 0;
6942 uint32 htsf, tsf_delta = 0;
6943 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
6947 /* cycles_t in include/mips/timex.h */
6951 prev_cycle = cur_cycle;
/* cycle-counter delta with 32-bit wrap handling */
6954 if (cur_cycle > prev_cycle)
6955 cyc_delta = cur_cycle - prev_cycle;
6959 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
6963 printf(" tsf update ata point er is null \n");
6965 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
6966 memcpy(&cur_tsf, data, sizeof(tsf_t));
6968 if (cur_tsf.low == 0) {
6969 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
/* TSF delta with wrap handling via the high word */
6973 if (cur_tsf.low > prev_tsf.low)
6974 tsf_delta = (cur_tsf.low - prev_tsf.low);
6976 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
6977 cur_tsf.low, prev_tsf.low));
6978 if (cur_tsf.high > prev_tsf.high) {
6979 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
6980 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
/* low went backwards without a high-word carry: sample is bogus */
6983 return; /* do not update */
/* integer factor plus three decimal digits via repeated scale-by-10 */
6987 hfactor = cyc_delta / tsf_delta;
6988 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
6989 dec1 = tmp/tsf_delta;
6990 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
6991 tmp = (tmp - (dec1*tsf_delta))*10;
6992 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
/* NOTE(review): dec3 is computed but not stored below -- rounding into
 * dec2 may happen in elided lines; confirm against full source. */
7011 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
7012 dhd->htsf.coef = hfactor;
7013 dhd->htsf.last_cycle = cur_cycle;
7014 dhd->htsf.last_tsf = cur_tsf.low;
7015 dhd->htsf.coefdec1 = dec1;
7016 dhd->htsf.coefdec2 = dec2;
7019 htsf = prev_tsf.low;
7023 #endif /* WLMEDIA_HTSF */
7025 #ifdef CUSTOM_SET_CPUCORE
/*
 * Pin (or unpin) the DPC and RXF kernel threads to dedicated CPU cores
 * when a VHT80 channel is active, retrying on transient failures.
 * NOTE(review): excerpt is non-contiguous -- the `if (set)` selectors
 * between the two set_cpus_allowed_ptr() alternatives, the `do {` openers,
 * retry resets and early returns are elided.
 */
7026 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
7028 int e_dpc = 0, e_rxf = 0, retry_set = 0;
/* only repin cores for VHT80 operation */
7030 if (!(dhd->chan_isvht80)) {
7031 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
/* pin DPC thread to its dedicated core (or back to the primary core) */
7038 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
7039 cpumask_of(DPC_CPUCORE));
7041 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
7042 cpumask_of(PRIMARY_CPUCORE));
7044 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
7045 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
7050 } while (e_dpc < 0);
/* pin RXF thread likewise */
7055 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
7056 cpumask_of(RXF_CPUCORE));
7058 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
7059 cpumask_of(PRIMARY_CPUCORE));
7061 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
7062 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
7067 } while (e_rxf < 0);
7069 #ifdef DHD_OF_SUPPORT
/* also steer the device interrupt to the chosen core set */
7070 interrupt_set_cpucore(set);
7071 #endif /* DHD_OF_SUPPORT */
7072 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
7076 #endif /* CUSTOM_SET_CPUCORE */