/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
#define AUTO_OFF_TIMEOUT 2000
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
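+/* Module parameter (registered at the bottom of this file). When unset,
+ * non-BR/EDR (AMP) controllers stay marked as raw devices. */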
+bool enable_hs;
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
-/* HCI protocols */
-#define HCI_MAX_PROTO 2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
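+/* BR/EDR controller bring-up: packet-based flow control plus the
+ * mandatory reset, feature, buffer-size and address commands. */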
+static void bredr_init(struct hci_dev *hdev)
{
struct hci_cp_delete_stored_link_key cp;
- struct sk_buff *skb;
__le16 param;
__u8 flt_type;
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Driver initialization */
-
- /* Special commands */
- while ((skb = skb_dequeue(&hdev->driver_init))) {
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
-
- skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
- }
- skb_queue_purge(&hdev->driver_init);
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
/* Mandatory initialization */
/* Reset */
if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+ set_bit(HCI_RESET, &hdev->flags);
+ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
/* Read Local Supported Features */
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-#if 0
- /* Host buffer size */
- {
- struct hci_cp_host_buffer_size cp;
- cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
- cp.sco_mtu = HCI_MAX_SCO_SIZE;
- cp.acl_max_pkt = cpu_to_le16(0xffff);
- cp.sco_max_pkt = cpu_to_le16(0xffff);
- hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
- }
-#endif
-
/* Read BD Address */
hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
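+/* AMP controller bring-up: block-based flow control; only a reset and
+ * a local-version read are needed here. */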
+static void amp_init(struct hci_dev *hdev)
+{
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+
+ /* Reset */
+ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+
+ /* Read Local Version */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+}
+
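+/* Common init: drain driver-queued commands, then dispatch on the
+ * controller type. */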
+static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("%s %ld", hdev->name, opt);
+
+ /* Driver initialization */
+
+ /* Special commands */
+ while ((skb = skb_dequeue(&hdev->driver_init))) {
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+ skb->dev = (void *) hdev;
+
+ skb_queue_tail(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+ skb_queue_purge(&hdev->driver_init);
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ bredr_init(hdev);
+ break;
+
+ case HCI_AMP:
+ amp_init(hdev);
+ break;
+
+ default:
+ BT_ERR("Unknown device type %d", hdev->dev_type);
+ break;
+ }
+}
+
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
BT_DBG("%s", hdev->name);
if (!hdev)
return -ENODEV;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
goto done;
}
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- /* Treat all non BR/EDR controllers as raw devices for now */
- if (hdev->dev_type != HCI_BREDR)
+ /* Treat all non-BR/EDR controllers as raw devices if
+    enable_hs is not set */
+ if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags))
+ if (!test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
+ hci_dev_unlock(hdev);
+ }
} else {
/* Init failed, cleanup */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
- tasklet_kill(&hdev->cmd_task);
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->cmd_work);
+ flush_work(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
return 0;
}
- /* Kill RX and TX tasks */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
+ /* Flush RX and TX work items */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
if (hdev->discov_timeout > 0) {
- cancel_delayed_work_sync(&hdev->discov_off);
+ cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
}
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
- cancel_delayed_work_sync(&hdev->power_off);
+ cancel_delayed_work(&hdev->power_off);
- hci_dev_lock_bh(hdev);
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ cancel_delayed_work(&hdev->service_cache);
+
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
- /* Kill cmd task */
- tasklet_kill(&hdev->cmd_task);
+ /* Flush command work */
+ flush_work(&hdev->cmd_work);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
- * and no tasks are scheduled. */
+ * and no work items are scheduled. */
hdev->close(hdev);
+ hci_dev_lock(hdev);
mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
/* Clear flags */
hdev->flags = 0;
return -ENODEV;
hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (hdev->flush)
hdev->flush(hdev);
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
- tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
dr = dl->dev_req;
- read_lock_bh(&hci_dev_list_lock);
+ read_lock(&hci_dev_list_lock);
list_for_each_entry(hdev, &hci_dev_list, list) {
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
- cancel_delayed_work_sync(&hdev->power_off);
+ cancel_delayed_work(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
if (++n >= dev_num)
break;
}
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
return;
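+ /* Deferred auto power-off now uses the system workqueue;
+ * AUTO_OFF_TIMEOUT is in milliseconds. */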
if (test_bit(HCI_AUTO_OFF, &hdev->flags))
- queue_delayed_work(hdev->workqueue, &hdev->power_off,
+ schedule_delayed_work(&hdev->power_off,
msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
BT_DBG("%s", hdev->name);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
hdev->discov_timeout = 0;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
{
struct bdaddr_list *entry;
- if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+ if (bacmp(bdaddr, BDADDR_ANY) == 0)
return hci_blacklist_clear(hdev);
- }
entry = hci_blacklist_lookup(hdev, bdaddr);
- if (!entry) {
+ if (!entry)
return -ENOENT;
- }
list_del(&entry->list);
kfree(entry);
return mgmt_device_unblocked(hdev, bdaddr);
}
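+/* Formerly a timer callback; as delayed work it runs in process context
+ * and can safely take hdev->lock, which is now a mutex. */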
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
{
- struct hci_dev *hdev = (void *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ adv_work.work);
hci_dev_lock(hdev);
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
- write_lock_bh(&hci_dev_list_lock);
+ write_lock(&hci_dev_list_lock);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
- list_add(&hdev->list, head);
+ list_add_tail(&hdev->list, head);
atomic_set(&hdev->refcnt, 1);
- spin_lock_init(&hdev->lock);
+ mutex_init(&hdev->lock);
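+ /* With a mutex here, hdev must be locked from process context only;
+ * the *_bh lock helpers throughout this file become plain variants. */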
hdev->flags = 0;
+ hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
- tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
- tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
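+ /* RX, command and TX processing are driven by work items on
+ * hdev->workqueue instead of per-device tasklets. */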
+ INIT_WORK(&hdev->rx_work, hci_rx_work);
+ INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+ INIT_WORK(&hdev->tx_work, hci_tx_work);
+
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
hci_conn_hash_init(hdev);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
- setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
- (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
atomic_set(&hdev->promisc, 0);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
- hdev->workqueue = create_singlethread_workqueue(hdev->name);
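+ /* max_active = 1 keeps at most one work item of this queue running,
+ * preserving the old single-threaded ordering; WQ_MEM_RECLAIM adds a
+ * rescuer thread for forward progress under memory pressure. */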
+ hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
if (!hdev->workqueue) {
error = -ENOMEM;
goto err;
set_bit(HCI_AUTO_OFF, &hdev->flags);
set_bit(HCI_SETUP, &hdev->flags);
- queue_work(hdev->workqueue, &hdev->power_on);
+ schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
err_wqueue:
destroy_workqueue(hdev->workqueue);
err:
- write_lock_bh(&hci_dev_list_lock);
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
return error;
}
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock_bh(&hci_dev_list_lock);
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
hci_dev_do_close(hdev);
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags))
+ !test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
+ }
+
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
hci_del_sysfs(hdev);
- del_timer(&hdev->adv_timer);
+ cancel_delayed_work_sync(&hdev->adv_work);
destroy_workqueue(hdev->workqueue);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
__hci_dev_put(hdev);
}
/* Time stamp */
__net_timestamp(skb);
- /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- tasklet_schedule(&hdev->rx_task);
+ queue_work(hdev->workqueue, &hdev->rx_work);
return 0;
}
/* ---- Interface to upper protocols ---- */
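+/* The hci_proto registration table is removed; L2CAP and SCO are now
+ * invoked directly (see hci_acldata_packet and hci_scodata_packet). */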
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (!hci_proto[hp->id])
- hci_proto[hp->id] = hp;
- else
- err = -EEXIST;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (hci_proto[hp->id])
- hci_proto[hp->id] = NULL;
- else
- err = -ENOENT;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
hdev->init_last_cmd = opcode;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&queue->lock);
+ spin_lock(&queue->lock);
__skb_queue_tail(queue, skb);
__skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&queue->lock);
+ spin_unlock(&queue->lock);
}
}
hci_queue_acl(conn, &chan->data_q, skb, flags);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
- /* We don't have to lock device here. Connections are always
- * added and removed with TX task disabled. */
+ /* We don't have to lock the device here: the connection list is
+ * traversed under RCU protection. */
- list_for_each_entry(c, &h->list, list) {
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
break;
}
+ rcu_read_unlock();
+
if (conn) {
int cnt, q;
BT_ERR("%s link tx timeout", hdev->name);
+ rcu_read_lock();
+
/* Kill stalled connections */
- list_for_each_entry(c, &h->list, list) {
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
}
+
+ rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
BT_DBG("%s", hdev->name);
- list_for_each_entry(conn, &h->list, list) {
- struct hci_chan_hash *ch;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
struct hci_chan *tmp;
if (conn->type != type)
conn_num++;
- ch = &conn->chan_hash;
-
- list_for_each_entry(tmp, &ch->list, list) {
+ list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
struct sk_buff *skb;
if (skb_queue_empty(&tmp->data_q))
break;
}
+ rcu_read_unlock();
+
if (!chan)
return NULL;
BT_DBG("%s", hdev->name);
- list_for_each_entry(conn, &h->list, list) {
- struct hci_chan_hash *ch;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
struct hci_chan *chan;
if (conn->type != type)
num++;
- ch = &conn->chan_hash;
- list_for_each_entry(chan, &ch->list, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_list, list) {
struct sk_buff *skb;
if (chan->sent) {
if (hci_conn_num(hdev, type) == num)
break;
}
+
+ rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
hci_prio_recalculate(hdev, LE_LINK);
}
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
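+ /* Recover the hci_dev that embeds this work item. */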
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
struct sk_buff *skb;
- read_lock(&hci_task_lock);
-
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
-
- read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->recv_acldata) {
- hp->recv_acldata(conn, skb, flags);
- return;
- }
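+ /* Direct call: the hci_proto indirection is gone. */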
+ l2cap_recv_acldata(conn, skb, flags);
+ return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->recv_scodata) {
- hp->recv_scodata(conn, skb);
- return;
- }
+ sco_recv_scodata(conn, skb);
+ return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
kfree_skb(skb);
}
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
- read_lock(&hci_task_lock);
-
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
break;
}
}
-
- read_unlock(&hci_task_lock);
}
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
}
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
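+/* With perm 0644 the parameter is also writable at runtime via
+ * /sys/module/bluetooth/parameters/enable_hs. */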