@@ -319,8 +319,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;

 	BT_DBG("%d", index);

@@ -328,8 +327,7 @@ struct hci_dev *hci_dev_get(int index)
 		return NULL;

 	read_lock(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
 			break;
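
The two hunks above are part of a tree-wide cleanup: open-coded list_for_each() plus list_entry() pairs become list_for_each_entry(), which hides the container_of() arithmetic and drops the scratch struct list_head pointer. A minimal sketch of the pattern, using a hypothetical struct item rather than the HCI types:

	#include <linux/list.h>

	struct item {
		int id;
		struct list_head list;	/* linkage into a list_head chain */
	};

	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *i;

		/* iterate over the containing structs directly */
		list_for_each_entry(i, head, list)
			if (i->id == id)
				return i;

		return NULL;
	}

The same conversion repeats below for link keys, the blacklist, and the connection hash.
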
@@ -551,8 +549,11 @@ int hci_dev_open(__u16 dev)
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->flags))
-			mgmt_powered(hdev->id, 1);
+		if (!test_bit(HCI_SETUP, &hdev->flags)) {
+			hci_dev_lock_bh(hdev);
+			mgmt_powered(hdev, 1);
+			hci_dev_unlock_bh(hdev);
+		}
 	} else {
 		/* Init failed, cleanup */
 		tasklet_kill(&hdev->rx_task);
@@ -597,6 +598,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	tasklet_kill(&hdev->rx_task);
 	tasklet_kill(&hdev->tx_task);

+	if (hdev->discov_timeout > 0) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = 0;
+	}
+
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work(&hdev->power_off);
+
 	hci_dev_lock_bh(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
@@ -636,7 +645,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);

-	mgmt_powered(hdev->id, 0);
+	hci_dev_lock_bh(hdev);
+	mgmt_powered(hdev, 0);
+	hci_dev_unlock_bh(hdev);

 	/* Clear flags */
 	hdev->flags = 0;
@@ -794,9 +805,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)

 int hci_get_dev_list(void __user *arg)
 {
+	struct hci_dev *hdev;
 	struct hci_dev_list_req *dl;
 	struct hci_dev_req *dr;
-	struct list_head *p;
 	int n = 0, size, err;
 	__u16 dev_num;

@@ -815,12 +826,9 @@ int hci_get_dev_list(void __user *arg)
 	dr = dl->dev_req;

 	read_lock_bh(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *hdev;
-
-		hdev = list_entry(p, struct hci_dev, list);
-
-		hci_del_off_timer(hdev);
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+			cancel_delayed_work(&hdev->power_off);

 		if (!test_bit(HCI_MGMT, &hdev->flags))
 			set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +863,8 @@ int hci_get_dev_info(void __user *arg)
 	if (!hdev)
 		return -ENODEV;

-	hci_del_off_timer(hdev);
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work_sync(&hdev->power_off);

 	if (!test_bit(HCI_MGMT, &hdev->flags))
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +921,7 @@ struct hci_dev *hci_alloc_dev(void)
 	if (!hdev)
 		return NULL;

+	hci_init_sysfs(hdev);
 	skb_queue_head_init(&hdev->driver_init);

 	return hdev;
@@ -938,39 +948,41 @@ static void hci_power_on(struct work_struct *work)
 		return;

 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-		mod_timer(&hdev->off_timer,
-			jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+		queue_delayed_work(hdev->workqueue, &hdev->power_off,
+					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_added(hdev->id);
+		mgmt_index_added(hdev);
 }

 static void hci_power_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							power_off.work);

 	BT_DBG("%s", hdev->name);

+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
 	hci_dev_close(hdev->id);
 }

-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) data;
+	struct hci_dev *hdev;
+	u8 scan = SCAN_PAGE;
+
+	hdev = container_of(work, struct hci_dev, discov_off.work);

 	BT_DBG("%s", hdev->name);

-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+	hci_dev_lock_bh(hdev);

-	queue_work(hdev->workqueue, &hdev->power_off);
-}
+	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

-void hci_del_off_timer(struct hci_dev *hdev)
-{
-	BT_DBG("%s", hdev->name);
+	hdev->discov_timeout = 0;

-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
-	del_timer(&hdev->off_timer);
+	hci_dev_unlock_bh(hdev);
 }

 int hci_uuids_clear(struct hci_dev *hdev)
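
The power-off logic above moves from a raw kernel timer (setup_timer()/mod_timer(), handler in softirq context) to a delayed_work on hdev->workqueue, whose handler runs in process context and may therefore sleep and take the same locks as the rest of the mgmt code. A sketch of the general conversion pattern, with a hypothetical struct my_dev standing in for hci_dev:

	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work power_off;
	};

	static void my_power_off(struct work_struct *work)
	{
		/* note the .work member: container_of() resolves the
		 * work_struct embedded inside the delayed_work */
		struct my_dev *d = container_of(work, struct my_dev,
							power_off.work);
		/* ... handler body; sleeping is allowed here ... */
	}

	/* setup:  INIT_DELAYED_WORK(&d->power_off, my_power_off);
	 * arm:    queue_delayed_work(wq, &d->power_off, msecs_to_jiffies(ms));
	 * disarm: cancel_delayed_work(&d->power_off), or
	 *         cancel_delayed_work_sync() to also wait for a running
	 *         handler, as hci_get_dev_info() does above. */

This is why hci_del_off_timer() disappears: a cancel_delayed_work() call plus the HCI_AUTO_OFF test_and_clear_bit() replaces it at every call site.
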
@@ -1007,16 +1019,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)

 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->link_keys) {
-		struct link_key *k;
-
-		k = list_entry(p, struct link_key, list);
+	struct link_key *k;

+	list_for_each_entry(k, &hdev->link_keys, list)
 		if (bacmp(bdaddr, &k->bdaddr) == 0)
 			return k;
-	}

 	return NULL;
 }
@@ -1138,7 +1145,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,

 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

-	mgmt_new_key(hdev->id, key, persistent);
+	mgmt_new_link_key(hdev, key, persistent);

 	if (!persistent) {
 		list_del(&key->list);
@@ -1181,7 +1188,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
 	memcpy(id->rand, rand, sizeof(id->rand));

 	if (new_key)
-		mgmt_new_key(hdev->id, key, old_key_type);
+		mgmt_new_link_key(hdev, key, old_key_type);

 	return 0;
 }
@@ -1279,16 +1286,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
 						bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
+	struct bdaddr_list *b;

+	list_for_each_entry(b, &hdev->blacklist, list)
 		if (bacmp(bdaddr, &b->bdaddr) == 0)
 			return b;
-	}

 	return NULL;
 }
@@ -1327,7 +1329,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)

 	list_add(&entry->list, &hdev->blacklist);

-	return mgmt_device_blocked(hdev->id, bdaddr);
+	return mgmt_device_blocked(hdev, bdaddr);
 }

 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -1346,7 +1348,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	list_del(&entry->list);
 	kfree(entry);

-	return mgmt_device_unblocked(hdev->id, bdaddr);
+	return mgmt_device_unblocked(hdev, bdaddr);
 }

 static void hci_clear_adv_cache(unsigned long arg)
@@ -1425,7 +1427,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
 int hci_register_dev(struct hci_dev *hdev)
 {
 	struct list_head *head = &hci_dev_list, *p;
-	int i, id = 0;
+	int i, id, error;

 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
 	       hdev->bus, hdev->owner);
@@ -1433,6 +1435,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;

+	/* Do not allow HCI_AMP devices to register at index 0,
+	 * so the index can be used as the AMP controller ID.
+	 */
+	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
 	write_lock_bh(&hci_dev_list_lock);

 	/* Find first available device id */
@@ -1479,6 +1486,8 @@

 	hci_conn_hash_init(hdev);

+	INIT_LIST_HEAD(&hdev->mgmt_pending);
+
 	INIT_LIST_HEAD(&hdev->blacklist);

 	INIT_LIST_HEAD(&hdev->uuids);
@@ -1492,8 +1501,9 @@
 						(unsigned long) hdev);

 	INIT_WORK(&hdev->power_on, hci_power_on);
-	INIT_WORK(&hdev->power_off, hci_power_off);
-	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

@@ -1502,10 +1512,14 @@
 	write_unlock_bh(&hci_dev_list_lock);

 	hdev->workqueue = create_singlethread_workqueue(hdev->name);
-	if (!hdev->workqueue)
-		goto nomem;
+	if (!hdev->workqueue) {
+		error = -ENOMEM;
+		goto err;
+	}

-	hci_register_sysfs(hdev);
+	error = hci_add_sysfs(hdev);
+	if (error < 0)
+		goto err_wqueue;

 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1524,17 +1538,19 @@

 	return id;

-nomem:
+err_wqueue:
+	destroy_workqueue(hdev->workqueue);
+err:
 	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock_bh(&hci_dev_list_lock);

-	return -ENOMEM;
+	return error;
 }
 EXPORT_SYMBOL(hci_register_dev);

 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
 	int i;

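
The registration path now reports the real error and unwinds in reverse order of setup through labelled gotos, the usual kernel idiom for multi-step initialization. Reduced to its shape (same names as in the hunks above, error handling only):

	static int register_example(struct hci_dev *hdev)
	{
		int error;

		hdev->workqueue = create_singlethread_workqueue(hdev->name);
		if (!hdev->workqueue) {
			error = -ENOMEM;
			goto err;		/* nothing else to undo yet */
		}

		error = hci_add_sysfs(hdev);
		if (error < 0)
			goto err_wqueue;	/* undo only the workqueue */

		return 0;

	err_wqueue:
		destroy_workqueue(hdev->workqueue);
	err:
		return error;
	}

Each later failure jumps to a label that tears down exactly what was already set up, so nothing is leaked or double-freed.
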
@@ -1550,8 +1566,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
 		kfree_skb(hdev->reassembly[i]);

 	if (!test_bit(HCI_INIT, &hdev->flags) &&
-				!test_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_removed(hdev->id);
+				!test_bit(HCI_SETUP, &hdev->flags)) {
+		hci_dev_lock_bh(hdev);
+		mgmt_index_removed(hdev);
+		hci_dev_unlock_bh(hdev);
+	}
+
+	/* mgmt_index_removed should take care of emptying the
+	 * pending list */
+	BUG_ON(!list_empty(&hdev->mgmt_pending));

 	hci_notify(hdev, HCI_DEV_UNREG);

@@ -1560,9 +1583,8 @@
 		rfkill_destroy(hdev->rfkill);
 	}

-	hci_unregister_sysfs(hdev);
+	hci_del_sysfs(hdev);

-	hci_del_off_timer(hdev);
 	del_timer(&hdev->adv_timer);

 	destroy_workqueue(hdev->workqueue);
@@ -1576,8 +1598,6 @@
 	hci_dev_unlock_bh(hdev);

 	__hci_dev_put(hdev);
-
-	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);

@@ -1948,23 +1968,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
 	hdr->dlen   = cpu_to_le16(len);
 }

-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+				struct sk_buff *skb, __u16 flags)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;

-	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-	skb->dev = (void *) hdev;
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
-
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

-		skb_queue_tail(&conn->data_q, skb);
+		skb_queue_tail(queue, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1987,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 		skb_shinfo(skb)->frag_list = NULL;

 		/* Queue all fragments atomically */
-		spin_lock_bh(&conn->data_q.lock);
+		spin_lock_bh(&queue->lock);

-		__skb_queue_tail(&conn->data_q, skb);
+		__skb_queue_tail(queue, skb);

 		flags &= ~ACL_START;
 		flags |= ACL_CONT;
@@ -1987,11 +2002,25 @@

 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

-			__skb_queue_tail(&conn->data_q, skb);
+			__skb_queue_tail(queue, skb);
 		} while (list);

-		spin_unlock_bh(&conn->data_q.lock);
+		spin_unlock_bh(&queue->lock);
 	}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+	skb->dev = (void *) hdev;
+	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	hci_add_acl_hdr(skb, conn->handle, flags);
+
+	hci_queue_acl(conn, &chan->data_q, skb, flags);

 	tasklet_schedule(&hdev->tx_task);
 }
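
The split above separates policy from mechanics: hci_queue_acl() only queues (preserving the atomic queueing of fragment lists), while the exported hci_send_acl() stamps the packet and now takes a struct hci_chan, so each logical channel gets its own data_q instead of sharing the connection-wide queue. A sketch of the resulting call-site change for an L2CAP-style user (the chan variable and the priority assignment are illustrative, assuming a channel already attached to the connection):

	/* before: one queue per baseband connection */
	hci_send_acl(conn, skb, ACL_START);

	/* after: per-channel queue; the scheduler below can then
	 * interleave channels according to skb->priority */
	skb->priority = HCI_PRIO_MAX - 1;	/* highest priority */
	hci_send_acl(chan, skb, ACL_START);
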
@@ -2026,16 +2055,12 @@ EXPORT_SYMBOL(hci_send_sco);
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn = NULL, *c;
 	int num = 0, min = ~0;
-	struct list_head *p;

 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
-	list_for_each(p, &h->list) {
-		struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
-
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;

@@ -2084,14 +2109,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
-	struct hci_conn *c;
+	struct hci_conn *c;

 	BT_ERR("%s link tx timeout", hdev->name);

 	/* Kill stalled connections */
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 				hdev->name, batostr(&c->dst));
@@ -2100,11 +2123,137 @@
 		}
 	}
 }

-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+						int *quote)
 {
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_chan *chan = NULL;
+	int num = 0, min = ~0, cur_prio = 0;
 	struct hci_conn *conn;
+	int cnt, q, conn_num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *tmp;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		conn_num++;
+
+		ch = &conn->chan_hash;
+
+		list_for_each_entry(tmp, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (skb_queue_empty(&tmp->data_q))
+				continue;
+
+			skb = skb_peek(&tmp->data_q);
+			if (skb->priority < cur_prio)
+				continue;
+
+			if (skb->priority > cur_prio) {
+				num = 0;
+				min = ~0;
+				cur_prio = skb->priority;
+			}
+
+			num++;
+
+			if (conn->sent < min) {
+				min = conn->sent;
+				chan = tmp;
+			}
+		}
+
+		if (hci_conn_num(hdev, type) == conn_num)
+			break;
+	}
+
+	if (!chan)
+		return NULL;
+
+	switch (chan->conn->type) {
+	case ACL_LINK:
+		cnt = hdev->acl_cnt;
+		break;
+	case SCO_LINK:
+	case ESCO_LINK:
+		cnt = hdev->sco_cnt;
+		break;
+	case LE_LINK:
+		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+		break;
+	default:
+		cnt = 0;
+		BT_ERR("Unknown link type");
+	}
+
+	q = cnt / num;
+	*quote = q ? q : 1;
+	BT_DBG("chan %p quote %d", chan, *quote);
+	return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn;
+	int num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *chan;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		ch = &conn->chan_hash;
+		list_for_each_entry(chan, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (chan->sent) {
+				chan->sent = 0;
+				continue;
+			}
+
+			if (skb_queue_empty(&chan->data_q))
+				continue;
+
+			skb = skb_peek(&chan->data_q);
+			if (skb->priority >= HCI_PRIO_MAX - 1)
+				continue;
+
+			skb->priority = HCI_PRIO_MAX - 1;
+
+			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+								skb->priority);
+		}
+
+		if (hci_conn_num(hdev, type) == num)
+			break;
+	}
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
+	unsigned int cnt;

 	BT_DBG("%s", hdev->name);

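
hci_chan_sent() is the per-channel successor of hci_low_sent(): among the channels whose head packet carries the highest pending priority, it picks the one on the least-busy connection (smallest conn->sent) and grants it a proportional share of the free controller buffers, quote = cnt / num, floored at 1. For example, with hdev->acl_cnt = 8 free ACL buffers and three channels queued at the top priority, each selection gets a quote of 8 / 3 = 2 packets. hci_prio_recalculate() then prevents starvation: after a round in which data was sent, any channel that transmitted nothing (chan->sent == 0) has its head packet promoted to HCI_PRIO_MAX - 1, so it is guaranteed to win a following round.
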
@@ -2118,19 +2267,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 		hci_link_tx_to(hdev, ACL_LINK);
 	}

-	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	cnt = hdev->acl_cnt;
+
+	while (hdev->acl_cnt &&
+			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);

-			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
+
+			hci_conn_enter_active_mode(chan->conn,
+						bt_cb(skb)->force_active);

 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;

 			hdev->acl_cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
+	if (cnt != hdev->acl_cnt)
+		hci_prio_recalculate(hdev, ACL_LINK);
 }

 /* Schedule SCO */
@@ -2182,9 +2347,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)

 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-	struct hci_conn *conn;
+	struct hci_chan *chan;
 	struct sk_buff *skb;
-	int quote, cnt;
+	int quote, cnt, tmp;

 	BT_DBG("%s", hdev->name);

@@ -2200,21 +2365,35 @@ static inline void hci_sched_le(struct hci_dev *hdev)
 	}

 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	tmp = cnt;
+	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
+
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);

 			hci_send_frame(skb);
 			hdev->le_last_tx = jiffies;

 			cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
 	if (hdev->le_pkts)
 		hdev->le_cnt = cnt;
 	else
 		hdev->acl_cnt = cnt;
+
+	if (cnt != tmp)
+		hci_prio_recalculate(hdev, LE_LINK);
 }

 static void hci_tx_task(unsigned long arg)
@@ -2407,3 +2586,31 @@ static void hci_cmd_task(unsigned long arg)
 		}
 	}
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+	/* General inquiry access code (GIAC) */
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+
+	BT_DBG("%s", hdev->name);
+
+	if (test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EINPROGRESS;
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, sizeof(cp.lap));
+	cp.length = length;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EPERM;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
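
These two helpers give the management interface a minimal way to drive BR/EDR discovery. A usage sketch (the 0x08 inquiry length, 8 x 1.28 s = 10.24 s, is illustrative, not mandated by this code):

	int err;

	/* start a general inquiry; length is in units of 1.28 seconds */
	err = hci_do_inquiry(hdev, 0x08);
	if (err == -EINPROGRESS)
		return 0;	/* an inquiry is already running */

	/* ... later, e.g. when userspace stops discovery ... */
	err = hci_cancel_inquiry(hdev);
	if (err == -EPERM)
		return 0;	/* nothing to cancel */

The LAP 0x9e8b33 is the standard General Inquiry Access Code; note the byte array is little-endian, as the HCI command expects.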