Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

John W. Linville 2012-05-16 15:38:11 -04:00
commit 05f8f25276
27 changed files with 1355 additions and 747 deletions

View File

@ -81,6 +81,9 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C) },
{ } /* Terminating entry */
};
@ -99,6 +102,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};

View File

@ -67,6 +67,7 @@ struct btmrvl_adapter {
u8 wakeup_tries;
wait_queue_head_t cmd_wait_q;
u8 cmd_complete;
bool is_suspended;
};
struct btmrvl_private {
@ -139,8 +140,10 @@ void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
int btmrvl_enable_hs(struct btmrvl_private *priv);
#ifdef CONFIG_DEBUG_FS
void btmrvl_debugfs_init(struct hci_dev *hdev);

View File

@ -200,6 +200,36 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (!skb) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
BT_CMD_HOST_SLEEP_CONFIG));
cmd->length = 2;
cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
cmd->data[1]);
return 0;
}
EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
@ -232,7 +262,7 @@ int btmrvl_enable_ps(struct btmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
static int btmrvl_enable_hs(struct btmrvl_private *priv)
int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
@ -268,35 +298,15 @@ static int btmrvl_enable_hs(struct btmrvl_private *priv)
return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_enable_hs);
int btmrvl_prepare_command(struct btmrvl_private *priv)
{
struct sk_buff *skb = NULL;
struct btmrvl_cmd *cmd;
int ret = 0;
if (priv->btmrvl_dev.hscfgcmd) {
priv->btmrvl_dev.hscfgcmd = 0;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG));
cmd->length = 2;
cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x",
cmd->data[0], cmd->data[1]);
btmrvl_send_hscfg_cmd(priv);
}
if (priv->btmrvl_dev.pscmd) {
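As a side note on the refactor above: the HSCFG payload built by the new btmrvl_send_hscfg_cmd() is simply the 16-bit gpio_gap value split into a GPIO byte (high) and a gap byte (low). A minimal standalone sketch of that encoding, plain userspace C rather than driver code; the 0xffff default mirrors the value programmed in btmrvl_sdio_probe further down.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t gpio_gap = 0xffff; /* default set in btmrvl_sdio_probe */
    uint8_t data[2];

    data[0] = (gpio_gap & 0xff00) >> 8; /* wakeup GPIO byte */
    data[1] = gpio_gap & 0x00ff;        /* gap byte */

    printf("HSCFG payload: gpio=0x%02x gap=0x%02x\n", data[0], data[1]);
    return 0;
}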

View File

@ -339,9 +339,7 @@ static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmphlprbuf);
if (fw_helper)
release_firmware(fw_helper);
release_firmware(fw_helper);
return ret;
}
@ -484,10 +482,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmpfwbuf);
if (fw_firmware)
release_firmware(fw_firmware);
release_firmware(fw_firmware);
return ret;
}
@ -1013,6 +1008,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
disable_host_int:
@ -1048,11 +1046,111 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
}
}
static int btmrvl_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct btmrvl_sdio_card *card;
struct btmrvl_private *priv;
mmc_pm_flag_t pm_flags;
struct hci_dev *hcidev;
if (func) {
pm_flags = sdio_get_host_pm_caps(func);
BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func),
pm_flags);
if (!(pm_flags & MMC_PM_KEEP_POWER)) {
BT_ERR("%s: cannot remain alive while suspended",
sdio_func_id(func));
return -ENOSYS;
}
card = sdio_get_drvdata(func);
if (!card || !card->priv) {
BT_ERR("card or priv structure is not valid");
return 0;
}
} else {
BT_ERR("sdio_func is not specified");
return 0;
}
priv = card->priv;
if (priv->adapter->hs_state != HS_ACTIVATED) {
if (btmrvl_enable_hs(priv)) {
BT_ERR("HS not actived, suspend failed!");
return -EBUSY;
}
}
hcidev = priv->btmrvl_dev.hcidev;
BT_DBG("%s: SDIO suspend", hcidev->name);
hci_suspend_dev(hcidev);
skb_queue_purge(&priv->adapter->tx_queue);
priv->adapter->is_suspended = true;
/* We will keep the power when hs enabled successfully */
if (priv->adapter->hs_state == HS_ACTIVATED) {
BT_DBG("suspend with MMC_PM_KEEP_POWER");
return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
} else {
BT_DBG("suspend without MMC_PM_KEEP_POWER");
return 0;
}
}
static int btmrvl_sdio_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct btmrvl_sdio_card *card;
struct btmrvl_private *priv;
mmc_pm_flag_t pm_flags;
struct hci_dev *hcidev;
if (func) {
pm_flags = sdio_get_host_pm_caps(func);
BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func),
pm_flags);
card = sdio_get_drvdata(func);
if (!card || !card->priv) {
BT_ERR("card or priv structure is not valid");
return 0;
}
} else {
BT_ERR("sdio_func is not specified");
return 0;
}
priv = card->priv;
if (!priv->adapter->is_suspended) {
BT_DBG("device already resumed");
return 0;
}
priv->adapter->is_suspended = false;
hcidev = priv->btmrvl_dev.hcidev;
BT_DBG("%s: SDIO resume", hcidev->name);
hci_resume_dev(hcidev);
priv->hw_wakeup_firmware(priv);
priv->adapter->hs_state = HS_DEACTIVATED;
BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name);
return 0;
}
static const struct dev_pm_ops btmrvl_sdio_pm_ops = {
.suspend = btmrvl_sdio_suspend,
.resume = btmrvl_sdio_resume,
};
static struct sdio_driver bt_mrvl_sdio = {
.name = "btmrvl_sdio",
.id_table = btmrvl_sdio_ids,
.probe = btmrvl_sdio_probe,
.remove = btmrvl_sdio_remove,
.drv = {
.owner = THIS_MODULE,
.pm = &btmrvl_sdio_pm_ops,
}
};
static int __init btmrvl_sdio_init_module(void)

View File

@ -143,6 +143,9 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@ -855,6 +858,7 @@ static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
int new_alts;
int err;
if (hdev->conn_hash.sco_num > 0) {
@ -868,11 +872,19 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
if (data->isoc_altsetting != 2) {
if (hdev->voice_setting & 0x0020) {
static const int alts[3] = { 2, 4, 5 };
new_alts = alts[hdev->conn_hash.sco_num - 1];
} else {
new_alts = hdev->conn_hash.sco_num;
}
if (data->isoc_altsetting != new_alts) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
if (__set_isoc_interface(hdev, 2) < 0)
if (__set_isoc_interface(hdev, new_alts) < 0)
return;
}
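The new alternate-setting logic above can be read as a small pure function: when bit 0x0020 of hdev->voice_setting is set, one of the wider isochronous alternate settings 2, 4 or 5 is chosen according to the number of active SCO links; otherwise the link count itself is used. A hedged standalone sketch of that selection (pick_isoc_alt is a made-up name, not a btusb function):

#include <stdio.h>

static int pick_isoc_alt(int sco_num, unsigned int voice_setting)
{
    static const int alts[3] = { 2, 4, 5 };

    if (voice_setting & 0x0020)
        return alts[sco_num - 1];

    return sco_num;
}

int main(void)
{
    printf("1 SCO link, bit 0x0020 clear: alt %d\n", pick_isoc_alt(1, 0x0000));
    printf("1 SCO link, bit 0x0020 set:   alt %d\n", pick_isoc_alt(1, 0x0020));
    printf("2 SCO links, bit 0x0020 set:  alt %d\n", pick_isoc_alt(2, 0x0020));
    return 0;
}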

View File

@ -388,7 +388,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
hdev->parent = hu->tty->dev;
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

View File

@ -252,8 +252,9 @@ static int vhci_open(struct inode *inode, struct file *file)
}
file->private_data = data;
nonseekable_open(inode, file);
return nonseekable_open(inode, file);
return 0;
}
static int vhci_release(struct inode *inode, struct file *file)

View File

@ -1853,14 +1853,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
/*aspm */
rtl_pci_init_aspm(hw);
@ -1879,6 +1871,14 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
goto fail3;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,

View File

@ -971,11 +971,6 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
rtlpriv->cfg->ops->read_chip_version(hw);
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = _rtl_usb_init(hw);
if (err)
goto error_out;
@ -987,6 +982,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
"Can't allocate sw for mac80211\n");
goto error_out;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
return 0;
error_out:

View File

@ -163,6 +163,11 @@ typedef struct {
__u8 b[6];
} __packed bdaddr_t;
/* BD Address type */
#define BDADDR_BREDR 0x00
#define BDADDR_LE_PUBLIC 0x01
#define BDADDR_LE_RANDOM 0x02
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
@ -178,7 +183,6 @@ static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
void baswap(bdaddr_t *dst, bdaddr_t *src);
char *batostr(bdaddr_t *ba);
bdaddr_t *strtoba(char *str);
/* Common socket structures and functions */
@ -190,7 +194,12 @@ struct bt_sock {
bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
u32 defer_setup;
unsigned long flags;
};
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
};
struct bt_sock_list {
@ -215,14 +224,24 @@ void bt_accept_unlink(struct sock *sk);
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct l2cap_ctrl {
unsigned int sframe : 1,
poll : 1,
final : 1,
fcs : 1,
sar : 2,
super : 2;
__u16 reqseq;
__u16 txseq;
__u8 retries;
};
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
__u16 expect;
__u16 tx_seq;
__u8 retries;
__u8 sar;
__u8 force_active;
struct l2cap_ctrl control;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@ -242,12 +261,10 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
{
struct sk_buff *skb;
release_sock(sk);
if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
lock_sock(sk);
if (!skb && *err)
return NULL;
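The conversion above replaces the old u32 defer_setup field with individual bits (BT_SK_DEFER_SETUP, BT_SK_SUSPEND) in a shared unsigned long flags word, manipulated with set_bit()/test_bit() in the callers later in this series. A rough userspace illustration of the same pattern; the SET_BIT/CLEAR_BIT/TEST_BIT macros are local stand-ins, not the kernel bitops.

#include <stdio.h>

enum {
    BT_SK_DEFER_SETUP,
    BT_SK_SUSPEND,
};

#define SET_BIT(nr, flags)   (*(flags) |= 1UL << (nr))
#define CLEAR_BIT(nr, flags) (*(flags) &= ~(1UL << (nr)))
#define TEST_BIT(nr, flags)  ((*(flags) >> (nr)) & 1UL)

int main(void)
{
    unsigned long flags = 0;

    SET_BIT(BT_SK_DEFER_SETUP, &flags);
    printf("defer_setup=%lu suspend=%lu\n",
           TEST_BIT(BT_SK_DEFER_SETUP, &flags),
           TEST_BIT(BT_SK_SUSPEND, &flags));

    CLEAR_BIT(BT_SK_DEFER_SETUP, &flags);
    SET_BIT(BT_SK_SUSPEND, &flags);
    printf("defer_setup=%lu suspend=%lu\n",
           TEST_BIT(BT_SK_DEFER_SETUP, &flags),
           TEST_BIT(BT_SK_SUSPEND, &flags));
    return 0;
}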

View File

@ -102,6 +102,7 @@ enum {
HCI_DISCOVERABLE,
HCI_LINK_SECURITY,
HCI_PENDING_CLASS,
HCI_PERIODIC_INQ,
};
/* HCI ioctl defines */
@ -324,6 +325,8 @@ struct hci_cp_inquiry {
#define HCI_OP_INQUIRY_CANCEL 0x0402
#define HCI_OP_PERIODIC_INQ 0x0403
#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
#define HCI_OP_CREATE_CONN 0x0405
@ -717,6 +720,10 @@ struct hci_rp_read_local_oob_data {
} __packed;
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
struct hci_rp_read_inq_rsp_tx_power {
__u8 status;
__s8 tx_power;
} __packed;
#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
struct hci_rp_read_flow_control_mode {
@ -1431,6 +1438,5 @@ struct hci_inquiry_req {
#define IREQ_CACHE_FLUSH 0x0001
extern bool enable_hs;
extern bool enable_le;
#endif /* __HCI_H */

View File

@ -155,9 +155,14 @@ struct hci_dev {
__u16 hci_rev;
__u8 lmp_ver;
__u16 manufacturer;
__le16 lmp_subver;
__u16 lmp_subver;
__u16 voice_setting;
__u8 io_capability;
__s8 inq_tx_power;
__u16 devid_source;
__u16 devid_vendor;
__u16 devid_product;
__u16 devid_version;
__u16 pkt_type;
__u16 esco_type;
@ -250,9 +255,6 @@ struct hci_dev {
struct list_head remote_oob_data;
struct list_head adv_entries;
struct delayed_work adv_work;
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
@ -263,7 +265,6 @@ struct hci_dev {
struct dentry *debugfs;
struct device *parent;
struct device dev;
struct rfkill *rfkill;
@ -571,7 +572,7 @@ int hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 sec_level, __u8 auth_type);
__u8 dst_type, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
@ -673,8 +674,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 ediv,
u8 rand[8]);
int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
__le16 ediv, u8 rand[8]);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
@ -688,14 +689,6 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
u8 *randomizer);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */
int hci_adv_entries_clear(struct hci_dev *hdev);
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_add_adv_entry(struct hci_dev *hdev,
struct hci_ev_le_advertising_info *ev);
void hci_del_off_timer(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct sk_buff *skb);
@ -709,7 +702,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
@ -933,6 +926,23 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
return false;
}
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
size_t parsed = 0;
while (parsed < eir_len) {
u8 field_len = eir[0];
if (field_len == 0)
return parsed;
parsed += field_len + 1;
eir += field_len + 1;
}
return eir_len;
}
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
u8 data_len)
{
@ -961,17 +971,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
/* Management interface */
#define MGMT_ADDR_BREDR 0x00
#define MGMT_ADDR_LE_PUBLIC 0x01
#define MGMT_ADDR_LE_RANDOM 0x02
#define MGMT_ADDR_INVALID 0xff
#define DISCOV_TYPE_BREDR (BIT(MGMT_ADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(MGMT_ADDR_LE_PUBLIC) | \
BIT(MGMT_ADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED (BIT(MGMT_ADDR_BREDR) | \
BIT(MGMT_ADDR_LE_PUBLIC) | \
BIT(MGMT_ADDR_LE_RANDOM))
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
BIT(BDADDR_LE_PUBLIC) | \
BIT(BDADDR_LE_RANDOM))
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
@ -1067,12 +1072,12 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16]);
void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
void hci_le_ltk_neg_reply(struct hci_conn *conn);
int hci_do_inquiry(struct hci_dev *hdev, u8 length);
int hci_cancel_inquiry(struct hci_dev *hdev);
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
int timeout);
int hci_cancel_le_scan(struct hci_dev *hdev);
u8 bdaddr_to_le(u8 bdaddr_type);
#endif /* __HCI_CORE_H */
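For the eir_get_length() helper added above: each EIR field is a length byte followed by a type byte and data, and a zero length byte terminates the significant part of the buffer. A small self-contained program exercising the same walk; the sample field types (0x01 Flags, 0x08 Shortened Local Name) are standard EIR data types, and the buffer layout is built by hand here purely for illustration.

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

static size_t eir_get_length(const uint8_t *eir, size_t eir_len)
{
    size_t parsed = 0;

    while (parsed < eir_len) {
        uint8_t field_len = eir[0];

        if (field_len == 0)
            return parsed;

        parsed += field_len + 1;
        eir += field_len + 1;
    }

    return eir_len;
}

int main(void)
{
    uint8_t eir[240] = { 0 };
    size_t off = 0;

    /* Flags field: length 2, type 0x01, one data byte */
    eir[off++] = 2; eir[off++] = 0x01; eir[off++] = 0x06;

    /* Shortened local name: length 1 + strlen, type 0x08 */
    eir[off++] = 1 + strlen("bt"); eir[off++] = 0x08;
    memcpy(&eir[off], "bt", 2); off += 2;

    printf("significant EIR length: %zu of %zu bytes\n",
           eir_get_length(eir, sizeof(eir)), sizeof(eir));
    return 0;
}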

View File

@ -44,6 +44,7 @@
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@ -57,6 +58,7 @@ struct sockaddr_l2 {
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
__u8 l2_bdaddr_type;
};
/* L2CAP socket options */
@ -139,6 +141,8 @@ struct l2cap_conninfo {
#define L2CAP_CTRL_TXSEQ_SHIFT 1
#define L2CAP_CTRL_SUPER_SHIFT 2
#define L2CAP_CTRL_POLL_SHIFT 4
#define L2CAP_CTRL_FINAL_SHIFT 7
#define L2CAP_CTRL_REQSEQ_SHIFT 8
#define L2CAP_CTRL_SAR_SHIFT 14
@ -152,9 +156,11 @@ struct l2cap_conninfo {
#define L2CAP_EXT_CTRL_FINAL 0x00000002
#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
#define L2CAP_EXT_CTRL_SAR_SHIFT 16
#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
#define L2CAP_EXT_CTRL_POLL_SHIFT 18
#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
/* L2CAP Supervisory Function */
@ -186,6 +192,8 @@ struct l2cap_hdr {
#define L2CAP_FCS_SIZE 2
#define L2CAP_SDULEN_SIZE 2
#define L2CAP_PSMLEN_SIZE 2
#define L2CAP_ENH_CTRL_SIZE 2
#define L2CAP_EXT_CTRL_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
@ -401,6 +409,16 @@ struct l2cap_conn_param_update_rsp {
#define L2CAP_CONN_PARAM_REJECTED 0x0001
/* ----- L2CAP channels and connections ----- */
struct l2cap_seq_list {
__u16 head;
__u16 tail;
__u16 mask;
__u16 *list;
};
#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
#define L2CAP_SEQ_LIST_TAIL 0x8000
struct srej_list {
__u16 tx_seq;
struct list_head list;
@ -446,6 +464,9 @@ struct l2cap_chan {
__u16 monitor_timeout;
__u16 mps;
__u8 tx_state;
__u8 rx_state;
unsigned long conf_state;
unsigned long conn_state;
unsigned long flags;
@ -456,9 +477,11 @@ struct l2cap_chan {
__u16 buffer_seq;
__u16 buffer_seq_srej;
__u16 srej_save_reqseq;
__u16 last_acked_seq;
__u16 frames_sent;
__u16 unacked_frames;
__u8 retry_count;
__u16 srej_queue_next;
__u8 num_acked;
__u16 sdu_len;
struct sk_buff *sdu;
@ -490,6 +513,8 @@ struct l2cap_chan {
struct sk_buff *tx_send_head;
struct sk_buff_head tx_q;
struct sk_buff_head srej_q;
struct l2cap_seq_list srej_list;
struct l2cap_seq_list retrans_list;
struct list_head srej_l;
struct list_head list;
@ -508,8 +533,7 @@ struct l2cap_ops {
void (*close) (void *data);
void (*state_change) (void *data, int state);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long len, int nb, int *err);
unsigned long len, int nb);
};
struct l2cap_conn {
@ -600,6 +624,44 @@ enum {
FLAG_EFS_ENABLE,
};
enum {
L2CAP_TX_STATE_XMIT,
L2CAP_TX_STATE_WAIT_F,
};
enum {
L2CAP_RX_STATE_RECV,
L2CAP_RX_STATE_SREJ_SENT,
};
enum {
L2CAP_TXSEQ_EXPECTED,
L2CAP_TXSEQ_EXPECTED_SREJ,
L2CAP_TXSEQ_UNEXPECTED,
L2CAP_TXSEQ_UNEXPECTED_SREJ,
L2CAP_TXSEQ_DUPLICATE,
L2CAP_TXSEQ_DUPLICATE_SREJ,
L2CAP_TXSEQ_INVALID,
L2CAP_TXSEQ_INVALID_IGNORE,
};
enum {
L2CAP_EV_DATA_REQUEST,
L2CAP_EV_LOCAL_BUSY_DETECTED,
L2CAP_EV_LOCAL_BUSY_CLEAR,
L2CAP_EV_RECV_REQSEQ_AND_FBIT,
L2CAP_EV_RECV_FBIT,
L2CAP_EV_RETRANS_TO,
L2CAP_EV_MONITOR_TO,
L2CAP_EV_EXPLICIT_POLL,
L2CAP_EV_RECV_IFRAME,
L2CAP_EV_RECV_RR,
L2CAP_EV_RECV_REJ,
L2CAP_EV_RECV_RNR,
L2CAP_EV_RECV_SREJ,
L2CAP_EV_RECV_FRAME,
};
static inline void l2cap_chan_hold(struct l2cap_chan *c)
{
atomic_inc(&c->refcnt);
@ -622,21 +684,26 @@ static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
}
static inline void l2cap_set_timer(struct l2cap_chan *chan,
struct delayed_work *work, long timeout)
struct delayed_work *work, long timeout)
{
BT_DBG("chan %p state %s timeout %ld", chan,
state_to_string(chan->state), timeout);
state_to_string(chan->state), timeout);
/* If delayed work cancelled do not hold(chan)
since it is already done with previous set_timer */
if (!cancel_delayed_work(work))
l2cap_chan_hold(chan);
schedule_delayed_work(work, timeout);
}
static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
struct delayed_work *work)
struct delayed_work *work)
{
bool ret;
/* put(chan) if delayed work cancelled otherwise it
is done in delayed work function */
ret = cancel_delayed_work(work);
if (ret)
l2cap_chan_put(chan);
@ -658,13 +725,10 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
{
int offset;
offset = (seq1 - seq2) % (chan->tx_win_max + 1);
if (offset < 0)
offset += (chan->tx_win_max + 1);
return offset;
if (seq1 >= seq2)
return seq1 - seq2;
else
return chan->tx_win_max + 1 - seq2 + seq1;
}
static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
@ -852,14 +916,15 @@ int __l2cap_wait_ack(struct sock *sk);
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(struct sock *sk);
struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
void l2cap_chan_destroy(struct l2cap_chan *chan);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst);
bdaddr_t *dst, u8 dst_type);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan);
void l2cap_chan_set_defaults(struct l2cap_chan *chan);
#endif /* __L2CAP_H */
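The rewritten __seq_offset() above trades the modulo-and-fixup form for an explicit wraparound branch. A quick standalone check, assuming a 14-bit extended-window sequence space (tx_win_max 0x3fff), that the two forms are equivalent for every pair of sequence numbers:

#include <stdio.h>

#define TX_WIN_MAX 0x3fff /* 14-bit extended control field window */

static int offset_new(int seq1, int seq2)
{
    if (seq1 >= seq2)
        return seq1 - seq2;
    return TX_WIN_MAX + 1 - seq2 + seq1;
}

static int offset_old(int seq1, int seq2)
{
    int offset = (seq1 - seq2) % (TX_WIN_MAX + 1);

    if (offset < 0)
        offset += TX_WIN_MAX + 1;
    return offset;
}

int main(void)
{
    for (int seq1 = 0; seq1 <= TX_WIN_MAX; seq1++)
        for (int seq2 = 0; seq2 <= TX_WIN_MAX; seq2++)
            if (offset_new(seq1, seq2) != offset_old(seq1, seq2)) {
                printf("mismatch at %d,%d\n", seq1, seq2);
                return 1;
            }

    printf("old and new __seq_offset agree for all %d x %d pairs\n",
           TX_WIN_MAX + 1, TX_WIN_MAX + 1);
    return 0;
}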

View File

@ -341,6 +341,15 @@ struct mgmt_cp_unblock_device {
} __packed;
#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
#define MGMT_OP_SET_DEVICE_ID 0x0028
struct mgmt_cp_set_device_id {
__le16 source;
__le16 vendor;
__le16 product;
__le16 version;
} __packed;
#define MGMT_SET_DEVICE_ID_SIZE 8
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;

View File

@ -77,7 +77,7 @@ struct smp_cmd_encrypt_info {
#define SMP_CMD_MASTER_IDENT 0x07
struct smp_cmd_master_ident {
__u16 ediv;
__le16 ediv;
__u8 rand[8];
} __packed;

View File

@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
bt_sk(parent)->defer_setup) {
test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
(bt_sk(parent)->defer_setup &&
sk->sk_state == BT_CONNECT2))
(test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;
if (sock_writeable(sk))
if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

View File

@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
if (ntohs(s->eh.h_proto) == 0x8100) {
if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));

View File

@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_ltk_reply cp;
BT_DBG("%p", conn);
memset(&cp, 0, sizeof(cp));
cp.handle = cpu_to_le16(conn->handle);
memcpy(cp.ltk, ltk, sizeof(ltk));
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_ltk_reply);
void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_ltk_neg_reply cp;
BT_DBG("%p", conn);
memset(&cp, 0, sizeof(cp));
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (type == LE_LINK) {
struct adv_entry *entry;
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (le)
return ERR_PTR(-EBUSY);
if (!le) {
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);
entry = hci_find_adv_entry(hdev, dst);
if (!entry)
return ERR_PTR(-EHOSTUNREACH);
le->dst_type = bdaddr_to_le(dst_type);
hci_le_connect(le);
}
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);
le->dst_type = entry->bdaddr_type;
hci_le_connect(le);
le->pending_sec_level = sec_level;
le->auth_type = auth_type;
hci_conn_hold(le);

View File

@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;
/* Some CSR based controllers generate a spontaneous
@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/
if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;
skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
/* Read Local AMP Info */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
struct hci_dev *hdev;
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;
hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);
return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);
/* will free via device release */
put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
adv_work.work);
hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
struct adv_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
list_del(&entry->list);
kfree(entry);
}
BT_DBG("%s adv cache cleared", hdev->name);
return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct adv_entry *entry;
list_for_each_entry(entry, &hdev->adv_entries, list)
if (bacmp(bdaddr, &entry->bdaddr) == 0)
return entry;
return NULL;
}
static inline int is_connectable_adv(u8 evt_type)
{
if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
return 1;
return 0;
}
int hci_add_adv_entry(struct hci_dev *hdev,
struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;
if (!is_connectable_adv(ev->evt_type))
return -EINVAL;
/* Only new entries should be added to adv_entries. So, if
* bdaddr was found, don't add it. */
if (hci_find_adv_entry(hdev, &ev->bdaddr))
return 0;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
bacpy(&entry->bdaddr, &ev->bdaddr);
entry->bdaddr_type = ev->bdaddr_type;
list_add(&entry->list, &hdev->adv_entries);
BT_DBG("%s adv entry added: address %s type %u", hdev->name,
batostr(&entry->bdaddr), entry->bdaddr_type);
return 0;
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}
int hci_cancel_le_scan(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
return -EALREADY;
if (cancel_delayed_work(&hdev->le_scan_disable)) {
struct hci_cp_le_set_scan_enable cp;
/* Send HCI command to disable LE Scan */
memset(&cp, 0, sizeof(cp));
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
struct list_head *head = &hci_dev_list, *p;
int i, id, error;
struct hci_dev *hdev;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;
if (!hdev->open || !hdev->close)
return -EINVAL;
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
write_lock(&hci_dev_list_lock);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
if (list_entry(p, struct hci_dev, list)->id != id)
break;
head = p; id++;
}
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
list_add_tail(&hdev->list, head);
mutex_init(&hdev->lock);
hdev->flags = 0;
hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->le_scan, le_scan_work);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;
init_waitqueue_head(&hdev->req_wait_q);
mutex_init(&hdev->req_lock);
hci_init_sysfs(hdev);
discovery_init(hdev);
hci_conn_hash_init(hdev);
INIT_LIST_HEAD(&hdev->mgmt_pending);
return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
INIT_LIST_HEAD(&hdev->blacklist);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);
INIT_LIST_HEAD(&hdev->uuids);
/* will free via device release */
put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head, *p;
int id, error;
INIT_LIST_HEAD(&hdev->remote_oob_data);
if (!hdev->open || !hdev->close)
return -EINVAL;
INIT_LIST_HEAD(&hdev->adv_entries);
write_lock(&hci_dev_list_lock);
INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
head = &hci_dev_list;
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
int nid = list_entry(p, struct hci_dev, list)->id;
if (nid > id)
break;
if (nid == id)
id++;
head = p;
}
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
atomic_set(&hdev->promisc, 0);
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
INIT_WORK(&hdev->le_scan, le_scan_work);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
list_add(&hdev->list, head);
write_unlock(&hci_dev_list_lock);
@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_del_sysfs(hdev);
cancel_delayed_work_sync(&hdev->adv_work);
destroy_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
skb->len = skb_headlen(skb);
skb->data_len = 0;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
hci_queue_acl(conn, &chan->data_q, skb, flags);
@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
int num = 0, min = ~0;
unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
int num = 0, min = ~0, cur_prio = 0;
unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;
@ -2784,6 +2719,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) {
hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
mgmt_device_connected(hdev, &conn->dst, conn->type,
conn->dst_type, 0, NULL, 0,
conn->dev_class);
hci_dev_unlock(hdev);
/* Send to upper protocol */
l2cap_recv_acldata(conn, skb, flags);
return;
@ -2937,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_INQUIRY, &hdev->flags))
return -EPERM;
return -EALREADY;
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
switch (bdaddr_type) {
case BDADDR_LE_PUBLIC:
return ADDR_LE_DEV_PUBLIC;
default:
/* Fallback to LE Random address type */
return ADDR_LE_DEV_RANDOM;
}
}
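The id-allocation loop moved into hci_register_dev() above now fills holes in the index space: it walks the sorted device list, bumps the candidate id past every match, and stops at the first gap. A standalone sketch of that behaviour (first_free_id is an illustrative helper, not a kernel function):

#include <stdio.h>
#include <stddef.h>

static int first_free_id(const int *used, size_t n, int id)
{
    /* 'used' holds the already-registered indexes in ascending order */
    for (size_t i = 0; i < n; i++) {
        if (used[i] > id)
            break;
        if (used[i] == id)
            id++;
    }
    return id;
}

int main(void)
{
    int used[] = { 0, 1, 3 }; /* hci0, hci1, hci3 registered */

    printf("next BR/EDR index: hci%d\n", first_free_id(used, 3, 0)); /* fills the hci2 hole */
    printf("next AMP index:    hci%d\n", first_free_id(used, 3, 1)); /* AMP starts searching at 1 */
    return 0;
}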

View File

@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
hci_conn_check_pending(hdev);
}
@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);
/* Reset all non-persistent flags */
hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
BIT(HCI_PERIODIC_INQ));
hdev->discovery.state = DISCOVERY_STOPPED;
}
@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
events[4] |= 0x02; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@ -615,6 +630,7 @@ done:
static void hci_setup_link_policy(struct hci_dev *hdev)
{
struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
link_policy = cpu_to_le16(link_policy);
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
&link_policy);
cp.policy = cpu_to_le16(link_policy);
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
memset(&cp, 0, sizeof(cp));
if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, status);
BT_DBG("%s status 0x%x", hdev->name, rp->status);
hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
if (!rp->status)
hdev->inq_tx_power = rp->tx_power;
hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
cancel_delayed_work_sync(&hdev->adv_work);
hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
if (status)
if (status) {
hci_dev_lock(hdev);
mgmt_stop_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@ -2039,6 +2062,12 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (ev->status && conn->state == BT_CONNECTED) {
hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
goto unlock;
}
if (conn->state == BT_CONFIG) {
if (!ev->status)
conn->state = BT_CONNECTED;
@ -2049,6 +2078,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
}
unlock:
hci_dev_unlock(hdev);
}
@ -2102,7 +2132,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
}
if (!ev->status) {
if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
@ -2147,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
case HCI_OP_PERIODIC_INQ:
hci_cc_periodic_inq(hdev, skb);
break;
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@ -2799,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@ -2871,7 +2908,7 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
if (conn->state != BT_CONFIG)
goto unlock;
if (!ev->status) {
if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
@ -2964,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
return;
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@ -2993,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
ssp, info->data, sizeof(info->data));
ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
@ -3315,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
hci_add_adv_entry(hdev, ev);
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@ -3336,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
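The events[4] fix in hci_setup_event_mask() above follows from the HCI event-mask layout: event code N occupies bit N - 1 of the 64-bit mask, so Inquiry Result with RSSI (event 0x22) lands in byte 4, bit 1, i.e. 0x02 rather than 0x04. A short standalone check of that arithmetic (event_mask_pos is an illustrative helper, not kernel code):

#include <stdio.h>
#include <stdint.h>

static void event_mask_pos(unsigned int event, unsigned int *byte, uint8_t *bit)
{
    unsigned int pos = event - 1; /* mask bit index for this event code */

    *byte = pos / 8;
    *bit = 1 << (pos % 8);
}

int main(void)
{
    unsigned int byte;
    uint8_t bit;

    event_mask_pos(0x22, &byte, &bit); /* Inquiry Result with RSSI */
    printf("events[%u] |= 0x%02x\n", byte, bit);

    event_mask_pos(0x2f, &byte, &bit); /* Extended Inquiry Result */
    printf("events[%u] |= 0x%02x\n", byte, bit);
    return 0;
}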

View File

@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
u32 data0, data4;
u16 data1, data2, data3, data5;
__be32 data0, data4;
__be16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
err = device_add(dev);
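The __be32/__be16 annotations in print_bt_uuid() above reflect that the 128-bit UUID is stored as 16 big-endian bytes. A hedged userspace sketch of printing such a UUID in the usual grouped form by assembling the fields byte by byte; be32()/be16() are local helpers, and the sample value is the 16-bit UUID 0x110b expanded with the Bluetooth base UUID.

#include <stdio.h>
#include <stdint.h>

static uint32_t be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
           (uint32_t)p[2] << 8 | p[3];
}

static uint16_t be16(const uint8_t *p)
{
    return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
    const uint8_t uuid[16] = {
        0x00, 0x00, 0x11, 0x0b, 0x00, 0x00, 0x10, 0x00,
        0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb,
    };

    printf("%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
           be32(&uuid[0]), be16(&uuid[4]), be16(&uuid[6]), be16(&uuid[8]),
           be32(&uuid[10]), be16(&uuid[14]));
    return 0;
}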

File diff suppressed because it is too large

View File

@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
return -EINVAL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr);
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;
@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
|| sk->sk_state != BT_BOUND) {
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup)) {
!(sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}
memset(&sec, 0, sizeof(sec));
sec.level = chan->sec_level;
if (chan->conn)
sec.level = chan->conn->hcon->sec_level;
else
sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
@ -592,10 +600,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
sk->sk_state = BT_CONFIG;
chan->state = BT_CONFIG;
/* or for ACL link, under defer_setup time */
} else if (sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup) {
err = l2cap_chan_check_security(chan);
/* or for ACL link */
} else if ((sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk->sk_state == BT_CONNECTED) {
if (!l2cap_chan_check_security(chan))
set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
else
sk->sk_state_change(sk);
} else {
err = -EINVAL;
}
@ -612,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
bt_sk(sk)->defer_setup = opt;
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@ -712,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
release_sock(sk);
if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
}
l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
l2cap_chan_unlock(chan);
release_sock(sk);
return err;
}
@ -733,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
@ -927,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
unsigned long len, int nb,
int *err)
unsigned long len, int nb)
{
struct sock *sk = chan->sk;
struct sk_buff *skb;
int err;
return bt_skb_send_alloc(sk, len, nb, err);
l2cap_chan_unlock(chan);
skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
l2cap_chan_lock(chan);
if (!skb)
return ERR_PTR(err);
return skb;
}
static struct l2cap_ops l2cap_chan_ops = {
@ -948,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@ -968,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
bt_sk(sk)->flags = bt_sk(parent)->flags;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@ -1006,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
chan->flags = 0;
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
l2cap_chan_set_defaults(chan);
}
/* Default config options */
@ -1048,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
chan = l2cap_chan_create(sk);
chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
l2cap_chan_hold(chan);
chan->sk = sk;
l2cap_pi(sk)->chan = chan;
return sk;

View File

@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>
bool enable_hs;
bool enable_le;
#define MGMT_VERSION 1
#define MGMT_REVISION 0
#define MGMT_REVISION 1
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
MGMT_OP_SET_DEVICE_ID,
};
static const u16 mgmt_events[] = {
@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
ev->opcode = cpu_to_le16(cmd);
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
put_unaligned_le16(cmd, &ev->opcode);
ev->opcode = cpu_to_le16(cmd);
ev->status = status;
if (rp)
@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
put_unaligned_le16(MGMT_REVISION, &rp.revision);
rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
u16 num_commands = ARRAY_SIZE(mgmt_commands);
u16 num_events = ARRAY_SIZE(mgmt_events);
u16 *opcode;
const u16 num_commands = ARRAY_SIZE(mgmt_commands);
const u16 num_events = ARRAY_SIZE(mgmt_events);
__le16 *opcode;
size_t rp_size;
int i, err;
@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;
put_unaligned_le16(num_commands, &rp->num_commands);
put_unaligned_le16(num_events, &rp->num_events);
rp->num_commands = __constant_cpu_to_le16(num_commands);
rp->num_events = __constant_cpu_to_le16(num_events);
for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
put_unaligned_le16(count, &rp->num_controllers);
rp->num_controllers = cpu_to_le16(count);
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
put_unaligned_le16(d->id, &rp->index[i++]);
rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;
if (enable_le) {
if (hdev->features[4] & LMP_LE)
settings |= MGMT_SETTING_LE;
}
if (hdev->features[4] & LMP_LE)
settings |= MGMT_SETTING_LE;
return settings;
}
@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}
memcpy(&val, &uuid128[12], 4);
val = le32_to_cpu(val);
val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;
@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}
if (hdev->inq_tx_power) {
ptr[0] = 2;
ptr[1] = EIR_TX_POWER;
ptr[2] = (u8) hdev->inq_tx_power;
eir_len += 3;
ptr += 3;
}
if (hdev->devid_source > 0) {
ptr[0] = 9;
ptr[1] = EIR_DEVICE_ID;
put_unaligned_le16(hdev->devid_source, ptr + 2);
put_unaligned_le16(hdev->devid_vendor, ptr + 4);
put_unaligned_le16(hdev->devid_product, ptr + 6);
put_unaligned_le16(hdev->devid_version, ptr + 8);
eir_len += 10;
ptr += 10;
}
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);
rp.version = hdev->hci_ver;
put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
timeout = get_unaligned_le16(&cp->timeout);
timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_BUSY);
err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_BUSY);
goto failed;
}
@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
if (!enable_le || !(hdev->features[4] & LMP_LE)) {
if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
if (err < 0) {
if (err < 0)
mgmt_pending_remove(cmd);
goto unlock;
}
unlock:
hci_dev_unlock(hdev);
@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto failed;
}
failed:
hci_dev_unlock(hdev);
@ -1368,10 +1381,8 @@ update_class:
}
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto unlock;
}
unlock:
hci_dev_unlock(hdev);
@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
if (!cmd) {
if (!cmd)
err = -ENOMEM;
goto unlock;
}
unlock:
hci_dev_unlock(hdev);
@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
key_count = get_unaligned_le16(&cp->key_count);
key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->disconnect) {
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
put_unaligned_le16(conn->handle, &dc.handle);
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
put_unaligned_le16(conn->handle, &dc.handle);
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}
static u8 link_to_mgmt(u8 link_type, u8 addr_type)
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
return MGMT_ADDR_LE_PUBLIC;
case ADDR_LE_DEV_RANDOM:
return MGMT_ADDR_LE_RANDOM;
return BDADDR_LE_PUBLIC;
default:
return MGMT_ADDR_INVALID;
/* Fallback to LE Random address type */
return BDADDR_LE_RANDOM;
}
case ACL_LINK:
return MGMT_ADDR_BREDR;
default:
return MGMT_ADDR_INVALID;
/* Fallback to BR/EDR type */
return BDADDR_BREDR;
}
}
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
if (rp->addr[i].type == MGMT_ADDR_INVALID)
rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}
put_unaligned_le16(i, &rp->conn_count);
rp->conn_count = cpu_to_le16(i);
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;
bacpy(&rp.addr.bdaddr, &conn->dst);
rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
if (cp->addr.type == MGMT_ADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
if (cp->addr.type == BDADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
cp->addr.type, sec_level, auth_type);
else
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
cp->addr.type, sec_level, auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* For LE, just connecting isn't a proof that the pairing finished */
if (cp->addr.type == MGMT_ADDR_BREDR)
if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
if (type == MGMT_ADDR_BREDR)
if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
goto failed;
}
if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
if (hdev->discovery.state == DISCOVERY_FINDING) {
err = hci_cancel_inquiry(hdev);
if (err < 0)
mgmt_pending_remove(cmd);
switch (hdev->discovery.state) {
case DISCOVERY_FINDING:
if (test_bit(HCI_INQUIRY, &hdev->flags))
err = hci_cancel_inquiry(hdev);
else
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
goto unlock;
err = hci_cancel_le_scan(hdev);
break;
case DISCOVERY_RESOLVING:
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id,
MGMT_OP_STOP_DISCOVERY, 0,
&mgmt_cp->type,
sizeof(mgmt_cp->type));
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}
bacpy(&cp.bdaddr, &e->data.bdaddr);
err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
sizeof(cp), &cp);
break;
default:
BT_DBG("unknown discovery state %u", hdev->discovery.state);
err = -EFAULT;
}
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
&mgmt_cp->type, sizeof(mgmt_cp->type));
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}
bacpy(&cp.bdaddr, &e->data.bdaddr);
err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
&cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_device_id *cp = data;
int err;
__u16 source;
BT_DBG("%s", hdev->name);
source = __le16_to_cpu(cp->source);
if (source > 0x0002)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
hdev->devid_source = source;
hdev->devid_vendor = __le16_to_cpu(cp->vendor);
hdev->devid_product = __le16_to_cpu(cp->product);
hdev->devid_version = __le16_to_cpu(cp->version);
err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
update_eir(hdev);
hci_dev_unlock(hdev);
return err;
}
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;
key_count = get_unaligned_le16(&cp->key_count);
key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
hci_add_ltk(hdev, &key->addr.bdaddr,
bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}
struct mgmt_handler {
static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};
@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
struct mgmt_handler *handler;
const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
hdr = buf;
opcode = get_unaligned_le16(&hdr->opcode);
index = get_unaligned_le16(&hdr->index);
len = get_unaligned_le16(&hdr->len);
opcode = __le16_to_cpu(hdr->opcode);
index = __le16_to_cpu(hdr->index);
len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = MGMT_ADDR_BREDR;
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = key->bdaddr_type;
ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->flags = __cpu_to_le32(flags);
@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
ev.type = link_to_mgmt(link_type, addr_type);
ev.type = link_to_bdaddr(link_type, addr_type);
err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);
if (sk)
sock_put(sk);
sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = link_to_mgmt(link_type, addr_type);
rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = MGMT_ADDR_BREDR;
ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = MGMT_ADDR_BREDR;
rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = MGMT_ADDR_BREDR;
rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
ev.value = value;
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = link_to_mgmt(link_type, addr_type);
rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));
@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
err = new_settings(hdev, NULL);
err = new_settings(hdev, NULL);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
cmd_status_rsp, &mgmt_err);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);
return err;
}
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_mgmt(link_type, addr_type);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
put_unaligned_le16(eir_len, &ev->eir_len);
ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable Low Energy support");

@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
bt_sk(sk)->defer_setup = opt;
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
if (bt_sk(parent)->defer_setup)
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;

@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static int sco_conn_del(struct hci_conn *conn, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn || status)
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
conn = sco_conn_add(hcon, 0);
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return count;
return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
struct sock *sk;
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
return sk;
}
return NULL;
}
/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
write_lock(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
write_unlock(&sco_sk_list.lock);
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
write_lock(&sco_sk_list.lock);
if (__sco_get_sock_listen_by_addr(src)) {
err = -EADDRINUSE;
goto unlock;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
unlock:
write_unlock(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else

@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
ident.ediv = cpu_to_le16(ediv);
ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);