Merge tag 'for-net-next-2024-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

 - btusb: add Foxconn 0xe0fc for Qualcomm WCN785x
 - btmtk: Fix ISO interface handling
 - Add quirk for ATS2851
 - btusb: Add RTL8852BE device 0489:e123
 - ISO: Do not emit LE PA/BIG Create Sync if previous is pending
 - btusb: Add USB HW IDs for MT7920/MT7925
 - btintel_pcie: Add handshake between driver and firmware
 - btintel_pcie: Add recovery mechanism
 - hci_conn: Use disable_delayed_work_sync
 - SCO: Use kref to track lifetime of sco_conn
 - ISO: Use kref to track lifetime of iso_conn
 - btnxpuart: Add GPIO support to power save feature
 - btusb: Add 0x0489:0xe0f3 and 0x13d3:0x3623 for Qualcomm WCN785x

* tag 'for-net-next-2024-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next: (51 commits)
  Bluetooth: MGMT: Add initial implementation of MGMT_OP_HCI_CMD_SYNC
  Bluetooth: fix use-after-free in device_for_each_child()
  Bluetooth: btintel: Direct exception event to bluetooth stack
  Bluetooth: hci_core: Fix calling mgmt_device_connected
  Bluetooth: hci_bcm: Use the devm_clk_get_optional() helper
  Bluetooth: ISO: Send BIG Create Sync via hci_sync
  Bluetooth: hci_conn: Remove alloc from critical section
  Bluetooth: ISO: Use kref to track lifetime of iso_conn
  Bluetooth: SCO: Use kref to track lifetime of sco_conn
  Bluetooth: HCI: Add IPC(11) bus type
  Bluetooth: btusb: Add 3 HWIDs for MT7925
  Bluetooth: btusb: Add new VID/PID 0489/e124 for MT7925
  Bluetooth: ISO: Update hci_conn_hash_lookup_big for Broadcast slave
  Bluetooth: ISO: Do not emit LE BIG Create Sync if previous is pending
  Bluetooth: ISO: Fix matching parent socket for BIS slave
  Bluetooth: ISO: Do not emit LE PA Create Sync if previous is pending
  Bluetooth: btrtl: Decrease HCI_OP_RESET timeout from 10 s to 2 s
  Bluetooth: btbcm: fix missing of_node_put() in btbcm_get_board_name()
  Bluetooth: btusb: Add new VID/PID 0489/e111 for MT7925
  Bluetooth: btmtk: adjust the position to init iso data anchor
  ...
====================

Link: https://patch.msgid.link/20241114214731.1994446-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-11-15 14:16:28 -08:00
commit 6cd663f03f
29 changed files with 1220 additions and 300 deletions

View File

@ -34,6 +34,12 @@ properties:
firmware-name:
maxItems: 1
device-wakeup-gpios:
maxItems: 1
description:
Host-To-Chip power save mechanism is driven by this GPIO
connected to BT_WAKE_IN pin of the NXP chipset.
required:
- compatible
@ -41,10 +47,12 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
serial {
bluetooth {
compatible = "nxp,88w8987-bt";
fw-init-baudrate = <3000000>;
firmware-name = "uartuart8987_bt_v0.bin";
device-wakeup-gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
};
};

View File

@ -541,11 +541,10 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
static const char *btbcm_get_board_name(struct device *dev)
{
#ifdef CONFIG_OF
struct device_node *root;
struct device_node *root __free(device_node) = of_find_node_by_path("/");
char *board_type;
const char *tmp;
root = of_find_node_by_path("/");
if (!root)
return NULL;
@ -555,7 +554,6 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
strreplace(board_type, '/', '-');
of_node_put(root);
return board_type;
#else
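
For context only, not part of the patch: the btbcm hunk above adopts the scoped-cleanup annotation from <linux/cleanup.h>; the OF headers define a DEFINE_FREE(device_node, ...) rule, so the reference taken by of_find_node_by_path() is dropped automatically when the variable goes out of scope, which is why the explicit of_node_put(root) call is removed. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/cleanup.h>
#include <linux/of.h>

static const char *example_root_compatible(void)
{
	/* of_node_put() runs automatically when "root" leaves scope,
	 * including on every early-return path, so no explicit put or
	 * goto-based unwinding is needed.
	 */
	struct device_node *root __free(device_node) =
		of_find_node_by_path("/");
	const char *tmp;

	if (!root)
		return NULL;

	if (of_property_read_string_index(root, "compatible", 0, &tmp))
		return NULL;

	return tmp;
}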

View File

@ -1040,7 +1040,7 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev,
* as needed.
*
* Send set of commands with 4 byte alignment from the
* firmware data buffer as a single Data fragement.
* firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
@ -1252,6 +1252,12 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
struct intel_reset params;
struct sk_buff *skb;
/* PCIe transport uses shared hardware reset mechanism for recovery
* which gets triggered in pcie *setup* function on error.
*/
if (hdev->bus == HCI_PCI)
return;
/* Send Intel Reset command. This will result in
* re-enumeration of BT controller.
*
@ -1267,6 +1273,7 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
* boot_param: Boot address
*
*/
params.reset_type = 0x01;
params.patch_enable = 0x01;
params.ddc_reload = 0x01;
@ -1841,6 +1848,37 @@ static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
return 0;
}
static int btintel_boot_wait_d0(struct hci_dev *hdev, ktime_t calltime,
int msec)
{
ktime_t delta, rettime;
unsigned long long duration;
int err;
bt_dev_info(hdev, "Waiting for device transition to d0");
err = btintel_wait_on_flag_timeout(hdev, INTEL_WAIT_FOR_D0,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(msec));
if (err == -EINTR) {
bt_dev_err(hdev, "Device d0 move interrupted");
return -EINTR;
}
if (err) {
bt_dev_err(hdev, "Device d0 move timeout");
return -ETIMEDOUT;
}
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
bt_dev_info(hdev, "Device moved to D0 in %llu usecs", duration);
return 0;
}
static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
{
ktime_t calltime;
@ -1849,6 +1887,7 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
calltime = ktime_get();
btintel_set_flag(hdev, INTEL_BOOTING);
btintel_set_flag(hdev, INTEL_WAIT_FOR_D0);
err = btintel_send_intel_reset(hdev, boot_addr);
if (err) {
@ -1861,13 +1900,28 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
* is done by the operational firmware sending bootup notification.
*
* Booting into operational firmware should not take longer than
* 1 second. However if that happens, then just fail the setup
* 5 second. However if that happens, then just fail the setup
* since something went wrong.
*/
err = btintel_boot_wait(hdev, calltime, 1000);
if (err == -ETIMEDOUT)
err = btintel_boot_wait(hdev, calltime, 5000);
if (err == -ETIMEDOUT) {
btintel_reset_to_bootloader(hdev);
goto exit_error;
}
if (hdev->bus == HCI_PCI) {
/* In case of PCIe, after receiving bootup event, driver performs
* D0 entry by writing 0 to sleep control register (check
* btintel_pcie_recv_event())
* Firmware acks with alive interrupt indicating host is full ready to
* perform BT operation. Lets wait here till INTEL_WAIT_FOR_D0
* bit is cleared.
*/
calltime = ktime_get();
err = btintel_boot_wait_d0(hdev, calltime, 2000);
}
exit_error:
return err;
}
@ -2693,20 +2747,32 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
struct btintel_dsbr_cmd cmd;
struct sk_buff *skb;
u32 dsbr, cnvi;
u8 status;
u32 dsbr;
bool apply_dsbr;
int err;
/* DSBR command needs to be sent for BlazarI + B0 step product after
* downloading IML image.
cnvi = ver->cnvi_top & 0xfff;
/* DSBR command needs to be sent for,
* 1. BlazarI or BlazarIW + B0 step product in IML image.
* 2. Gale Peak2 or BlazarU in OP image.
*/
apply_dsbr = (ver->img_type == BTINTEL_IMG_IML &&
((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) &&
INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01);
if (!apply_dsbr)
switch (cnvi) {
case BTINTEL_CNVI_BLAZARI:
case BTINTEL_CNVI_BLAZARIW:
if (ver->img_type == BTINTEL_IMG_IML &&
INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01)
break;
return 0;
case BTINTEL_CNVI_GAP:
case BTINTEL_CNVI_BLAZARU:
if (ver->img_type == BTINTEL_IMG_OP &&
hdev->bus == HCI_USB)
break;
return 0;
default:
return 0;
}
dsbr = 0;
err = btintel_uefi_get_dsbr(&dsbr);
@ -2749,6 +2815,13 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
boot_param = 0x00000000;
/* In case of PCIe, this function might get called multiple times with
* same hdev instance if there is any error on firmware download.
* Need to clear stale bits of previous firmware download attempt.
*/
for (int i = 0; i < __INTEL_NUM_FLAGS; i++)
btintel_clear_flag(hdev, i);
btintel_set_flag(hdev, INTEL_BOOTLOADER);
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@ -2835,7 +2908,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
/* All Intel new genration controllers support the Microsoft vendor
/* All Intel new generation controllers support the Microsoft vendor
* extension are using 0xFC1E for VsMsftOpCode.
*/
case 0x17:
@ -3273,7 +3346,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
struct intel_tlv *tlv = (void *)&skb->data[5];
@ -3301,6 +3374,7 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
recv_frame:
return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL_GPL(btintel_diagnostics);
int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
@ -3320,7 +3394,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* indicating that the bootup completed.
*/
btintel_bootup(hdev, ptr, len);
break;
kfree_skb(skb);
return 0;
case 0x06:
/* When the firmware loading completes the
* device sends out a vendor specific event
@ -3328,7 +3403,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* loading.
*/
btintel_secure_send_result(hdev, ptr, len);
break;
kfree_skb(skb);
return 0;
}
}

View File

@ -53,6 +53,9 @@ struct intel_tlv {
} __packed;
#define BTINTEL_CNVI_BLAZARI 0x900
#define BTINTEL_CNVI_BLAZARIW 0x901
#define BTINTEL_CNVI_GAP 0x910
#define BTINTEL_CNVI_BLAZARU 0x930
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
@ -178,6 +181,7 @@ enum {
INTEL_ROM_LEGACY,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
INTEL_ACPI_RESET_ACTIVE,
INTEL_WAIT_FOR_D0,
__INTEL_NUM_FLAGS,
};
@ -249,6 +253,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@ -382,4 +387,9 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
return -EOPNOTSUPP;
}
#endif

View File

@ -48,6 +48,17 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
/* Alive interrupt context */
enum {
BTINTEL_PCIE_ROM,
BTINTEL_PCIE_FW_DL,
BTINTEL_PCIE_HCI_RESET,
BTINTEL_PCIE_INTEL_HCI_RESET1,
BTINTEL_PCIE_INTEL_HCI_RESET2,
BTINTEL_PCIE_D0,
BTINTEL_PCIE_D3
};
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
{
@ -64,24 +75,6 @@ static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}
static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
u32 bits, u32 mask, int timeout_us)
{
int t = 0;
u32 reg;
do {
reg = btintel_pcie_rd_reg32(data, offset);
if ((reg & mask) == (bits & mask))
return t;
udelay(POLL_INTERVAL_US);
t += POLL_INTERVAL_US;
} while (t < timeout_us);
return -ETIMEDOUT;
}
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
u8 queue = entry->entry;
@ -237,10 +230,47 @@ static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}
static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
{
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
u32 reg;
int retry = 3;
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
do {
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
break;
usleep_range(10000, 12000);
} while (--retry > 0);
usleep_range(10000, 12000);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
usleep_range(10000, 12000);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
/* If shared hardware reset is success then boot stage register shall be
* set to 0
*/
return reg == 0 ? 0 : -ENODEV;
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
@ -252,6 +282,7 @@ static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
int err;
u32 reg;
data->gp0_received = false;
@ -267,22 +298,17 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
data->boot_stage_cache = 0x0;
/* Set MAC_INIT bit to start primary bootloader */
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
/* Wait until MAC_ACCESS is granted */
err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
if (err < 0)
return -ENODEV;
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
/* MAC is ready. Enable BT FUNC */
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
@ -290,8 +316,9 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
/* wait for interrupt from the device after booting up to primary
* bootloader.
*/
data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
if (!err)
return -ETIME;
@ -302,12 +329,77 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
* Sometimes during firmware image switching from ROM to IML or IML to OP image,
* the previous image bit is not cleared by firmware when alive interrupt is
* received. Driver needs to take care of these sticky bits when deciding the
* current image running on controller.
* Ex: 0x10 and 0x11 - both represents that controller is running IML
*/
static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
}
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
}
static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
}
static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
}
static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
{
return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
}
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
u32 dxstate)
{
bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
}
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
{
switch (alive_intr_ctxt) {
case BTINTEL_PCIE_ROM:
return "rom";
case BTINTEL_PCIE_FW_DL:
return "fw_dl";
case BTINTEL_PCIE_D0:
return "d0";
case BTINTEL_PCIE_D3:
return "d3";
case BTINTEL_PCIE_HCI_RESET:
return "hci_reset";
case BTINTEL_PCIE_INTEL_HCI_RESET1:
return "intel_reset1";
case BTINTEL_PCIE_INTEL_HCI_RESET2:
return "intel_reset2";
default:
return "unknown";
}
}
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
u32 reg;
bool submit_rx, signal_waitq;
u32 reg, old_ctxt;
/* This interrupt is for three different causes and it is not easy to
* know what causes the interrupt. So, it compares each register value
@ -317,20 +409,87 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->boot_stage_cache)
data->boot_stage_cache = reg;
bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
data->boot_stage_cache, reg);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
data->gp0_received = true;
/* If the boot stage is OP or IML, reset IA and start RX again */
if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
old_ctxt = data->alive_intr_ctxt;
submit_rx = false;
signal_waitq = false;
switch (data->alive_intr_ctxt) {
case BTINTEL_PCIE_ROM:
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
signal_waitq = true;
break;
case BTINTEL_PCIE_FW_DL:
/* Error case is already handled. Ideally control shall not
* reach here
*/
break;
case BTINTEL_PCIE_INTEL_HCI_RESET1:
if (btintel_pcie_in_op(data)) {
submit_rx = true;
break;
}
if (btintel_pcie_in_iml(data)) {
submit_rx = true;
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
break;
}
break;
case BTINTEL_PCIE_INTEL_HCI_RESET2:
if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
}
break;
case BTINTEL_PCIE_D0:
if (btintel_pcie_in_d3(data)) {
data->alive_intr_ctxt = BTINTEL_PCIE_D3;
signal_waitq = true;
break;
}
break;
case BTINTEL_PCIE_D3:
if (btintel_pcie_in_d0(data)) {
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
submit_rx = true;
signal_waitq = true;
break;
}
break;
case BTINTEL_PCIE_HCI_RESET:
data->alive_intr_ctxt = BTINTEL_PCIE_D0;
submit_rx = true;
signal_waitq = true;
break;
default:
bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
data->alive_intr_ctxt);
break;
}
if (submit_rx) {
btintel_pcie_reset_ia(data);
btintel_pcie_start_rx(data);
}
wake_up(&data->gp0_wait_q);
if (signal_waitq) {
bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
wake_up(&data->gp0_wait_q);
}
if (old_ctxt != data->alive_intr_ctxt)
bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
btintel_pcie_alivectxt_state2str(old_ctxt),
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
}
/* This function handles the MSX-X interrupt for rx queue 0 which is for TX
@ -364,6 +523,83 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
}
}
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *)skb->data;
const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
hdr->plen > 0) {
const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
switch (skb->data[2]) {
case 0x02:
/* When switching to the operational firmware
* the device sends a vendor specific event
* indicating that the bootup completed.
*/
btintel_bootup(hdev, ptr, len);
/* If bootup event is from operational image,
* driver needs to write sleep control register to
* move into D0 state
*/
if (btintel_pcie_in_op(data)) {
btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
kfree_skb(skb);
return 0;
}
if (btintel_pcie_in_iml(data)) {
/* In case of IML, there is no concept
* of D0 transition. Just mimic as if
* IML moved to D0 by clearing INTEL_WAIT_FOR_D0
* bit and waking up the task waiting on
* INTEL_WAIT_FOR_D0. This is required
* as intel_boot() is common function for
* both IML and OP image loading.
*/
if (btintel_test_and_clear_flag(data->hdev,
INTEL_WAIT_FOR_D0))
btintel_wake_up_flag(data->hdev,
INTEL_WAIT_FOR_D0);
}
kfree_skb(skb);
return 0;
case 0x06:
/* When the firmware loading completes the
* device sends out a vendor specific event
* indicating the result of the firmware
* loading.
*/
btintel_secure_send_result(hdev, ptr, len);
kfree_skb(skb);
return 0;
}
}
/* Handle all diagnostics events separately. May still call
* hci_recv_frame.
*/
if (len >= sizeof(diagnostics_hdr) &&
memcmp(&skb->data[2], diagnostics_hdr,
sizeof(diagnostics_hdr)) == 0) {
return btintel_diagnostics(hdev, skb);
}
/* This is a debug event that comes from IML and OP image when it
* starts execution. There is no need pass this event to stack.
*/
if (skb->data[2] == 0x97)
return 0;
}
return hci_recv_frame(hdev, skb);
}
/* Process the received rx data
* It check the frame header to identify the data type and create skb
* and calling HCI API
@ -465,7 +701,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
hdev->stat.byte_rx += plen;
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
ret = btintel_recv_event(hdev, new_skb);
ret = btintel_pcie_recv_event(hdev, new_skb);
else
ret = hci_recv_frame(hdev, new_skb);
@ -516,10 +752,8 @@ static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status
buf += sizeof(*rfh_hdr);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
ret = -ENOMEM;
if (!skb)
goto resubmit;
}
skb_put_data(skb, buf, len);
skb_queue_tail(&data->rx_skb_q, skb);
@ -734,13 +968,9 @@ static int btintel_pcie_config_pcie(struct pci_dev *pdev,
return err;
}
err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (err)
return err;
data->base_addr = pcim_iomap_table(pdev)[0];
if (!data->base_addr)
return -ENODEV;
data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
if (IS_ERR(data->base_addr))
return PTR_ERR(data->base_addr);
err = btintel_pcie_setup_irq(data);
if (err)
@ -1053,8 +1283,11 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
struct hci_command_hdr *cmd;
__u16 opcode = ~0;
int ret;
u32 type;
u32 old_ctxt;
/* Due to the fw limitation, the type header of the packet should be
* 4 bytes unlike 1 byte for UART. In UART, the firmware can read
@ -1073,6 +1306,8 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
type = BTINTEL_PCIE_HCI_CMD_PKT;
cmd = (void *)skb->data;
opcode = le16_to_cpu(cmd->opcode);
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
@ -1111,6 +1346,30 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
(opcode == HCI_OP_RESET || opcode == 0xfc01)) {
old_ctxt = data->alive_intr_ctxt;
data->alive_intr_ctxt =
(opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
BTINTEL_PCIE_HCI_RESET);
bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
if (opcode == HCI_OP_RESET) {
data->gp0_received = false;
ret = wait_event_timeout(data->gp0_wait_q,
data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
if (!ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "No alive interrupt received for %s",
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
ret = -ETIME;
goto exit_error;
}
}
}
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@ -1128,7 +1387,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
static int btintel_pcie_setup(struct hci_dev *hdev)
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
@ -1219,6 +1478,32 @@ exit_error:
return err;
}
static int btintel_pcie_setup(struct hci_dev *hdev)
{
int err, fw_dl_retry = 0;
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
break;
}
usleep_range(10000, 12000);
btintel_pcie_reset_ia(data);
btintel_pcie_config_msix(data);
err = btintel_pcie_enable_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to enable hardware: %d", err);
break;
}
btintel_pcie_start_rx(data);
}
return err;
}
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;

View File

@ -12,6 +12,7 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
@ -22,6 +23,8 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */
@ -32,6 +35,7 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
/* Registers for MSI-X */
#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000)
@ -55,6 +59,16 @@ enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
};
/* PCIe device states
* Host-Device interface is active
* Host-Device interface is inactive(as reflected by IPC_SLEEP_CONTROL_CSR_AD)
* Host-Device interface is inactive(as reflected by IPC_SLEEP_CONTROL_CSR_AD)
*/
enum {
BTINTEL_PCIE_STATE_D0 = 0,
BTINTEL_PCIE_STATE_D3_HOT = 2,
BTINTEL_PCIE_STATE_D3_COLD = 3,
};
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
@ -67,7 +81,7 @@ enum msix_hw_int_causes {
#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
/* The number of descriptors in TX/RX queues */
#define BTINTEL_DESCS_COUNT 16
@ -343,6 +357,7 @@ struct rxq {
* @ia: Index Array struct
* @txq: TX Queue struct
* @rxq: RX Queue struct
* @alive_intr_ctxt: Alive interrupt context
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@ -389,6 +404,7 @@ struct btintel_pcie_data {
struct ia ia;
struct txq txq;
struct rxq rxq;
u32 alive_intr_ctxt;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,

View File

@ -324,7 +324,7 @@ int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_params.data = NULL;
wmt_params.status = NULL;
/* Activate funciton the firmware providing to */
/* Activate function the firmware providing to */
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
@ -1215,7 +1215,6 @@ static int btmtk_usb_isointf_init(struct hci_dev *hdev)
struct sk_buff *skb;
int err;
init_usb_anchor(&btmtk_data->isopkt_anchor);
spin_lock_init(&btmtk_data->isorxlock);
__set_mtk_intr_interface(hdev);

View File

@ -681,7 +681,7 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0)
goto err_release_irq;
/* Explitly set write-1-clear method */
/* Explicitly set write-1-clear method */
val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@ -1328,6 +1328,8 @@ static int btmtksdio_probe(struct sdio_func *func,
{
struct btmtksdio_dev *bdev;
struct hci_dev *hdev;
struct device_node *old_node;
bool restore_node;
int err;
bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
@ -1396,7 +1398,7 @@ static int btmtksdio_probe(struct sdio_func *func,
if (pm_runtime_enabled(bdev->dev))
pm_runtime_disable(bdev->dev);
/* As explaination in drivers/mmc/core/sdio_bus.c tells us:
/* As explanation in drivers/mmc/core/sdio_bus.c tells us:
* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@ -1411,13 +1413,24 @@ static int btmtksdio_probe(struct sdio_func *func,
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
"mediatek,mt7921s-bluetooth");
restore_node = false;
if (!of_device_is_compatible(bdev->dev->of_node, "mediatek,mt7921s-bluetooth")) {
restore_node = true;
old_node = bdev->dev->of_node;
bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
"mediatek,mt7921s-bluetooth");
}
bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(bdev->reset))
err = PTR_ERR(bdev->reset);
if (restore_node) {
of_node_put(bdev->dev->of_node);
bdev->dev->of_node = old_node;
}
return err;
}

View File

@ -327,7 +327,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
if (count <= 0)
return NULL;
/* Tranlate to how much the size of data H4 can handle so far */
/* Translate to how much the size of data H4 can handle so far */
*sz_h4 = min_t(int, count, bdev->stp_dlen);
/* Update the remaining size of STP packet */

View File

@ -16,6 +16,7 @@
#include <linux/crc8.h>
#include <linux/crc32.h>
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -34,16 +35,17 @@
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
#define FIRMWARE_W8987 "uart8987_bt_v0.bin"
#define FIRMWARE_W8987 "uart8987_bt.bin"
#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
#define FIRMWARE_IW416 "uartiw416_bt_v0.bin"
#define FIRMWARE_IW416 "uartiw416_bt.bin"
#define FIRMWARE_IW416_OLD "uartiw416_bt_v0.bin"
#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
#define FIRMWARE_IW615 "uartspi_iw610_v0.bin"
#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se"
#define FIRMWARE_IW610 "uartspi_iw610.bin"
#define FIRMWARE_SECURE_IW610 "uartspi_iw610.bin.se"
#define FIRMWARE_IW624 "uartiw624_bt.bin"
#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
#define FIRMWARE_AW693 "uartaw693_bt.bin"
@ -59,8 +61,8 @@
#define CHIP_ID_IW624c 0x8001
#define CHIP_ID_AW693a0 0x8200
#define CHIP_ID_AW693a1 0x8201
#define CHIP_ID_IW615a0 0x8800
#define CHIP_ID_IW615a1 0x8801
#define CHIP_ID_IW610a0 0x8800
#define CHIP_ID_IW610a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@ -81,6 +83,7 @@
#define WAKEUP_METHOD_BREAK 1
#define WAKEUP_METHOD_EXT_BREAK 2
#define WAKEUP_METHOD_RTS 3
#define WAKEUP_METHOD_GPIO 4
#define WAKEUP_METHOD_INVALID 0xff
/* power save mode status */
@ -134,6 +137,7 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
struct gpio_desc *h2c_ps_gpio;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@ -364,7 +368,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
int status;
int status = 0;
if (psdata->ps_state == ps_state ||
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
@ -372,6 +376,14 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
case WAKEUP_METHOD_GPIO:
if (ps_state == PS_STATE_AWAKE)
gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
else
gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 1);
bt_dev_dbg(hdev, "Set h2c_ps_gpio: %s",
str_high_low(ps_state == PS_STATE_SLEEP));
break;
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
@ -421,15 +433,29 @@ static void ps_timeout_func(struct timer_list *t)
}
}
static void ps_setup(struct hci_dev *hdev)
static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(psdata->h2c_ps_gpio)) {
bt_dev_err(hdev, "Error fetching device-wakeup-gpios: %ld",
PTR_ERR(psdata->h2c_ps_gpio));
return PTR_ERR(psdata->h2c_ps_gpio);
}
if (!psdata->h2c_ps_gpio)
psdata->h2c_wakeup_gpio = 0xff;
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
return 0;
}
static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
@ -515,6 +541,9 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
switch (psdata->h2c_wakeupmode) {
case WAKEUP_METHOD_GPIO:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
break;
@ -549,6 +578,7 @@ static void ps_init(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
u8 default_h2c_wakeup_mode = DEFAULT_H2C_WAKEUP_MODE;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS);
usleep_range(5000, 10000);
@ -560,8 +590,17 @@ static void ps_init(struct hci_dev *hdev)
psdata->c2h_wakeup_gpio = 0xff;
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
default_h2c_wakeup_mode = WAKEUP_METHOD_GPIO;
psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS;
switch (DEFAULT_H2C_WAKEUP_MODE) {
switch (default_h2c_wakeup_mode) {
case WAKEUP_METHOD_GPIO:
psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
usleep_range(5000, 10000);
break;
case WAKEUP_METHOD_DTR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
@ -946,12 +985,12 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
case CHIP_ID_IW615a0:
case CHIP_ID_IW615a1:
case CHIP_ID_IW610a0:
case CHIP_ID_IW610a1:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_IW615;
fw_name = FIRMWARE_IW610;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
fw_name = FIRMWARE_SECURE_IW615;
fw_name = FIRMWARE_SECURE_IW610;
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
@ -971,6 +1010,9 @@ static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
case CHIP_ID_W9098:
fw_name_old = FIRMWARE_W9098_OLD;
break;
case CHIP_ID_IW416:
fw_name_old = FIRMWARE_IW416_OLD;
break;
}
return fw_name_old;
}
@ -1275,6 +1317,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio;
psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio;
switch (wakeup_parm.h2c_wakeupmode) {
case BT_CTRL_WAKEUP_METHOD_GPIO:
psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
break;
case BT_CTRL_WAKEUP_METHOD_DSR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
break;
@ -1505,13 +1550,17 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
hci_free_dev(hdev);
return -ENODEV;
goto probe_fail;
}
ps_setup(hdev);
if (ps_setup(hdev))
goto probe_fail;
return 0;
probe_fail:
hci_free_dev(hdev);
return -ENODEV;
}
static void nxp_serdev_remove(struct serdev_device *serdev)

View File

@ -1371,7 +1371,7 @@ int btrtl_shutdown_realtek(struct hci_dev *hdev)
/* According to the vendor driver, BT must be reset on close to avoid
* firmware crash.
*/
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "HCI reset during shutdown failed");

View File

@ -371,6 +371,12 @@ static const struct usb_device_id quirks_table[] = {
/* QCA WCN785x chipset */
{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@ -524,6 +530,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@ -563,6 +571,16 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7920 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@ -630,12 +648,24 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
@ -846,6 +876,7 @@ struct btusb_data {
int (*suspend)(struct hci_dev *hdev);
int (*resume)(struct hci_dev *hdev);
int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
unsigned cmd_timeout_cnt;
@ -1061,7 +1092,7 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
/* Trigger dequeue immediatelly if an event is received */
/* Trigger dequeue immediately if an event is received */
schedule_delayed_work(&data->rx_work, 0);
}
@ -2616,13 +2647,14 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
}
set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
init_usb_anchor(&btmtk_data->isopkt_anchor);
}
static void btusb_mtk_release_iso_intf(struct btusb_data *data)
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
if (btmtk_data->isopkt_intf) {
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
@ -2636,6 +2668,16 @@ static void btusb_mtk_release_iso_intf(struct btusb_data *data)
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
static int btusb_mtk_disconnect(struct hci_dev *hdev)
{
/* This function describes the specific additional steps taken by MediaTek
* when Bluetooth usb driver's resume function is called.
*/
btusb_mtk_release_iso_intf(hdev);
return 0;
}
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@ -2652,8 +2694,8 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
btusb_mtk_release_iso_intf(data);
/* Release MediaTek ISO data interface */
btusb_mtk_release_iso_intf(hdev);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@ -2698,22 +2740,24 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
btmtk_data->reset_sync = btusb_mtk_reset;
/* Claim ISO data interface and endpoint */
btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
if (btmtk_data->isopkt_intf)
if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
btusb_mtk_claim_iso_intf(data);
}
return btmtk_usb_setup(hdev);
}
static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
int ret;
if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
btusb_mtk_release_iso_intf(data);
ret = btmtk_usb_shutdown(hdev);
return btmtk_usb_shutdown(hdev);
/* Release MediaTek iso interface after shutdown */
btusb_mtk_release_iso_intf(hdev);
return ret;
}
#ifdef CONFIG_PM
@ -3825,6 +3869,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
data->disconnect = btusb_mtk_disconnect;
}
if (id->driver_info & BTUSB_SWAVE) {
@ -3896,6 +3941,8 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
}
if (!reset)
@ -4013,6 +4060,9 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->diag)
usb_set_intfdata(data->diag, NULL);
if (data->disconnect)
data->disconnect(hdev);
hci_unregister_dev(hdev);
if (intf == data->intf) {

View File

@ -1068,17 +1068,17 @@ static struct clk *bcm_get_txco(struct device *dev)
struct clk *clk;
/* New explicit name */
clk = devm_clk_get(dev, "txco");
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
clk = devm_clk_get_optional(dev, "txco");
if (clk)
return clk;
/* Deprecated name */
clk = devm_clk_get(dev, "extclk");
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
clk = devm_clk_get_optional(dev, "extclk");
if (clk)
return clk;
/* Original code used no name at all */
return devm_clk_get(dev, NULL);
return devm_clk_get_optional(dev, NULL);
}
static int bcm_get_resources(struct bcm_device *dev)
@ -1093,21 +1093,12 @@ static int bcm_get_resources(struct bcm_device *dev)
return 0;
dev->txco_clk = bcm_get_txco(dev->dev);
/* Handle deferred probing */
if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER))
if (IS_ERR(dev->txco_clk))
return PTR_ERR(dev->txco_clk);
/* Ignore all other errors as before */
if (IS_ERR(dev->txco_clk))
dev->txco_clk = NULL;
dev->lpo_clk = devm_clk_get(dev->dev, "lpo");
if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER))
return PTR_ERR(dev->lpo_clk);
dev->lpo_clk = devm_clk_get_optional(dev->dev, "lpo");
if (IS_ERR(dev->lpo_clk))
dev->lpo_clk = NULL;
return PTR_ERR(dev->lpo_clk);
/* Check if we accidentally fetched the lpo clock twice */
if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) {

View File

@ -594,7 +594,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is
* available.
*
* Arguments: tty pointer to tty isntance data
* Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes

View File

@ -305,7 +305,7 @@ static void ll_device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmittion (padding, crc, etc) */
/* Enqueue frame for transmission (padding, crc, etc) */
/* may be called from two simultaneous tasklets */
static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{

View File

@ -501,7 +501,7 @@ static int nokia_close(struct hci_uart *hu)
return 0;
}
/* Enqueue frame for transmittion (padding, crc, etc) */
/* Enqueue frame for transmission (padding, crc, etc) */
static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct nokia_bt_dev *btdev = hu->priv;

View File

@ -873,7 +873,7 @@ static void device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmittion (padding, crc, etc) may be called from
/* Enqueue frame for transmission (padding, crc, etc) may be called from
* two simultaneous tasklets.
*/
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
@ -1059,7 +1059,7 @@ static void qca_controller_memdump(struct work_struct *work)
if (!seq_no) {
/* This is the first frame of memdump packet from
* the controller, Disable IBS to recevie dump
* the controller, Disable IBS to receive dump
* with out any interruption, ideally time required for
* the controller to send the dump is 8 seconds. let us
* start timer to handle this asynchronous activity.
@ -2294,13 +2294,6 @@ static int qca_init_regulators(struct qca_power *qca,
return 0;
}
static void qca_clk_disable_unprepare(void *data)
{
struct clk *clk = data;
clk_disable_unprepare(clk);
}
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
@ -2358,7 +2351,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* Backward compatibility with old DT sources. If the
* node doesn't have the 'enable-gpios' property then
* let's use the power sequencer. Otherwise, let's
* drive everything outselves.
* drive everything ourselves.
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
@ -2433,25 +2426,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->bt_en)
power_ctrl_enabled = false;
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
&serdev->dev, NULL, SUSCLK_RATE_32KHZ);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
if (err)
return err;
err = clk_prepare_enable(qcadev->susclk);
if (err)
return err;
err = devm_add_action_or_reset(&serdev->dev,
qca_clk_disable_unprepare,
qcadev->susclk);
if (err)
return err;
}
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
@ -2530,7 +2510,7 @@ static void qca_serdev_shutdown(struct device *dev)
hci_dev_test_flag(hdev, HCI_SETUP))
return;
/* The serdev must be in open state when conrol logic arrives
/* The serdev must be in open state when control logic arrives
* here, so also fix the use-after-free issue caused by that
* the serdev is flushed or wrote after it is closed.
*/

View File

@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright 2023 NXP
Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@ -29,6 +29,7 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_ISO_SIZE 251
#define HCI_MAX_ISO_BIS 31
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
@ -67,6 +68,7 @@
#define HCI_I2C 8
#define HCI_SMD 9
#define HCI_VIRTIO 10
#define HCI_IPC 11
/* HCI device quirks */
enum {
@ -300,6 +302,20 @@ enum {
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
/*
* When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
* disabled. This is required for the Actions Semiconductor ATS2851
* based controllers, which erroneously claims to support it.
*/
HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
/*
* When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
* skipped. This is required for the Actions Semiconductor ATS2851
* based controllers, due to a race condition in pairing process.
*/
HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
/* When this quirk is set, MSFT extension monitor tracking by
* address filter is supported. Since tracking quantity of each
* pattern is limited, this feature supports tracking multiple
@ -683,6 +699,7 @@ enum {
#define HCI_RSSI_INVALID 127
#define HCI_SYNC_HANDLE_INVALID 0xffff
#define HCI_SID_INVALID 0xff
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01

View File

@ -668,6 +668,7 @@ struct hci_conn {
__u8 adv_instance;
__u16 handle;
__u16 sync_handle;
__u8 sid;
__u16 state;
__u16 mtu;
__u8 mode;
@ -710,6 +711,9 @@ struct hci_conn {
__s8 tx_power;
__s8 max_tx_power;
struct bt_iso_qos iso_qos;
__u8 num_bis;
__u8 bis[HCI_MAX_ISO_BIS];
unsigned long flags;
enum conn_reasons conn_reason;
@ -945,8 +949,10 @@ enum {
HCI_CONN_PER_ADV,
HCI_CONN_BIG_CREATED,
HCI_CONN_CREATE_CIS,
HCI_CONN_CREATE_BIG_SYNC,
HCI_CONN_BIG_SYNC,
HCI_CONN_BIG_SYNC_FAILED,
HCI_CONN_CREATE_PA_SYNC,
HCI_CONN_PA_SYNC,
HCI_CONN_PA_SYNC_FAILED,
};
@ -1099,6 +1105,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
__u8 sid,
bdaddr_t *dst,
__u8 dst_type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != ISO_LINK || bacmp(&c->dst, dst) ||
c->dst_type != dst_type || c->sid != sid)
continue;
rcu_read_unlock();
return c;
}
rcu_read_unlock();
return NULL;
}
static inline struct hci_conn *
hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
bdaddr_t *ba,
@ -1255,7 +1285,17 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
if (c->type != ISO_LINK)
continue;
/* An ISO_LINK hcon with BDADDR_ANY as destination
* address is a Broadcast connection. A Broadcast
* slave connection is associated with a PA train,
* so the sync_handle can be used to differentiate
* from unicast.
*/
if (bacmp(&c->dst, BDADDR_ANY) &&
c->sync_handle == HCI_SYNC_HANDLE_INVALID)
continue;
if (handle == c->iso_qos.bcast.big) {
@ -1269,6 +1309,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
return NULL;
}
static inline struct hci_conn *
hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
__u8 handle, __u8 num_bis)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != ISO_LINK)
continue;
if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
rcu_read_unlock();
return c;
}
}
rcu_read_unlock();
return NULL;
}
static inline struct hci_conn *
hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
{
@ -1328,6 +1392,13 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
if (c->type != ISO_LINK)
continue;
/* Ignore the listen hcon, we are looking
* for the child hcon that was created as
* a result of the PA sync established event.
*/
if (c->state == BT_LISTEN)
continue;
if (c->sync_handle == sync_handle) {
rcu_read_unlock();
return c;
@ -1445,6 +1516,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
int hci_le_create_cis_pending(struct hci_dev *hdev);
int hci_pa_create_sync_pending(struct hci_dev *hdev);
int hci_le_big_create_sync_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@ -1871,8 +1944,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
/* Use ext create connection if command is supported */
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
!test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks))
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
@ -1885,8 +1958,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
* C24: Mandatory if the LE Controller supports Connection State and either
* LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
*/
#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
ext_adv_capable(dev))
#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
ext_adv_capable(dev)) && \
!test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \
&(dev)->quirks))
/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))

View File

@ -878,6 +878,16 @@ struct mgmt_cp_mesh_send_cancel {
} __packed;
#define MGMT_MESH_SEND_CANCEL_SIZE 1
#define MGMT_OP_HCI_CMD_SYNC 0x005B
struct mgmt_cp_hci_cmd_sync {
__le16 opcode;
__u8 event;
__u8 timeout;
__le16 params_len;
__u8 params[];
} __packed;
#define MGMT_HCI_CMD_SYNC_SIZE 6
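
For illustration only, not part of the patch: a rough userspace sketch of packing this new command, assuming the usual 6-byte management-socket header (opcode, index, length; all fields little-endian on the wire) and, for brevity, a little-endian host. The helper name and field values are placeholders; a reply is expected as the usual MGMT_EV_CMD_COMPLETE event (declared just below).

#include <stdint.h>
#include <string.h>

#define MGMT_OP_HCI_CMD_SYNC	0x005b

struct mgmt_hdr {
	uint16_t opcode;	/* management opcode */
	uint16_t index;		/* controller index */
	uint16_t len;		/* length of the parameters that follow */
};

struct hci_cmd_sync_cp {
	uint16_t opcode;	/* HCI opcode to forward to the controller */
	uint8_t  event;		/* event to wait for, 0 = Command Complete */
	uint8_t  timeout;	/* timeout, 0 = default */
	uint16_t params_len;	/* length of params[] appended below */
};

/* Build MGMT_OP_HCI_CMD_SYNC into buf and return the total length. */
static size_t pack_hci_cmd_sync(uint8_t *buf, uint16_t index,
				uint16_t hci_opcode,
				const uint8_t *params, uint16_t params_len)
{
	struct mgmt_hdr hdr = {
		.opcode = MGMT_OP_HCI_CMD_SYNC,
		.index = index,
		.len = sizeof(struct hci_cmd_sync_cp) + params_len,
	};
	struct hci_cmd_sync_cp cp = {
		.opcode = hci_opcode,
		.event = 0,
		.timeout = 0,
		.params_len = params_len,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &cp, sizeof(cp));
	memcpy(buf + sizeof(hdr) + sizeof(cp), params, params_len);

	return sizeof(hdr) + sizeof(cp) + params_len;
}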
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;

@ -952,6 +952,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
conn->sid = HCI_SID_INVALID;
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@ -1127,9 +1128,9 @@ void hci_conn_del(struct hci_conn *conn)
hci_conn_unlink(conn);
cancel_delayed_work_sync(&conn->disc_work);
cancel_delayed_work_sync(&conn->auto_accept_work);
cancel_delayed_work_sync(&conn->idle_work);
disable_delayed_work_sync(&conn->disc_work);
disable_delayed_work_sync(&conn->auto_accept_work);
disable_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
/* Unacked frames */
@ -2062,105 +2063,217 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
struct hci_cp_le_pa_create_sync *cp = data;
bt_dev_dbg(hdev, "");
if (err)
bt_dev_err(hdev, "Unable to create PA: %d", err);
}
kfree(cp);
static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
{
if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
return false;
return true;
}
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_pa_create_sync *cp = data;
int err;
struct hci_cp_le_pa_create_sync cp = {0};
struct hci_conn *conn;
int err = 0;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
sizeof(*cp), cp, HCI_CMD_TIMEOUT);
if (err) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
return err;
hci_dev_lock(hdev);
rcu_read_lock();
/* The spec allows only one pending LE Periodic Advertising Create
* Sync command at a time. If the command is pending now, don't do
* anything. We check for pending connections after each PA Sync
* Established event.
*
* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
* page 2493:
*
* If the Host issues this command when another HCI_LE_Periodic_
* Advertising_Create_Sync command is pending, the Controller shall
* return the error code Command Disallowed (0x0C).
*/
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
goto unlock;
}
return hci_update_passive_scan_sync(hdev);
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (hci_conn_check_create_pa_sync(conn)) {
struct bt_iso_qos *qos = &conn->iso_qos;
cp.options = qos->bcast.options;
cp.sid = conn->sid;
cp.addr_type = conn->dst_type;
bacpy(&cp.addr, &conn->dst);
cp.skip = cpu_to_le16(qos->bcast.skip);
cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
cp.sync_cte_type = qos->bcast.sync_cte_type;
break;
}
}
unlock:
rcu_read_unlock();
hci_dev_unlock(hdev);
if (bacmp(&cp.addr, BDADDR_ANY)) {
hci_dev_set_flag(hdev, HCI_PA_SYNC);
set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
if (!err)
err = hci_update_passive_scan_sync(hdev);
if (err) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
}
}
return err;
}
int hci_pa_create_sync_pending(struct hci_dev *hdev)
{
/* Queue start pa_create_sync and scan */
return hci_cmd_sync_queue(hdev, create_pa_sync,
NULL, create_pa_complete);
}
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos)
{
struct hci_cp_le_pa_create_sync *cp;
struct hci_conn *conn;
int err;
if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
return ERR_PTR(-EBUSY);
conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
if (IS_ERR(conn))
return conn;
conn->iso_qos = *qos;
conn->dst_type = dst_type;
conn->sid = sid;
conn->state = BT_LISTEN;
hci_conn_hold(conn);
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
hci_conn_drop(conn);
return ERR_PTR(-ENOMEM);
}
cp->options = qos->bcast.options;
cp->sid = sid;
cp->addr_type = dst_type;
bacpy(&cp->addr, dst);
cp->skip = cpu_to_le16(qos->bcast.skip);
cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
cp->sync_cte_type = qos->bcast.sync_cte_type;
/* Queue start pa_create_sync and scan */
err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
if (err < 0) {
hci_conn_drop(conn);
kfree(cp);
return ERR_PTR(err);
}
hci_pa_create_sync_pending(hdev);
return conn;
}
static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
{
if (!conn->num_bis)
return false;
return true;
}
static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
{
bt_dev_dbg(hdev, "");
if (err)
bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
}
static int big_create_sync(struct hci_dev *hdev, void *data)
{
DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
struct hci_conn *conn;
rcu_read_lock();
pdu->num_bis = 0;
/* The spec allows only one pending LE BIG Create Sync command at
* a time. If the command is pending now, don't do anything. We
* check for pending connections after each BIG Sync Established
* event.
*
* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
* page 2586:
*
* If the Host sends this command when the Controller is in the
* process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
* Established event has not been generated, the Controller shall
* return the error code Command Disallowed (0x0C).
*/
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
goto unlock;
}
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (hci_conn_check_create_big_sync(conn)) {
struct bt_iso_qos *qos = &conn->iso_qos;
set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
pdu->handle = qos->bcast.big;
pdu->sync_handle = cpu_to_le16(conn->sync_handle);
pdu->encryption = qos->bcast.encryption;
memcpy(pdu->bcode, qos->bcast.bcode,
sizeof(pdu->bcode));
pdu->mse = qos->bcast.mse;
pdu->timeout = cpu_to_le16(qos->bcast.timeout);
pdu->num_bis = conn->num_bis;
memcpy(pdu->bis, conn->bis, conn->num_bis);
break;
}
}
unlock:
rcu_read_unlock();
if (!pdu->num_bis)
return 0;
return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
struct_size(pdu, bis, pdu->num_bis), pdu);
}
int hci_le_big_create_sync_pending(struct hci_dev *hdev)
{
/* Queue big_create_sync */
return hci_cmd_sync_queue_once(hdev, big_create_sync,
NULL, big_create_sync_complete);
}
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[])
{
DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
int err;
if (num_bis < 0x01 || num_bis > pdu->num_bis)
if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
return -EINVAL;
err = qos_set_big(hdev, qos);
if (err)
return err;
if (hcon)
hcon->iso_qos.bcast.big = qos->bcast.big;
if (hcon) {
/* Update hcon QoS */
hcon->iso_qos = *qos;
pdu->handle = qos->bcast.big;
pdu->sync_handle = cpu_to_le16(sync_handle);
pdu->encryption = qos->bcast.encryption;
memcpy(pdu->bcode, qos->bcast.bcode, sizeof(pdu->bcode));
pdu->mse = qos->bcast.mse;
pdu->timeout = cpu_to_le16(qos->bcast.timeout);
pdu->num_bis = num_bis;
memcpy(pdu->bis, bis, num_bis);
hcon->num_bis = num_bis;
memcpy(hcon->bis, bis, num_bis);
}
return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
struct_size(pdu, bis, num_bis), pdu);
return hci_le_big_create_sync_pending(hdev);
}
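Both the queued path and the direct caller build the variable-length command with DEFINE_FLEX() and struct_size() instead of an open-coded buffer. A minimal sketch of that idiom follows, using an invented command structure and opcode; nothing in it is part of the patch.

/* Hypothetical variable-length command illustrating DEFINE_FLEX() and
 * struct_size() only; the struct and opcode are made up for the example.
 */
struct demo_cp_sync {
	__u8 handle;
	__u8 num_bis;		/* counter for the flexible array */
	__u8 bis[];
} __packed;

static int demo_send_sync(struct hci_dev *hdev, const __u8 *bis, __u8 num_bis)
{
	/* Reserves on-stack storage for up to 0x11 trailing entries and
	 * sets pdu->num_bis to 0x11; the real count is written below.
	 */
	DEFINE_FLEX(struct demo_cp_sync, pdu, bis, num_bis, 0x11);

	if (num_bis < 0x01 || num_bis > 0x11)
		return -EINVAL;

	pdu->handle  = 0x01;
	pdu->num_bis = num_bis;
	memcpy(pdu->bis, bis, num_bis);

	/* struct_size() = sizeof(*pdu) plus num_bis trailing bytes, so only
	 * the populated part of the PDU goes onto the wire.
	 */
	return hci_send_cmd(hdev, 0xfc00 /* made-up opcode */,
			    struct_size(pdu, bis, pdu->num_bis), pdu);
}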
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
@ -2224,13 +2337,9 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
conn->iso_qos.bcast.big);
if (parent && parent != conn) {
link = hci_conn_link(parent, conn);
if (!link) {
hci_conn_drop(conn);
return ERR_PTR(-ENOLINK);
}
/* Link takes the refcount */
hci_conn_drop(conn);
if (!link)
return ERR_PTR(-ENOLINK);
}
return conn;
@ -2320,15 +2429,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
}
link = hci_conn_link(le, cis);
hci_conn_drop(cis);
if (!link) {
hci_conn_drop(le);
hci_conn_drop(cis);
return ERR_PTR(-ENOLINK);
}
/* Link takes the refcount */
hci_conn_drop(cis);
cis->state = BT_CONNECT;
hci_le_create_cis_pending(hdev);

@ -3771,18 +3771,22 @@ static void hci_tx_work(struct work_struct *work)
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_acl_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
skb_pull(skb, HCI_ACL_HDR_SIZE);
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ACL packet too small");
goto drop;
}
handle = __le16_to_cpu(hdr->handle);
flags = hci_flags(handle);
handle = hci_handle(handle);
BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
handle, flags);
bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
handle, flags);
hdev->stat.acl_rx++;
@ -3801,24 +3805,29 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle);
}
drop:
kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_sco_hdr *hdr;
struct hci_conn *conn;
__u16 handle, flags;
skb_pull(skb, HCI_SCO_HDR_SIZE);
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "SCO packet too small");
goto drop;
}
handle = __le16_to_cpu(hdr->handle);
flags = hci_flags(handle);
handle = hci_handle(handle);
BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
handle, flags);
bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
handle, flags);
hdev->stat.sco_rx++;
@ -3836,6 +3845,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle);
}
drop:
kfree_skb(skb);
}
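Both RX paths above switch from an unchecked skb_pull() to skb_pull_data(), which returns NULL when the packet is shorter than the header instead of leaving a header pointer into freed or out-of-range data. A minimal sketch of the idiom with an invented header type, for illustration only:

/* Illustrative only: bounds-checked header parsing with skb_pull_data().
 * demo_hdr/demo_rx are not part of the patch.
 */
struct demo_hdr {
	__le16 handle;
	__le16 dlen;
} __packed;

static void demo_rx(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct demo_hdr *hdr;

	/* Returns the header and advances skb->data, or NULL if fewer
	 * than sizeof(*hdr) bytes are present.
	 */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "packet too small");
		goto drop;
	}

	bt_dev_dbg(hdev, "handle 0x%4.4x len %u",
		   hci_handle(le16_to_cpu(hdr->handle)),
		   le16_to_cpu(hdr->dlen));
drop:
	kfree_skb(skb);
}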

@ -3626,6 +3626,13 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
* to avoid unexpected SMP command errors when pairing.
*/
if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
&hdev->quirks))
goto notify;
/* Set the default Authenticated Payload Timeout after
* an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
* Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
@ -6345,7 +6352,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
struct hci_conn *pa_sync;
struct hci_conn *pa_sync, *conn;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@ -6353,6 +6360,20 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
ev->bdaddr_type);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for dst %pMR sid 0x%2.2x",
&ev->bdaddr, ev->sid);
goto unlock;
}
clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
conn->sync_handle = le16_to_cpu(ev->handle);
conn->sid = HCI_SID_INVALID;
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->handle);
@ -6379,6 +6400,9 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
}
unlock:
/* Handle any other pending PA sync command */
hci_pa_create_sync_pending(hdev);
hci_dev_unlock(hdev);
}
@ -6896,7 +6920,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_evt_le_big_sync_estabilished *ev = data;
struct hci_conn *bis;
struct hci_conn *bis, *conn;
int i;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@ -6907,6 +6931,20 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
ev->num_bis);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for big 0x%2.2x",
ev->handle);
goto unlock;
}
clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
conn->num_bis = 0;
memset(conn->bis, 0, sizeof(conn->bis));
for (i = 0; i < ev->num_bis; i++) {
u16 handle = le16_to_cpu(ev->bis[i]);
__le32 interval;
@ -6927,6 +6965,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
/* Mark PA sync as established */
set_bit(HCI_CONN_PA_SYNC, &bis->flags);
bis->sync_handle = conn->sync_handle;
bis->iso_qos.bcast.big = ev->handle;
memset(&interval, 0, sizeof(interval));
memcpy(&interval, ev->latency, sizeof(ev->latency));
@ -6956,6 +6995,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
hci_connect_cfm(bis, ev->status);
}
unlock:
/* Handle any other pending BIG sync command */
hci_le_big_create_sync_pending(hdev);
hci_dev_unlock(hdev);
}

@ -4842,6 +4842,13 @@ static const struct {
HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
"HCI LE Set Random Private Address Timeout command is "
"advertised, but not supported."),
HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
"HCI LE Extended Create Connection command is "
"advertised, but not supported."),
HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
"HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
"to unexpected SMP errors when pairing "
"and will not be used."),
HCI_QUIRK_BROKEN(LE_CODED,
"HCI LE Coded PHY feature bit is set, "
"but its usage is not supported.")
@ -6477,7 +6484,7 @@ static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
&own_addr_type);
if (err)
goto done;
/* Send command LE Extended Create Connection if supported */
if (use_ext_conn(hdev)) {
err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
goto done;

@ -21,16 +21,6 @@ static const struct device_type bt_link = {
.release = bt_link_release,
};
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
* so we should move the device before conn device is destroyed.
*/
static int __match_tty(struct device *dev, void *data)
{
return !strncmp(dev_name(dev), "rfcomm", 6);
}
void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@ -73,10 +63,13 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
return;
}
/* If there are devices using the connection as parent reset it to NULL
* before unregistering the device.
*/
while (1) {
struct device *dev;
dev = device_find_child(&conn->dev, NULL, __match_tty);
dev = device_find_any_child(&conn->dev);
if (!dev)
break;
device_move(dev, NULL, DPM_ORDER_DEV_LAST);

@ -35,6 +35,7 @@ struct iso_conn {
struct sk_buff *rx_skb;
__u32 rx_len;
__u16 tx_sn;
struct kref ref;
};
#define iso_conn_lock(c) spin_lock(&(c)->lock)
@ -93,6 +94,49 @@ static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
#define ISO_CONN_TIMEOUT (HZ * 40)
#define ISO_DISCONN_TIMEOUT (HZ * 2)
static void iso_conn_free(struct kref *ref)
{
struct iso_conn *conn = container_of(ref, struct iso_conn, ref);
BT_DBG("conn %p", conn);
if (conn->sk)
iso_pi(conn->sk)->conn = NULL;
if (conn->hcon) {
conn->hcon->iso_data = NULL;
hci_conn_drop(conn->hcon);
}
/* Ensure no more work items will run since hci_conn has been dropped */
disable_delayed_work_sync(&conn->timeout_work);
kfree(conn);
}
static void iso_conn_put(struct iso_conn *conn)
{
if (!conn)
return;
BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref));
kref_put(&conn->ref, iso_conn_free);
}
static struct iso_conn *iso_conn_hold_unless_zero(struct iso_conn *conn)
{
if (!conn)
return NULL;
BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
if (!kref_get_unless_zero(&conn->ref))
return NULL;
return conn;
}
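The kref conversion follows the usual "get unless zero, then put" lifetime idiom, so the timeout work, hcon teardown and socket destructor can each take a temporary reference without racing the final free. A generic, illustrative sketch of the pattern (names invented, not the ISO code itself):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_conn {
	struct kref ref;
	/* ... protocol state ... */
};

static void demo_conn_free(struct kref *ref)
{
	struct demo_conn *conn = container_of(ref, struct demo_conn, ref);

	/* Runs exactly once, after the last reference has been dropped. */
	kfree(conn);
}

/* Take a reference only if the object is not already on its way out. */
static struct demo_conn *demo_conn_hold_unless_zero(struct demo_conn *conn)
{
	if (!conn || !kref_get_unless_zero(&conn->ref))
		return NULL;
	return conn;
}

static void demo_conn_put(struct demo_conn *conn)
{
	if (conn)
		kref_put(&conn->ref, demo_conn_free);
}

In the patch, each asynchronous path (iso_sock_timeout(), iso_conn_del(), iso_sock_destruct()) upgrades its pointer with the hold-unless-zero helper and drops it with the put helper when done, so the kfree() in the release callback cannot run while any of them still uses the connection.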
static struct sock *iso_sock_hold(struct iso_conn *conn)
{
if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk))
@ -109,9 +153,14 @@ static void iso_sock_timeout(struct work_struct *work)
timeout_work.work);
struct sock *sk;
conn = iso_conn_hold_unless_zero(conn);
if (!conn)
return;
iso_conn_lock(conn);
sk = iso_sock_hold(conn);
iso_conn_unlock(conn);
iso_conn_put(conn);
if (!sk)
return;
@ -149,9 +198,14 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
{
struct iso_conn *conn = hcon->iso_data;
conn = iso_conn_hold_unless_zero(conn);
if (conn) {
if (!conn->hcon)
if (!conn->hcon) {
iso_conn_lock(conn);
conn->hcon = hcon;
iso_conn_unlock(conn);
}
iso_conn_put(conn);
return conn;
}
@ -159,6 +213,7 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
if (!conn)
return NULL;
kref_init(&conn->ref);
spin_lock_init(&conn->lock);
INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout);
@ -178,17 +233,15 @@ static void iso_chan_del(struct sock *sk, int err)
struct sock *parent;
conn = iso_pi(sk)->conn;
iso_pi(sk)->conn = NULL;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
iso_conn_lock(conn);
conn->sk = NULL;
iso_pi(sk)->conn = NULL;
iso_conn_unlock(conn);
if (conn->hcon)
hci_conn_drop(conn->hcon);
iso_conn_put(conn);
}
sk->sk_state = BT_CLOSED;
@ -210,6 +263,7 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
struct iso_conn *conn = hcon->iso_data;
struct sock *sk;
conn = iso_conn_hold_unless_zero(conn);
if (!conn)
return;
@ -219,20 +273,18 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
iso_conn_lock(conn);
sk = iso_sock_hold(conn);
iso_conn_unlock(conn);
iso_conn_put(conn);
if (sk) {
lock_sock(sk);
iso_sock_clear_timer(sk);
iso_chan_del(sk, err);
release_sock(sk);
sock_put(sk);
if (!sk) {
iso_conn_put(conn);
return;
}
/* Ensure no more work items will run before freeing conn. */
cancel_delayed_work_sync(&conn->timeout_work);
hcon->iso_data = NULL;
kfree(conn);
lock_sock(sk);
iso_sock_clear_timer(sk);
iso_chan_del(sk, err);
release_sock(sk);
sock_put(sk);
}
static int __iso_chan_add(struct iso_conn *conn, struct sock *sk,
@ -652,6 +704,8 @@ static void iso_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
iso_conn_put(iso_pi(sk)->conn);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
@ -711,6 +765,7 @@ static void iso_sock_disconn(struct sock *sk)
*/
if (bis_sk) {
hcon->state = BT_OPEN;
hcon->iso_data = NULL;
iso_pi(sk)->conn->hcon = NULL;
iso_sock_clear_timer(sk);
iso_chan_del(sk, bt_to_errno(hcon->abort_reason));
@ -720,7 +775,6 @@ static void iso_sock_disconn(struct sock *sk)
}
sk->sk_state = BT_DISCONN;
iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT);
iso_conn_lock(iso_pi(sk)->conn);
hci_conn_drop(iso_pi(sk)->conn->hcon);
iso_pi(sk)->conn->hcon = NULL;
@ -1338,6 +1392,13 @@ static void iso_conn_big_sync(struct sock *sk)
if (!hdev)
return;
/* hci_le_big_create_sync requires hdev lock to be held, since
* it enqueues the HCI LE BIG Create Sync command via
* hci_cmd_sync_queue_once, which checks hdev flags that might
* change.
*/
hci_dev_lock(hdev);
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
&iso_pi(sk)->qos,
@ -1348,6 +1409,8 @@ static void iso_conn_big_sync(struct sock *sk)
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
}
hci_dev_unlock(hdev);
}
static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
@ -1733,6 +1796,13 @@ static bool iso_match_big(struct sock *sk, void *data)
return ev->handle == iso_pi(sk)->qos.bcast.big;
}
static bool iso_match_big_hcon(struct sock *sk, void *data)
{
struct hci_conn *hcon = data;
return hcon->iso_qos.bcast.big == iso_pi(sk)->qos.bcast.big;
}
static bool iso_match_pa_sync_flag(struct sock *sk, void *data)
{
return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
@ -1756,8 +1826,16 @@ static void iso_conn_ready(struct iso_conn *conn)
if (!hcon)
return;
if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) ||
test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags)) {
/* A BIS slave hcon is notified to the ISO layer
* after the Command Complete for the LE Setup
* ISO Data Path command is received. Get the
* parent socket that matches the hcon BIG handle.
*/
parent = iso_get_sock(&hcon->src, &hcon->dst,
BT_LISTEN, iso_match_big_hcon,
hcon);
} else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
ev = hci_recv_event_data(hcon->hdev,
HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
@ -1824,7 +1902,6 @@ static void iso_conn_ready(struct iso_conn *conn)
if (!bacmp(&hcon->dst, BDADDR_ANY)) {
bacpy(&hcon->dst, &iso_pi(parent)->dst);
hcon->dst_type = iso_pi(parent)->dst_type;
hcon->sync_handle = iso_pi(parent)->sync_handle;
}
if (ev3) {
@ -1942,6 +2019,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (sk) {
int err;
struct hci_conn *hcon = iso_pi(sk)->conn->hcon;
iso_pi(sk)->qos.bcast.encryption = ev2->encryption;
@ -1950,7 +2028,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev, NULL,
err = hci_le_big_create_sync(hdev,
hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,

@ -132,6 +132,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_MESH_READ_FEATURES,
MGMT_OP_MESH_SEND,
MGMT_OP_MESH_SEND_CANCEL,
MGMT_OP_HCI_CMD_SYNC,
};
static const u16 mgmt_events[] = {
@ -2515,6 +2516,64 @@ unlock:
return err;
}
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
struct sk_buff *skb;
skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
le16_to_cpu(cp->params_len), cp->params,
cp->event, cp->timeout ?
msecs_to_jiffies(cp->timeout * 1000) :
HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
mgmt_status(PTR_ERR(skb)));
goto done;
}
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
skb->data, skb->len);
kfree_skb(skb);
done:
mgmt_pending_free(cmd);
return 0;
}
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_hci_cmd_sync *cp = data;
struct mgmt_pending_cmd *cmd;
int err;
if (len < sizeof(*cp) ||
le16_to_cpu(cp->params_len) != len - sizeof(*cp))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
if (!cmd)
err = -ENOMEM;
else
err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
if (err < 0) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
MGMT_STATUS_FAILED);
if (cmd)
mgmt_pending_free(cmd);
}
hci_dev_unlock(hdev);
return err;
}
/* This is a helper function to test for pending mgmt commands that can
* cause CoD or EIR HCI commands. We can only allow one such pending
* mgmt command at a time since otherwise we cannot easily track what
@ -9371,6 +9430,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ mesh_send, MGMT_MESH_SEND_SIZE,
HCI_MGMT_VAR_LEN },
{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
{ mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
void mgmt_index_added(struct hci_dev *hdev)

@ -729,7 +729,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
struct sock *l2cap_sk;
struct l2cap_conn *conn;
struct rfcomm_conninfo cinfo;
int len, err = 0;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@ -783,7 +784,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
@ -802,7 +803,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
{
struct sock *sk = sock->sk;
struct bt_security sec;
int len, err = 0;
int err = 0;
size_t len;
BT_DBG("sk %p", sk);
@ -827,7 +829,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
len = min(len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;

@ -51,6 +51,7 @@ struct sco_conn {
struct delayed_work timeout_work;
unsigned int mtu;
struct kref ref;
};
#define sco_conn_lock(c) spin_lock(&c->lock)
@ -76,6 +77,49 @@ struct sco_pinfo {
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
static void sco_conn_free(struct kref *ref)
{
struct sco_conn *conn = container_of(ref, struct sco_conn, ref);
BT_DBG("conn %p", conn);
if (conn->sk)
sco_pi(conn->sk)->conn = NULL;
if (conn->hcon) {
conn->hcon->sco_data = NULL;
hci_conn_drop(conn->hcon);
}
/* Ensure no more work items will run since hci_conn has been dropped */
disable_delayed_work_sync(&conn->timeout_work);
kfree(conn);
}
static void sco_conn_put(struct sco_conn *conn)
{
if (!conn)
return;
BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref));
kref_put(&conn->ref, sco_conn_free);
}
static struct sco_conn *sco_conn_hold_unless_zero(struct sco_conn *conn)
{
if (!conn)
return NULL;
BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
if (!kref_get_unless_zero(&conn->ref))
return NULL;
return conn;
}
static struct sock *sco_sock_hold(struct sco_conn *conn)
{
if (!conn || !bt_sock_linked(&sco_sk_list, conn->sk))
@ -92,6 +136,10 @@ static void sco_sock_timeout(struct work_struct *work)
timeout_work.work);
struct sock *sk;
conn = sco_conn_hold_unless_zero(conn);
if (!conn)
return;
sco_conn_lock(conn);
if (!conn->hcon) {
sco_conn_unlock(conn);
@ -99,6 +147,7 @@ static void sco_sock_timeout(struct work_struct *work)
}
sk = sco_sock_hold(conn);
sco_conn_unlock(conn);
sco_conn_put(conn);
if (!sk)
return;
@ -136,9 +185,14 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct sco_conn *conn = hcon->sco_data;
conn = sco_conn_hold_unless_zero(conn);
if (conn) {
if (!conn->hcon)
if (!conn->hcon) {
sco_conn_lock(conn);
conn->hcon = hcon;
sco_conn_unlock(conn);
}
sco_conn_put(conn);
return conn;
}
@ -146,6 +200,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
if (!conn)
return NULL;
kref_init(&conn->ref);
spin_lock_init(&conn->lock);
INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
@ -170,17 +225,15 @@ static void sco_chan_del(struct sock *sk, int err)
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
sco_pi(sk)->conn = NULL;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_drop(conn->hcon);
sco_conn_put(conn);
}
sk->sk_state = BT_CLOSED;
@ -195,29 +248,28 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
conn = sco_conn_hold_unless_zero(conn);
if (!conn)
return;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sco_conn_lock(conn);
sk = sco_sock_hold(conn);
sco_conn_unlock(conn);
sco_conn_put(conn);
if (sk) {
lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
release_sock(sk);
sock_put(sk);
if (!sk) {
sco_conn_put(conn);
return;
}
/* Ensure no more work items will run before freeing conn. */
cancel_delayed_work_sync(&conn->timeout_work);
hcon->sco_data = NULL;
kfree(conn);
/* Kill socket */
lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
release_sock(sk);
sock_put(sk);
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
@ -401,6 +453,8 @@ static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
sco_conn_put(sco_pi(sk)->conn);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
@ -448,17 +502,6 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
sco_conn_lock(sco_pi(sk)->conn);
hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
sco_conn_unlock(sco_pi(sk)->conn);
} else
sco_chan_del(sk, ECONNRESET);
break;
case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN: