static int hci_uart_setup(struct hci_dev *hdev)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);
        struct hci_rp_read_local_version *ver;
        struct sk_buff *skb;
        unsigned int speed;
        int err;

        /* Init speed if any */
        if (hu->init_speed)
                speed = hu->init_speed;
        else if (hu->proto->init_speed)
                speed = hu->proto->init_speed;
        else
                speed = 0;

        if (speed)
                serdev_device_set_baudrate(hu->serdev, speed);

        /* Operational speed if any */
        if (hu->oper_speed)
                speed = hu->oper_speed;
        else if (hu->proto->oper_speed)
                speed = hu->proto->oper_speed;
        else
                speed = 0;

        if (hu->proto->set_baudrate && speed) {
                err = hu->proto->set_baudrate(hu, speed);
                if (err)
                        bt_dev_err(hdev, "Failed to set baudrate");
                else
                        serdev_device_set_baudrate(hu->serdev, speed);
        }

        if (hu->proto->setup)
                return hu->proto->setup(hu);

        if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
                return 0;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading local version info failed (%ld)",
                           PTR_ERR(skb));
                return 0;
        }

        if (skb->len != sizeof(*ver))
                bt_dev_err(hdev, "Event length mismatch for version info");

        kfree_skb(skb);
        return 0;
}
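/*
 * Illustrative sketch (not taken from this file): hci_uart_setup() pulls
 * init_speed/oper_speed either from the hci_uart instance (per-device value)
 * or from the protocol descriptor's defaults. A protocol wanting the generic
 * baudrate handling would declare something roughly like the following; the
 * name "sample_proto" and the chosen speeds are made-up example values, and
 * the mandatory callbacks are omitted for brevity.
 */
static const struct hci_uart_proto sample_proto __maybe_unused = {
        .id             = HCI_UART_H4,  /* reuse an existing protocol id */
        .name           = "Sample",
        .init_speed     = 115200,       /* applied before ->setup() runs */
        .oper_speed     = 3000000,      /* requested via ->set_baudrate() */
        /* .open/.close/.recv/.enqueue/.dequeue/.setup/.set_baudrate omitted */
};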
static int qca_wcn3990_init(struct hci_uart *hu)
{
        struct qca_serdev *qcadev;
        int ret;

        /* Check the vregs status; an hci down may have turned off the
         * voltage regulator.
         */
        qcadev = serdev_device_get_drvdata(hu->serdev);
        if (!qcadev->bt_power->vregs_on) {
                serdev_device_close(hu->serdev);
                ret = qca_power_setup(hu, true);
                if (ret)
                        return ret;

                ret = serdev_device_open(hu->serdev);
                if (ret) {
                        bt_dev_err(hu->hdev, "failed to open port");
                        return ret;
                }
        }

        /* Forcefully drive the wcn3990 into boot mode. */
        host_set_baudrate(hu, 2400);
        ret = qca_send_power_pulse(hu, false);
        if (ret)
                return ret;

        qca_set_speed(hu, QCA_INIT_SPEED);
        ret = qca_send_power_pulse(hu, true);
        if (ret)
                return ret;

        /* Now the device is ready to communicate with the host. To sync the
         * host with the device we need to reopen the port. Without this, we
         * will have RTS and CTS synchronization issues.
         */
        serdev_device_close(hu->serdev);
        ret = serdev_device_open(hu->serdev);
        if (ret) {
                bt_dev_err(hu->hdev, "failed to open port");
                return ret;
        }

        hci_uart_set_flow_control(hu, false);

        return 0;
}
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
{
        const struct firmware *fw;
        struct sk_buff *skb;
        const u8 *fw_ptr;
        int err;

        err = request_firmware(&fw, ddc_name, &hdev->dev);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
                           ddc_name, err);
                return err;
        }

        bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);

        fw_ptr = fw->data;

        /* The DDC file contains one or more DDC structures, each made up of
         * a Length (1 byte), a DDC ID (2 bytes), and a DDC value
         * (Length - 2 bytes).
         */
        while (fw->size > fw_ptr - fw->data) {
                u8 cmd_plen = fw_ptr[0] + sizeof(u8);

                skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
                                     HCI_INIT_TIMEOUT);
                if (IS_ERR(skb)) {
                        bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
                                   PTR_ERR(skb));
                        release_firmware(fw);
                        return PTR_ERR(skb);
                }

                fw_ptr += cmd_plen;
                kfree_skb(skb);
        }

        release_firmware(fw);

        bt_dev_info(hdev, "Applying Intel DDC parameters completed");

        return 0;
}
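/*
 * Illustrative sketch (hypothetical data and helper, not shipped with any
 * real DDC firmware file): each entry in the blob walked above is laid out
 * as [len][id_lo][id_hi][value...], where "len" covers the ID and value
 * bytes, so the amount of data handed to the 0xfc8b command per entry is
 * len + 1 (the length byte itself). The DDC IDs and values below are
 * made-up examples.
 */
static const u8 sample_ddc[] = {
        0x03, 0x7c, 0x01, 0x01,         /* len 3: DDC ID 0x017c, 1-byte value */
        0x04, 0x8b, 0x01, 0x34, 0x12,   /* len 4: DDC ID 0x018b, 2-byte value */
};

static void sample_walk_ddc(void)
{
        const u8 *p = sample_ddc;

        while (sizeof(sample_ddc) > p - sample_ddc) {
                u8 cmd_plen = p[0] + sizeof(u8);

                pr_info("DDC id 0x%04x, value %u bytes, HCI plen %u\n",
                        get_unaligned_le16(p + 1), p[0] - 2, cmd_plen);
                p += cmd_plen;
        }
}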
static int regmap_ibt_gather_write(void *context,
                                   const void *addr, size_t reg_size,
                                   const void *val, size_t val_size)
{
        struct regmap_ibt_context *ctx = context;
        struct ibt_cp_reg_access *cp;
        struct sk_buff *skb;
        int plen = sizeof(*cp) + val_size;
        u8 mode;
        int err = 0;

        if (reg_size != sizeof(__le32))
                return -EINVAL;

        switch (val_size) {
        case 1:
                mode = IBT_REG_MODE_8BIT;
                break;
        case 2:
                mode = IBT_REG_MODE_16BIT;
                break;
        case 4:
                mode = IBT_REG_MODE_32BIT;
                break;
        default:
                return -EINVAL;
        }

        cp = kmalloc(plen, GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        /* regmap provides a little-endian formatted addr/value */
        cp->addr = *(__le32 *)addr;
        cp->mode = mode;
        cp->len = val_size;
        memcpy(&cp->data, val, val_size);

        bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

        skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
                           le32_to_cpu(cp->addr), err);
                goto done;
        }
        kfree_skb(skb);
done:
        kfree(cp);
        return err;
}
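/*
 * Illustrative sketch (layout inferred from the field accesses in this write
 * path and in regmap_ibt_read() below, not copied from a header): the vendor
 * command and response payloads appear to be laid out roughly as follows,
 * which is what makes plen = sizeof(*cp) + val_size and the
 * skb->len == sizeof(*rp) + val_size check on the read side line up.
 */
struct sample_ibt_cp_reg_access {
        __le32  addr;   /* little-endian register address from regmap */
        __u8    mode;   /* IBT_REG_MODE_8BIT/16BIT/32BIT */
        __u8    len;    /* number of value bytes */
        __u8    data[]; /* value bytes, write path only */
} __packed;

struct sample_ibt_rp_reg_access {
        __u8    status;
        __le32  addr;   /* register address echoed by the controller */
        __u8    data[]; /* value bytes returned on a read */
} __packed;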
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        /* The command complete event must carry exactly one intel_version
         * structure.
         */
        if (skb->len != sizeof(*ver)) {
                bt_dev_err(hdev, "Intel version event size mismatch");
                kfree_skb(skb);
                return -EILSEQ;
        }

        memcpy(ver, skb->data, sizeof(*ver));

        kfree_skb(skb);

        return 0;
}
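/*
 * Illustrative caller sketch (hypothetical helper, not from the driver):
 * vendor setup code typically reads the version block first and then
 * branches on fields such as hw_variant/fw_variant to decide which firmware
 * handling path to take.
 */
static int sample_log_intel_version(struct hci_dev *hdev)
{
        struct intel_version ver;
        int err;

        err = btintel_read_version(hdev, &ver);
        if (err)
                return err;

        bt_dev_info(hdev, "hw_variant 0x%02x fw_variant 0x%02x",
                    ver.hw_variant, ver.fw_variant);

        return 0;
}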
int btintel_enter_mfg(struct hci_dev *hdev)
{
        const u8 param[] = { 0x01, 0x00 };
        struct sk_buff *skb;

        /* Vendor command 0xfc11 with the first parameter set to 0x01 switches
         * the controller into manufacturing mode.
         */
        skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }
        kfree_skb(skb);

        return 0;
}
static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
        struct ath_struct *ath = hu->priv;

        /* Reassemble H4-framed packets from the raw UART byte stream */
        ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
                                  ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
        if (IS_ERR(ath->rx_skb)) {
                int err = PTR_ERR(ath->rx_skb);

                bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
                ath->rx_skb = NULL;
                return err;
        }

        return count;
}
/* Recv data */
static int h4_recv(struct hci_uart *hu, const void *data, int count)
{
        struct h4_struct *h4 = hu->priv;

        if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
                return -EUNATCH;

        h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
                                 h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
        if (IS_ERR(h4->rx_skb)) {
                int err = PTR_ERR(h4->rx_skb);

                bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
                h4->rx_skb = NULL;
                return err;
        }

        return count;
}
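/*
 * Illustrative sketch (declared here only for context; the H4 and Atheros
 * drivers define their own tables): the packet table handed to h4_recv_buf()
 * tells the helper, per HCI packet type, where the length field sits in the
 * header so it can reassemble complete frames before handing them up to the
 * core with hci_recv_frame().
 */
static const struct h4_recv_pkt sample_recv_pkts[] = {
        { H4_RECV_ACL,   .recv = hci_recv_frame },
        { H4_RECV_SCO,   .recv = hci_recv_frame },
        { H4_RECV_EVENT, .recv = hci_recv_frame },
};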
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
        struct hci_uart *hu = hci_get_drvdata(hdev);
        struct qca_data *qca = hu->priv;
        struct qca_serdev *qcadev;
        struct sk_buff *skb;
        u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

        if (baudrate > QCA_BAUDRATE_3200000)
                return -EINVAL;

        cmd[4] = baudrate;

        skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
        if (!skb) {
                bt_dev_err(hdev, "Failed to allocate baudrate packet");
                return -ENOMEM;
        }

        /* Queue the vendor command that changes the baudrate and tag its
         * packet type.
         */
        skb_put_data(skb, cmd, sizeof(cmd));
        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

        skb_queue_tail(&qca->txq, skb);
        hci_uart_tx_wakeup(hu);

        qcadev = serdev_device_get_drvdata(hu->serdev);

        /* Wait for the baudrate change request to be sent */
        while (!skb_queue_empty(&qca->txq))
                usleep_range(100, 200);

        serdev_device_wait_until_sent(hu->serdev,
                                      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

        /* Give the controller time to process the request */
        if (qcadev->btsoc_type == QCA_WCN3990)
                msleep(10);
        else
                msleep(300);

        return 0;
}
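/*
 * Illustrative ordering sketch (hypothetical helper, not from the driver):
 * the controller has to be told about the new rate while the host UART is
 * still running at the old one, and only afterwards does the host switch,
 * otherwise the tail of the vendor command would go out at the wrong rate.
 * The parameter names and the helper itself are made up for illustration.
 */
static int sample_switch_oper_speed(struct hci_uart *hu, unsigned int speed,
                                    u8 qca_rate)
{
        int ret;

        ret = qca_set_baudrate(hu->hdev, qca_rate);     /* controller first */
        if (ret)
                return ret;

        host_set_baudrate(hu, speed);                   /* then the host UART */

        return 0;
}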
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
        u8 param[] = { 0x00, 0x00 };
        struct sk_buff *skb;

        /* The 2nd command parameter specifies the manufacturing exit method:
         * 0x00: Just disable the manufacturing mode.
         * 0x01: Disable manufacturing mode and reset with patches deactivated.
         * 0x02: Disable manufacturing mode and reset with patches activated.
         */
        if (reset)
                param[1] |= patched ? 0x02 : 0x01;

        skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }
        kfree_skb(skb);

        return 0;
}
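/*
 * Illustrative caller sketch (hypothetical helper): vendor configuration
 * writes are bracketed by btintel_enter_mfg()/btintel_exit_mfg(). Passing
 * reset=true together with patched=true asks the controller to reset and
 * activate whatever patches were loaded while in manufacturing mode.
 */
static int sample_apply_in_mfg(struct hci_dev *hdev)
{
        int err;

        err = btintel_enter_mfg(hdev);
        if (err)
                return err;

        /* ... send vendor configuration commands (e.g. 0xfc8b writes) ... */

        return btintel_exit_mfg(hdev, true, true);
}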
static int qca_send_power_pulse(struct hci_uart *hu, bool on)
{
        int ret;
        int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
        u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;

        /* These power pulses are single-byte commands sent to the wcn3990 at
         * the required baudrate. An external circuit on the wcn3990 Tx pin
         * decodes a pulse sent at a specific baudrate. The wcn3990 shares the
         * RF COEX antenna and the power inputs between Wi-Fi and BT, so
         * powering up the supplies does not enable BT until a power-on pulse
         * is sent at 115200 bps; this helps save power. Disabling hardware
         * flow control is mandatory while sending power pulses to the SoC.
         */
        bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);

        serdev_device_write_flush(hu->serdev);
        hci_uart_set_flow_control(hu, true);
        ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
        if (ret < 0) {
                bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
                return ret;
        }

        serdev_device_wait_until_sent(hu->serdev, timeout);
        hci_uart_set_flow_control(hu, false);

        /* Give the controller time to boot/shutdown */
        if (on)
                msleep(100);
        else
                msleep(10);

        return 0;
}
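/*
 * Illustrative counterpart sketch (hypothetical helper, mirroring the
 * bring-up in qca_wcn3990_init() above rather than copying the driver's
 * shutdown path): powering the wcn3990 down drops the line to the forced
 * 2400 bps rate and sends the power-off pulse.
 */
static void sample_wcn3990_power_down(struct hci_uart *hu)
{
        host_set_baudrate(hu, 2400);
        qca_send_power_pulse(hu, false);
}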
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
                           void *val, size_t val_size)
{
        struct regmap_ibt_context *ctx = context;
        struct ibt_cp_reg_access cp;
        struct ibt_rp_reg_access *rp;
        struct sk_buff *skb;
        int err = 0;

        if (reg_size != sizeof(__le32))
                return -EINVAL;

        switch (val_size) {
        case 1:
                cp.mode = IBT_REG_MODE_8BIT;
                break;
        case 2:
                cp.mode = IBT_REG_MODE_16BIT;
                break;
        case 4:
                cp.mode = IBT_REG_MODE_32BIT;
                break;
        default:
                return -EINVAL;
        }

        /* regmap provides a little-endian formatted addr */
        cp.addr = *(__le32 *)addr;
        cp.len = val_size;

        bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

        skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
                           le32_to_cpu(cp.addr), err);
                return err;
        }

        if (skb->len != sizeof(*rp) + val_size) {
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
                           le32_to_cpu(cp.addr));
                err = -EINVAL;
                goto done;
        }

        rp = (struct ibt_rp_reg_access *)skb->data;
        if (rp->addr != cp.addr) {
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
                           le32_to_cpu(rp->addr));
                err = -EINVAL;
                goto done;
        }

        memcpy(val, rp->data, val_size);

done:
        kfree_skb(skb);
        return err;
}