/* * Reset a device at different levels (warm, cold or bus) * * @i2400m: device descriptor * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS) * * Warm and cold resets get a USB reset if they fail. * * Warm reset: * * The device will be fully reset internally, but won't be * disconnected from the USB bus (so no reenumeration will * happen). Firmware upload will be neccessary. * * The device will send a reboot barker in the notification endpoint * that will trigger the driver to reinitialize the state * automatically from notif.c:i2400m_notification_grok() into * i2400m_dev_bootstrap_delayed(). * * Cold and bus (USB) reset: * * The device will be fully reset internally, disconnected from the * USB bus an a reenumeration will happen. Firmware upload will be * neccessary. Thus, we don't do any locking or struct * reinitialization, as we are going to be fully disconnected and * reenumerated. * * Note we need to return -ENODEV if a warm reset was requested and we * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset() * and wimax_dev->op_reset. 
* * WARNING: no driver state saved/fixed */ static int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) { int result; struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); struct device *dev = i2400m_dev(i2400m); static const __le32 i2400m_WARM_BOOT_BARKER[4] = { cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), }; static const __le32 i2400m_COLD_BOOT_BARKER[4] = { cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), }; d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); if (rt == I2400M_RT_WARM) result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER, sizeof(i2400m_WARM_BOOT_BARKER), I2400MU_EP_BULK_OUT); else if (rt == I2400M_RT_COLD) result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER, sizeof(i2400m_COLD_BOOT_BARKER), I2400MU_EP_RESET_COLD); else if (rt == I2400M_RT_BUS) { do_bus_reset: result = usb_reset_device(i2400mu->usb_dev); switch (result) { case 0: case -EINVAL: /* device is gone */ case -ENODEV: case -ENOENT: case -ESHUTDOWN: result = rt == I2400M_RT_WARM ? -ENODEV : 0; break; /* We assume the device is disconnected */ default: dev_err(dev, "USB reset failed (%d), giving up!\n", result); } } else { result = -EINVAL; /* shut gcc up in certain arches */ BUG(); } if (result < 0 && result != -EINVAL /* device is gone */ && rt != I2400M_RT_BUS) { dev_err(dev, "%s reset failed (%d); trying USB reset\n", rt == I2400M_RT_WARM ? "warm" : "cold", result); rt = I2400M_RT_BUS; goto do_bus_reset; } d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result); return result; }
/*
 * Run consistency tests on the firmware file and load up headers
 *
 * Check for the firmware being made for the i2400m device,
 * etc...These checks are mostly informative, as the device will make
 * them too; but the driver's response is more informative on what
 * went wrong.
 *
 * This will also look at all the headers present on the firmware
 * file, and update i2400m->fw_hdrs to point to them.
 *
 * Returns: 0 if at least one BCF header was found in @bcf (even if
 *     some individual headers failed validation and were skipped),
 *     -EBADF if none were, or a negative errno from
 *     i2400m_zrealloc_2x() on allocation failure.
 */
static
int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	size_t headers = 0;	/* how many BCF headers we have walked */
	const struct i2400m_bcf_hdr *bcf_hdr;
	const void *itr, *next, *top;
	size_t slots = 0, used_slots = 0;	/* i2400m->fw_hdrs capacity/use */

	for (itr = bcf, top = itr + bcf_size;
	     itr < top;
	     headers++, itr = next) {
		size_t leftover, offset, header_len, size;

		leftover = top - itr;
		offset = itr - (const void *) bcf;
		/* Need a whole header in the buffer before touching it */
		if (leftover <= sizeof(*bcf_hdr)) {
			dev_err(dev, "firmware %s: %zu B left at @%zx, "
				"not enough for BCF header\n",
				i2400m->fw_name, leftover, offset);
			break;
		}
		bcf_hdr = itr;
		/* Only the first header is supposed to be followed by
		 * payload */
		header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
		size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
		/* NOTE(review): header_len/size come straight from the
		 * file; 'next = itr + size' is only re-checked by the
		 * loop condition, so a huge 32-bit value could wrap the
		 * pointer -- confirm callers cap bcf_size sensibly. */
		if (headers == 0)
			next = itr + size;
		else
			next = itr + header_len;
		result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
		if (result < 0)	/* bad header: skip it, keep walking */
			continue;
		if (used_slots + 1 >= slots) {
			/* +1 -> we need to account for the one we'll
			 * occupy and at least an extra one for
			 * always being NULL */
			result = i2400m_zrealloc_2x(
				(void **) &i2400m->fw_hdrs, &slots,
				sizeof(i2400m->fw_hdrs[0]),
				GFP_KERNEL);
			if (result < 0)
				goto error_zrealloc;
		}
		i2400m->fw_hdrs[used_slots] = bcf_hdr;
		used_slots++;
	}
	if (headers == 0) {
		dev_err(dev, "firmware %s: no usable headers found\n",
			i2400m->fw_name);
		result = -EBADF;
	} else
		result = 0;
error_zrealloc:
	return result;
}
/*
 * Run consistency tests on a single firmware BCF header
 *
 * @i2400m: device descriptor
 * @bcf_hdr: BCF header to validate
 * @index: ordinal of this header in the firmware file (for messages)
 * @offset: [byte] offset of this header in the file (for messages)
 *
 * Check for the firmware being made for the i2400m device,
 * etc...These checks are mostly informative, as the device will make
 * them too; but the driver's response is more informative on what
 * went wrong.
 *
 * Returns: 0 if the header is acceptable (possibly with a warning for
 * old build dates), -EBADF otherwise.
 */
static
int i2400m_fw_hdr_check(struct i2400m *i2400m,
			const struct i2400m_bcf_hdr *bcf_hdr,
			size_t index, size_t offset)
{
	struct device *dev = i2400m_dev(i2400m);
	/* Named constants instead of bare magic numbers */
	enum {
		/* only v1.x headers are understood by this driver */
		BCF_HDR_MAJOR_SUPPORTED = 1,
		/* module type tag the device images carry -- TODO
		 * confirm symbolic meaning against the BCF format docs */
		BCF_MODULE_TYPE_I2400M = 6,
		/* vendor tag: Intel (PCI vendor ID style) */
		BCF_MODULE_VENDOR_INTEL = 0x8086,
		/* builds older than this (BCD-ish YYYYMMDD) draw a warning */
		BCF_BUILD_DATE_MIN = 0x20080300,
	};
	unsigned module_type, header_len, major_version, minor_version,
		module_id, module_vendor, date, size;

	module_type = le32_to_cpu(bcf_hdr->module_type);
	header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
	major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
		>> 16;
	minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff;
	module_id = le32_to_cpu(bcf_hdr->module_id);
	module_vendor = le32_to_cpu(bcf_hdr->module_vendor);
	date = le32_to_cpu(bcf_hdr->date);
	size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);

	d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
		 "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
		 i2400m->fw_name, index, offset,
		 module_type, module_vendor, module_id,
		 major_version, minor_version, header_len, size, date);

	/* Hard errors */
	if (major_version != BCF_HDR_MAJOR_SUPPORTED) {
		dev_err(dev, "firmware %s #%zd@%08zx: major header version "
			"v%u.%u not supported\n",
			i2400m->fw_name, index, offset,
			major_version, minor_version);
		return -EBADF;
	}
	if (module_type != BCF_MODULE_TYPE_I2400M) {
		/* built for the right hardware? */
		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
			"type 0x%x; aborting\n",
			i2400m->fw_name, index, offset, module_type);
		return -EBADF;
	}
	if (module_vendor != BCF_MODULE_VENDOR_INTEL) {
		dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
			"vendor 0x%x; aborting\n",
			i2400m->fw_name, index, offset, module_vendor);
		return -EBADF;
	}
	/* Soft error: complain but accept the image */
	if (date < BCF_BUILD_DATE_MIN)
		dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
			 "too old; unsupported\n",
			 i2400m->fw_name, index, offset, date);
	return 0;
}
/*
 * Wake up the device and transmit a held SKB, then restart the net queue
 *
 * When the device goes into basestation-idle mode, we need to tell it
 * to exit that mode; it will negotiate with the base station, user
 * space may have to intervene to rehandshake crypto and then tell us
 * when it is ready to transmit the packet we have "queued". Still we
 * need to give it some time after it reports being ok.
 *
 * On error, there is not much we can do. If the error was on TX, we
 * still wake the queue up to see if the next packet will be luckier.
 *
 * If _cmd_exit_idle() fails...well, it could be many things; most
 * commonly it is that something else took the device out of IDLE mode
 * (for example, the base station). In that case we get an -EILSEQ and
 * we are just going to ignore that one. If the device is back to
 * connected, then fine -- if it is some other state, the packet will
 * be dropped anyway.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	/* Claim the held skb under the lock; from here on this work
	 * item owns it and must consume it on every path */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}
	/* If we have, somehow, lost the connection after this was
	 * queued, don't do anything; this might be the device got
	 * reset or just disconnected. */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)	/* already out of IDLE: not an error */
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Wait (bounded below the netdev watchdog) for the state
	 * machine to actually leave IDLE before pushing the data */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	msleep(20);	/* device still needs some time or it drops it */
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	netif_wake_queue(net_dev);
out_kfree:
	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
out_put:
	i2400m_put(i2400m);
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
/**
 * i2400m_dev_shutdown - Shutdown a running device
 *
 * @i2400m: device descriptor
 *
 * Gracefully stops the device, moving it to the lowest power
 * consumption state possible.
 */
void i2400m_dev_shutdown(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	/* The reset outcome is only reported in the debug trace, never
	 * acted upon (the old -ENODEV initializer was a dead store) */
	result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
	d_fnend(3, dev, "(i2400m %p) = void [%d]\n", i2400m, result);
}
/**
 * i2400m_get_device_info - Query the device for detailed device information
 *
 * @i2400m: device descriptor
 *
 * Returns: an skb whose skb->data points to a 'struct
 *     i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
 *     skb is *guaranteed* to contain the whole TLV data structure.
 *
 *     On error, IS_ERR(skb) is true and PTR_ERR(skb) is the error
 *     code.
 */
struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_l3l4_hdr *cmd;
	const struct i2400m_l3l4_hdr *ack;
	size_t ack_len;
	const struct i2400m_tlv_hdr *tlv;
	const struct i2400m_tlv_detailed_device_info *ddi;
	char strerr[32];

	ack_skb = ERR_PTR(-ENOMEM);
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* Bare command: header only, no payload TLVs */
	cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
	cmd->length = 0;
	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
			PTR_ERR(ack_skb));
		goto error_msg_to_dev;
	}
	ack = wimax_msg_data_len(ack_skb, &ack_len);
	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
	if (result < 0) {
		dev_err(dev, "'get device info' (0x%04x) command failed: "
			"%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
			strerr);
		goto error_cmd_failed;
	}
	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
			      I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
	if (tlv == NULL) {
		dev_err(dev, "GET DEVICE INFO: "
			"detailed device info TLV not found (0x%04x)\n",
			I2400M_TLV_DETAILED_DEVICE_INFO);
		result = -EIO;
		goto error_no_tlv;
	}
	/* Advance skb->data to the start of the TLV so the caller sees
	 * it directly; the skb still owns the whole buffer */
	skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return ack_skb;

	/* Error paths past a *valid* ack skb: free it and hand back an
	 * ERR_PTR instead */
error_no_tlv:
error_cmd_failed:
	kfree_skb(ack_skb);
	kfree(cmd);
	return ERR_PTR(result);
}
/*
 * Network interface close: stop the wake-on-TX machinery and report
 * success.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *im = net_dev_to_i2400m(net_dev);
	struct device *parent = i2400m_dev(im);

	d_fnstart(3, parent, "(net_dev %p [i2400m %p])\n", net_dev, im);
	i2400m_net_wake_stop(im);
	d_fnend(3, parent, "(net_dev %p [i2400m %p]) = 0\n", net_dev, im);
	return 0;
}
/*
 * WiMAX stack operation: implement SW RFKill toggling
 *
 * @wimax_dev: device descriptor
 * @skb: skb where the message has been received; skb->data is
 *       expected to point to the message payload.
 * @genl_info: passed by the generic netlink layer
 *
 * Generic Netlink will call this function when a message is sent from
 * userspace to change the software RF-Kill switch status.
 *
 * This function will set the device's software RF-Kill switch state to
 * match what is requested.
 *
 * NOTE: the i2400m has a strict state machine; we can only set the
 *       RF-Kill switch when it is on, the HW RF-Kill is on and the
 *       device is initialized. So we ignore errors stemming from not
 *       being in the right state (-EILSEQ).
 */
int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
			       enum wimax_rf_state state)
{
	int result;
	struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct {
		struct i2400m_l3l4_hdr hdr;
		struct i2400m_tlv_rf_operation sw_rf;
	} __packed *cmd;
/**
 * i2400m_bm_cmd - Execute a boot mode command
 *
 * @cmd: buffer containing the command data (pointing at the header).
 *     This data can be ANYWHERE (for USB, we will copy it to an
 *     specific buffer). Make sure everything is in proper little
 *     endian.
 *
 *     A raw buffer can be also sent, just cast it and set flags to
 *     I2400M_BM_CMD_RAW.
 *
 *     This function will generate a checksum for you if the
 *     checksum bit in the command is set (unless I2400M_BM_CMD_RAW
 *     is set).
 *
 *     You can use the i2400m->bm_cmd_buf to stage your commands and
 *     send them.
 *
 *     If NULL, no command is sent (we just wait for an ack).
 *
 * @cmd_size: size of the command. Will be auto padded to the
 *     bus-specific drivers padding requirements.
 *
 * @ack: buffer where to place the acknowledgement. If it is a regular
 *     command response, all fields will be returned with the right,
 *     native endianness.
 *
 *     You *cannot* use i2400m->bm_ack_buf for this buffer.
 *
 * @ack_size: size of @ack, 16 aligned; you need to provide at least
 *     sizeof(*ack) bytes and then enough to contain the return data
 *     from the command
 *
 * @flags: see I2400M_BM_CMD_* above.
 *
 * @returns: bytes received by the notification; if < 0, an errno code
 *     denoting an error or:
 *
 *     -ERESTARTSYS  The device has rebooted
 *
 * Executes a boot-mode command and waits for a response, doing basic
 * validation on it; if a zero length response is received, it retries
 * waiting for a response until a non-zero one is received (timing out
 * after %I2400M_BOOT_RETRIES retries).
 */
static
ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
		      const struct i2400m_bootrom_header *cmd, size_t cmd_size,
		      struct i2400m_bootrom_header *ack, size_t ack_size,
		      int flags)
{
	ssize_t result = -ENOMEM, rx_bytes;
	struct device *dev = i2400m_dev(i2400m);
	int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);

	d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n",
		  i2400m, cmd, cmd_size, ack, ack_size);
	BUG_ON(ack_size < sizeof(*ack));
	BUG_ON(i2400m->boot_mode == 0);	/* only legal while in boot mode */

	if (cmd != NULL) {	/* send the command */
		result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
		if (result < 0)
			goto error_cmd_send;
		if ((flags & I2400M_BM_CMD_RAW) == 0)
			d_printf(5, dev,
				 "boot-mode cmd %d csum %u rr %u da %u: "
				 "addr 0x%04x size %u block csum 0x%04x\n",
				 opcode, i2400m_brh_get_use_checksum(cmd),
				 i2400m_brh_get_response_required(cmd),
				 i2400m_brh_get_direct_access(cmd),
				 cmd->target_addr, cmd->data_size,
				 cmd->block_checksum);
	}
	result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
	if (result < 0) {
		dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n",
			opcode, (int) result);	/* bah, %zd doesn't work */
		goto error_wait_for_ack;
	}
	rx_bytes = result;
	/* verify the ack and read more if necessary [result is the
	 * final amount of bytes we get in the ack]  */
	result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
	if (result < 0)
		goto error_bad_ack;
	/* Don't you love this stack of empty targets? Well, I don't
	 * either, but it helps track exactly who comes in here and
	 * why :) */
	result = rx_bytes;
error_bad_ack:
error_wait_for_ack:
error_cmd_send:
	d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n",
		i2400m, cmd, cmd_size, ack, ack_size, (int) result);
	return result;
}
/*
 * Recognize a boot barker
 *
 * @buf: buffer where the boot barker is.
 * @buf_size: size of the buffer (has to be 16 bytes). It is passed
 *     here so the function can check it for the caller.
 *
 * Note that as a side effect, upon identifying the obtained boot
 * barker, this function will set i2400m->barker to point to the right
 * barker database entry. Subsequent calls to the function will result
 * in verifying that the same type of boot barker is returned when the
 * device [re]boots (as long as the same device instance is used).
 *
 * Return: 0 if @buf matches a known boot barker. -ENOENT if the
 * buffer in @buf doesn't match any boot barker in the database or if
 * the buffer doesn't have the right size (16 bytes). -EIO if the
 * device reports a different barker than the one already cached.
 */
int i2400m_is_boot_barker(struct i2400m *i2400m,
			  const void *buf, size_t buf_size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_barker_db *barker;
	int i;

	result = -ENOENT;
	if (buf_size != sizeof(i2400m_barker_db[0].data))
		return result;

	/* Short circuit if we have already discovered the barker
	 * associated with the device. */
	if (i2400m->barker &&
	    !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
		/* FIX: pointer subtraction already yields an element
		 * count; the old division by sizeof() printed a bogus
		 * index in the debug message. */
		unsigned index = i2400m->barker - i2400m_barker_db;
		d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
			 index, le32_to_cpu(i2400m->barker->data[0]));
		return 0;
	}

	for (i = 0; i < i2400m_barker_db_used; i++) {
		barker = &i2400m_barker_db[i];
		BUILD_BUG_ON(sizeof(barker->data) != 16);
		if (memcmp(buf, barker->data, sizeof(barker->data)))
			continue;

		if (i2400m->barker == NULL) {
			i2400m->barker = barker;
			d_printf(1, dev, "boot barker set to #%u/%08x\n",
				 i, le32_to_cpu(barker->data[0]));
			/* FIX: barker->data[] is little-endian, so
			 * convert the CPU-order constant *to* LE for the
			 * comparison; the old le32_to_cpu() was wrong on
			 * big-endian hosts (identical on LE). */
			if (barker->data[0] == cpu_to_le32(I2400M_NBOOT_BARKER))
				i2400m->sboot = 0;
			else
				i2400m->sboot = 1;
			result = 0;
		} else if (i2400m->barker != barker) {
			dev_err(dev, "HW inconsistency: device "
				"reports a different boot barker "
				"than set (from %08x to %08x)\n",
				le32_to_cpu(i2400m->barker->data[0]),
				le32_to_cpu(barker->data[0]));
			/* FIX: keep the -EIO; it used to be clobbered by
			 * an unconditional 'result = 0' before 'break' */
			result = -EIO;
		} else {
			d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
				 i, le32_to_cpu(barker->data[0]));
			result = 0;
		}
		break;
	}
	return result;
}
/* * Mark the tail of the FIFO buffer as 'to-skip' * * We should never hit the BUG_ON() because all the sizes we push to * the FIFO are padded to be a multiple of 16 -- the size of *msg * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the * header). * * Note: * * Assumes i2400m->tx_lock is taken, and we use that as a barrier */ static void i2400m_tx_skip_tail(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE; size_t tail_room = I2400M_TX_BUF_SIZE - tx_in; struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in; BUG_ON(tail_room < sizeof(*msg)); msg->size = tail_room | I2400M_TX_SKIP; d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n", tail_room, tx_in); i2400m->tx_in += tail_room; }
/** * i2400m_set_idle_timeout - Set the device's idle mode timeout * * @i2400m: i2400m device descriptor * * @msecs: milliseconds for the timeout to enter idle mode. Between * 100 to 300000 (5m); 0 to disable. In increments of 100. * * After this @msecs of the link being idle (no data being sent or * received), the device will negotiate with the basestation entering * idle mode for saving power. The connection is maintained, but * getting out of it (done in tx.c) will require some negotiation, * possible crypto re-handshake and a possible DHCP re-lease. * * Only available if fw_version >= 0x00090002. * * Returns: 0 if ok, < 0 errno code on error. */ int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_config_idle_timeout cit; } *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; char strerr[32]; result = -ENOSYS; if (i2400m_le_v1_3(i2400m)) goto error_alloc; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->cit.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout)); cmd->cit.timeout = cpu_to_le32(msecs); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'set idle timeout' command: " "%ld\n", PTR_ERR(ack_skb)); result = PTR_ERR(ack_skb); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'set idle timeout' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); goto error_cmd_failed; } result = 0; kfree_skb(ack_skb); error_cmd_failed: error_msg_to_dev: 
kfree(cmd); error_alloc: return result; }
/*
 * Do the final steps of uploading firmware
 *
 * @bcf_hdr: BCF header we are actually using
 * @bcf: pointer to the firmware image (which matches the first header
 *     that is followed by the actual payloads).
 * @offset: [byte] offset into @bcf for the command we need to send.
 *
 * Depending on the boot mode (signed vs non-signed), different
 * actions need to be taken:
 *
 * - non-signed (unsecure) boot: rewrite the image's final command
 *   into a JUMP to its target address and send it;
 *
 * - signed (secure) boot: send the command followed by the signature
 *   block from the header, raw, so the device can verify it.
 */
static
int i2400m_dnload_finalize(struct i2400m *i2400m,
			   const struct i2400m_bcf_hdr *bcf_hdr,
			   const struct i2400m_bcf_hdr *bcf, size_t offset)
{
	int ret = 0;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_bootrom_header *cmd, ack;
	struct {
		struct i2400m_bootrom_header cmd;
		u8 cmd_pl[0];	/* signature payload, appended raw */
	} __packed *cmd_buf;
	size_t signature_block_offset, signature_block_size;

	d_fnstart(3, dev, "offset %zu\n", offset);
	cmd = (void *) bcf + offset;
	if (i2400m_boot_is_signed(i2400m) == 0) {
		struct i2400m_bootrom_header jump_ack;

		d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
			 le32_to_cpu(cmd->target_addr));
		/* Stage in the bounce buffer before modifying: @bcf is
		 * the (const) firmware image */
		cmd_buf = i2400m->bm_cmd_buf;
		memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
		cmd = &cmd_buf->cmd;
		/* now cmd points to the actual bootrom_header in cmd_buf */
		i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
		cmd->data_size = 0;
		ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
				    &jump_ack, sizeof(jump_ack), 0);
	} else {
		d_printf(1, dev, "secure boot, jumping to 0x%08x\n",
			 le32_to_cpu(cmd->target_addr));
		cmd_buf = i2400m->bm_cmd_buf;
		memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
		/* Signature block follows the header, key and exponent;
		 * its size is the modulus' -- assumed BCF layout; TODO
		 * confirm against the firmware image format spec */
		signature_block_offset =
			sizeof(*bcf_hdr)
			+ le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
			+ le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
		signature_block_size =
			le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
		memcpy(cmd_buf->cmd_pl,
		       (void *) bcf_hdr + signature_block_offset,
		       signature_block_size);
		ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
				    sizeof(cmd_buf->cmd) + signature_block_size,
				    &ack, sizeof(ack), I2400M_BM_CMD_RAW);
	}
	d_fnend(3, dev, "returning %d\n", ret);
	return ret;
}
/*
 * Parse a 'state report' and extract carrier on/off information
 *
 * @i2400m: device descriptor
 * @l3l4_hdr: pointer to message; it has been already validated for
 *     consistent size.
 * @size: size of the message (header + payload). The header length
 *     declaration is assumed to be congruent with @size (as in
 *     sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
 * @tag: tag prepended to the debug messages
 *
 * Walk the report's TLV payload and dispatch every TLV we understand
 * (system state, RF switch status, media status) to its report
 * handler. Setting the carrier OFF twice is harmless: netif_carrier_off()
 * only generates events on transitions.
 */
static
void i2400m_report_state_hook(struct i2400m *i2400m,
			      const struct i2400m_l3l4_hdr *l3l4_hdr,
			      size_t size, const char *tag)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *itr;
	const struct i2400m_tlv_system_state *ss;
	const struct i2400m_tlv_rf_switches_status *rfss;
	const struct i2400m_tlv_media_status *ms;
	size_t pl_size = le16_to_cpu(l3l4_hdr->length);

	d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
		  i2400m, l3l4_hdr, size, tag);
	for (itr = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
					  pl_size, NULL);
	     itr != NULL;
	     itr = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
					  pl_size, itr)) {
		if (i2400m_tlv_match(itr, I2400M_TLV_SYSTEM_STATE,
				     sizeof(*ss)) == 0) {
			ss = container_of(itr, typeof(*ss), hdr);
			d_printf(2, dev, "%s: system state TLV "
				 "found (0x%04x), state 0x%08x\n",
				 tag, I2400M_TLV_SYSTEM_STATE,
				 le32_to_cpu(ss->state));
			i2400m_report_tlv_system_state(i2400m, ss);
		}
		if (i2400m_tlv_match(itr, I2400M_TLV_RF_STATUS,
				     sizeof(*rfss)) == 0) {
			rfss = container_of(itr, typeof(*rfss), hdr);
			d_printf(2, dev, "%s: RF status TLV "
				 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
				 tag, I2400M_TLV_RF_STATUS,
				 le32_to_cpu(rfss->sw_rf_switch),
				 le32_to_cpu(rfss->hw_rf_switch));
			i2400m_report_tlv_rf_switches_status(i2400m, rfss);
		}
		if (i2400m_tlv_match(itr, I2400M_TLV_MEDIA_STATUS,
				     sizeof(*ms)) == 0) {
			ms = container_of(itr, typeof(*ms), hdr);
			d_printf(2, dev, "%s: Media Status TLV: %u\n",
				 tag, le32_to_cpu(ms->media_status));
			i2400m_report_tlv_media_status(i2400m, ms);
		}
	}
	d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
		i2400m, l3l4_hdr, size, tag);
}
/*
 * Finalize the current TX message being built
 *
 * Compacts the message header right up against the first payload,
 * pads the whole message out to the bus's TX block size and clears
 * i2400m->tx_msg so a new message is started on the next TX.
 *
 * NOTE(review): tx_msg->size appears to be kept in host byte order at
 * this stage (only num_pls/padding go through le16 helpers) --
 * presumably it is converted/consumed when the message is pulled off
 * the FIFO; confirm against the TX pull path.
 *
 * Assumes the caller holds i2400m->tx_lock.
 */
static
void i2400m_tx_close(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
	struct i2400m_msg_hdr *tx_msg_moved;
	size_t aligned_size, padding, hdr_size;
	void *pad_buf;
	unsigned num_pls;

	if (tx_msg->size & I2400M_TX_SKIP)	/* a skip block? nothing to do */
		goto out;
	num_pls = le16_to_cpu(tx_msg->num_pls);
	if (num_pls == 0) {	/* no payloads? turn it into a skip block */
		tx_msg->size |= I2400M_TX_SKIP;
		goto out;
	}
	/* Relocate the header so it ends right where the first payload
	 * begins (headers are allocated I2400M_TX_PLD_SIZE wide) */
	hdr_size = sizeof(*tx_msg)
		+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
	hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
	tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	memmove(tx_msg_moved, tx_msg, hdr_size);
	tx_msg_moved->size -= tx_msg->offset;
	/* Pad the message up to the bus block size */
	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
	padding = aligned_size - tx_msg_moved->size;
	if (padding > 0) {
		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
		if (unlikely(WARN_ON(pad_buf == NULL
				     || pad_buf == TAIL_FULL))) {
			/* Should never happen: complain loudly and write
			 * nothing, to avoid corrupting the FIFO */
			dev_err(dev,
				"SW BUG! Possible data leakage from memory the "
				"device should not read for padding - "
				"size %lu aligned_size %zu tx_buf %p in "
				"%zu out %zu\n",
				(unsigned long) tx_msg_moved->size,
				aligned_size, i2400m->tx_buf, i2400m->tx_in,
				i2400m->tx_out);
		} else
			memset(pad_buf, 0xad, padding);
	}
	tx_msg_moved->padding = cpu_to_le16(padding);
	tx_msg_moved->size += padding;
	if (tx_msg != tx_msg_moved)
		tx_msg->size += padding;
out:
	i2400m->tx_msg = NULL;
}
/*
 * Send a boot-mode command over the SDIO bus
 *
 * Stages the command in the driver's bounce buffer, zero-pads it up
 * to the SDIO block size, optionally prepares it (checksum etc.,
 * unless I2400M_BM_CMD_RAW) and pushes it to the device's data
 * address. Returns the (unpadded) command size on success, negative
 * errno on failure.
 */
ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
				const struct i2400m_bootrom_header *_cmd,
				size_t cmd_size, int flags)
{
	ssize_t result;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
	int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
	struct i2400m_bootrom_header *cmd;
	size_t padded_size = ALIGN(cmd_size, I2400MS_BLK_SIZE);

	d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
		  i2400m, _cmd, cmd_size);
	result = -E2BIG;
	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
		goto error_too_big;

	/* Stage in the bounce buffer (unless already there) and zero
	 * the padding up to the block boundary */
	if (_cmd != i2400m->bm_cmd_buf)
		memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
	cmd = i2400m->bm_cmd_buf;
	if (padded_size > cmd_size)
		memset(i2400m->bm_cmd_buf + cmd_size, 0,
		       padded_size - cmd_size);
	if ((flags & I2400M_BM_CMD_RAW) == 0) {
		if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
			dev_warn(dev, "SW BUG: response_required == 0\n");
		i2400m_bm_cmd_prepare(cmd);
	}
	d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
		 opcode, cmd_size, padded_size);
	d_dump(5, dev, cmd, cmd_size);

	sdio_claim_host(i2400ms->func);
	result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
				  i2400m->bm_cmd_buf, padded_size);
	sdio_release_host(i2400ms->func);
	if (result < 0) {
		dev_err(dev, "BM cmd %d: cannot send: %ld\n",
			opcode, (long) result);
		goto error_cmd_send;
	}
	result = cmd_size;
error_cmd_send:
error_too_big:
	d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
		i2400m, _cmd, cmd_size, (int) result);
	return result;
}
/*
 * Stop the TX kthread, if it is still around
 *
 * The thread pointer is fetched and cleared under tx_lock, so we stop
 * it at most once even if this races with the thread exiting on its
 * own.
 */
void i2400mu_tx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *tx_thread;

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	tx_thread = i2400mu->tx_kthread;
	i2400mu->tx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (tx_thread == NULL)
		d_printf(1, dev, "TX: kthread had already exited\n");
	else
		kthread_stop(tx_thread);
}
/*
 * Allocate @size bytes in the TX fifo, return a pointer to it
 *
 * @i2400m: device descriptor
 * @size: size of the buffer we need to allocate
 * @padding: ensure that there is at least this many bytes of free
 *     contiguous space in the fifo. This is needed because later on
 *     we might need to add padding.
 * @try_head: specify either to allocate head room or tail room space
 *     in the TX FIFO. This boolean is required to avoid a system hang
 *     due to an infinite loop caused by i2400m_tx_fifo_push().
 *     The caller must always try to allocate tail room space first by
 *     calling this routine with try_head = 0. In case if there
 *     is not enough tail room space but there is enough head room space,
 *     (i2400m_tx_fifo_push() returns TAIL_FULL) try to allocate head
 *     room space, by calling this routine again with try_head = 1.
 *
 * Returns:
 *
 *     Pointer to the allocated space. NULL if there is no
 *     space. TAIL_FULL if there is no space at the tail but there is at
 *     the head (Case B below).
 *
 * These are the two basic cases we need to keep an eye for -- it is
 * much better explained in linux/kernel/kfifo.c, but this code
 * basically does the same. No rocket science here.
 *
 *       Case A               Case B
 * N  ___________          ___________
 *   | tail room |        |   data    |
 *   |           |        |           |
 *   |<-  IN   ->|        |<-  OUT  ->|
 *   |           |        |           |
 *   |   data    |        |   room    |
 *   |           |        |           |
 *   |<-  OUT  ->|        |<-  IN   ->|
 *   |           |        |           |
 *   | head room |        |   data    |
 * 0  -----------          -----------
 *
 * We allocate only *contiguous* space.
 *
 * We can allocate only from 'room'. In Case B, it is simple; in case
 * A, we only try from the tail room; if it is not enough, we just
 * fail and return TAIL_FULL and let the caller figure out if it wants to
 * skip the tail room and try to allocate from the head.
 *
 * There is a corner case, wherein i2400m_tx_new() can get into
 * an infinite loop calling i2400m_tx_fifo_push().
 * In certain situations, tx_in would have reached on the top of TX FIFO
 * and i2400m_tx_tail_room() returns 0, as described below:
 *
 * N  ___________ tail room is zero
 *   |<-  IN   ->|
 *   |           |
 *   |           |
 *   |           |
 *   |   data    |
 *   |<-  OUT  ->|
 *   |           |
 *   |           |
 *   | head room |
 * 0  -----------
 * During such a time, where tail room is zero in the TX FIFO and if there
 * is a request to add a payload to TX FIFO, which calls:
 * i2400m_tx()
 *         ->calls i2400m_tx_close()
 *         ->calls i2400m_tx_skip_tail()
 *         goto try_new;
 *         ->calls i2400m_tx_new()
 *                    |----> [try_head:]
 *     infinite loop  |     ->calls i2400m_tx_fifo_push()
 *                    |            if (tail_room < needed)
 *                    |               if (head_room => needed)
 *                    |                  return TAIL_FULL;
 *                    |<----  goto try_head;
 *
 * i2400m_tx() calls i2400m_tx_close() to close the message, since there
 * is no tail room to accommodate the payload and calls
 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
 * i2400m_tx_new() to allocate space for new message header calling
 * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
 * to accommodate the message header, but there is enough head space.
 * The i2400m_tx_new() keeps re-retrying by calling i2400m_tx_fifo_push()
 * ending up in a loop causing system freeze.
 *
 * This corner case is avoided by using a try_head boolean,
 * as an argument to i2400m_tx_fifo_push().
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     The indexes keep increasing and we reset them to zero when we
 *     pop data off the queue
 */
static
void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
			  size_t padding, bool try_head)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t room, tail_room, needed_size;
	void *ptr;

	needed_size = size + padding;
	room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
	if (room < needed_size) {	/* this takes care of Case B */
		d_printf(2, dev, "fifo push %zu/%zu: no space\n",
			 size, padding);
		return NULL;
	}
	/* Is there space at the tail? */
	tail_room = __i2400m_tx_tail_room(i2400m);
	if (!try_head && tail_room < needed_size) {
		/*
		 * If the tail room space is not enough to push the message
		 * in the TX FIFO, then there are two possibilities:
		 * 1. There is enough head room space to accommodate
		 * this message in the TX FIFO.
		 * 2. There is not enough space in the head room and
		 * in tail room of the TX FIFO to accommodate the message.
		 * In the case (1), return TAIL_FULL so that the caller
		 * can figure out, if the caller wants to push the message
		 * into the head room space.
		 * In the case (2), return NULL, indicating that the TX FIFO
		 * cannot accommodate the message.
		 */
		if (room - tail_room >= needed_size) {
			d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
				 size, padding);
			return TAIL_FULL;	/* There might be head space */
		} else {
			d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
				 size, padding);
			return NULL;	/* There is no space */
		}
	}
	ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
	d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
		 i2400m->tx_in % I2400M_TX_BUF_SIZE);
	i2400m->tx_in += size;
	return ptr;
}
/*
 * Accept bringing the interface up only once the device has finished
 * its initialization.
 *
 * Returns: 0 if the device is ready, -EBUSY otherwise.
 */
static
int i2400m_open(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int rc = 0;

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	if (!i2400m->ready) {
		dev_err(dev, "Device is still initializing\n");
		rc = -EBUSY;
	}
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
		net_dev, i2400m, rc);
	return rc;
}
/*
 * Given a buffer of TLVs, iterate over them
 *
 * @i2400m: device instance
 * @tlv_buf: pointer to the beginning of the TLV buffer
 * @buf_size: buffer size in bytes
 * @tlv_pos: seek position; this is assumed to be a pointer returned
 *           by i2400m_tlv_buffer_walk() [and thus, validated]. The
 *           TLV returned will be the one following this one.
 *
 * Returns: pointer to the next TLV, or NULL when the buffer is
 *          exhausted or a malformed/truncated TLV is found (so the
 *          iteration terminates instead of handing back a TLV whose
 *          declared payload overruns the buffer).
 *
 * Usage:
 *
 * tlv_itr = NULL;
 * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr))  {
 *         ...
 *         // Do stuff with tlv_itr, DON'T MODIFY IT
 *         ...
 * }
 */
static
const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
	struct i2400m *i2400m,
	const void *tlv_buf, size_t buf_size,
	const struct i2400m_tlv_hdr *tlv_pos)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
	size_t offset, length, avail_size;
	unsigned type;

	if (tlv_pos == NULL)	/* Take the first one? */
		tlv_pos = tlv_buf;
	else			/* Nope, the next one */
		tlv_pos = (void *) tlv_pos
			+ le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
	if (tlv_pos == tlv_top) {	/* buffer done */
		tlv_pos = NULL;
		goto error_beyond_end;
	}
	if (tlv_pos > tlv_top) {	/* previous TLV ran past the end */
		tlv_pos = NULL;
		WARN_ON(1);
		goto error_beyond_end;
	}
	offset = (void *) tlv_pos - (void *) tlv_buf;
	avail_size = buf_size - offset;
	if (avail_size < sizeof(*tlv_pos)) {
		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
			"short header\n", tlv_buf, buf_size, offset);
		/* Don't hand a truncated TLV back to the caller */
		tlv_pos = NULL;
		goto error_short_header;
	}
	type = le16_to_cpu(tlv_pos->type);
	length = le16_to_cpu(tlv_pos->length);
	if (avail_size < sizeof(*tlv_pos) + length) {
		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
			"tlv type 0x%04x @%zu: "
			"short data (%zu bytes vs %zu needed)\n",
			tlv_buf, buf_size, type, offset, avail_size,
			sizeof(*tlv_pos) + length);
		/* Declared payload overruns the buffer; stop iterating */
		tlv_pos = NULL;
		goto error_short_header;
	}
error_short_header:
error_beyond_end:
	return tlv_pos;
}
/*
 * Change the interface's MTU
 *
 * Rejects values that are negative or at/above I2400M_MAX_MTU; the
 * original code only checked the upper bound, so a negative MTU from
 * userspace would have been stored verbatim in net_dev->mtu.
 *
 * Returns: 0 if ok, -EINVAL if the requested MTU is out of range.
 */
static
int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
{
	int result;
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	if (new_mtu < 0 || new_mtu >= I2400M_MAX_MTU) {
		dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
			new_mtu, I2400M_MAX_MTU);
		result = -EINVAL;
	} else {
		net_dev->mtu = new_mtu;
		result = 0;
	}
	return result;
}
/*
 * Wake up the device and transmit a held SKB, then restart the net queue
 *
 * When the device goes into basestation-idle mode, we need to tell it
 * to exit that mode; it will negotiate with the base station, user
 * space may have to intervene to rehandshake crypto and then tell us
 * when it is ready to transmit the packet we have "queued". Still we
 * need to give it sometime after it reports being ok.
 *
 * On error, there is not much we can do. If the error was on TX, we
 * still wake the queue up to see if the next packet will be luckier.
 *
 * If _cmd_exit_idle() fails...well, it could be many things; most
 * commonly it is that something else took the device out of IDLE mode
 * (for example, the base station). In that case we get an -EILSEQ and
 * we are just going to ignore that one. If the device is back to
 * connected, then fine -- if it is someother state, the packet will
 * be dropped anyway.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct device *dev = i2400m_dev(i2400m);
	/* Initial read is outside the lock; it is immediately
	 * re-read under tx_lock below, which is the authoritative
	 * take-and-clear of the held skb */
	struct sk_buff *skb = i2400m->wake_tx_skb;
	unsigned long flags;

	/* Atomically claim the skb queued by i2400m_net_wake_tx() so
	 * no second work instance can free it twice */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		dev_err(dev, "WAKE&TX: skb dissapeared!\n");
		goto out_put;
	}
	/* Ask the device to leave IDLE; -EILSEQ means something else
	 * (e.g. the base station) already took it out -- not an error */
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d\n", result);
		goto error;
	}
	/* Wait for the state change to be reported (up to 5s) */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE, 5 * HZ);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d\n", result);
		goto error;
	}
	msleep(20);	/* device still needs some time or it drops it */
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	/* NOTE(review): the queue is only woken on the success path;
	 * the exit-idle/timeout error paths above jump past this wake,
	 * which seems to contradict the header comment -- confirm */
	netif_wake_queue(i2400m->wimax_dev.net_dev);
error:
	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
out_put:
	/* Drop the device reference taken when the work was queued */
	i2400m_put(i2400m);
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
/*
 * Setup minimal device communication infrastructure needed to at
 * least be able to update the firmware.
 *
 * Sets the SDIO block size, enables the SDIO function and then brings
 * up the TX and RX subsystems; on failure, everything acquired so far
 * is released in reverse order via the goto ladder.
 *
 * Note the ugly trick: if we are in the probe path
 * (i2400ms->debugfs_dentry == NULL), we only retry function
 * enablement once, to avoid racing with the iwmc3200 top controller.
 *
 * Returns: 0 if ok, < 0 errno code on error.
 */
static
int i2400ms_bus_setup(struct i2400m *i2400m)
{
	int result;
	struct i2400ms *i2400ms =
		container_of(i2400m, struct i2400ms, i2400m);
	struct device *dev = i2400m_dev(i2400m);
	struct sdio_func *func = i2400ms->func;
	int retries;

	/* Block size changes require exclusive access to the host */
	sdio_claim_host(func);
	result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
	sdio_release_host(func);
	if (result < 0) {
		dev_err(dev, "Failed to set block size: %d\n", result);
		goto error_set_blk_size;
	}

	/* iwmc3200 probe path (no debugfs entry yet): limit retries --
	 * see the note in the header comment */
	if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
		retries = 1;
	else
		retries = 0;
	result = i2400ms_enable_function(i2400ms, retries);
	if (result < 0) {
		dev_err(dev, "Cannot enable SDIO function: %d\n", result);
		goto error_func_enable;
	}

	result = i2400ms_tx_setup(i2400ms);
	if (result < 0)
		goto error_tx_setup;
	result = i2400ms_rx_setup(i2400ms);
	if (result < 0)
		goto error_rx_setup;
	return 0;

error_rx_setup:
	i2400ms_tx_release(i2400ms);
error_tx_setup:
	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);
error_func_enable:
error_set_blk_size:
	return result;
}
/*
 * Send a boot-mode command over the bulk-out pipe
 *
 * Command can be a raw command, which requires no preparation (and
 * which might not even be following the command format). Checks that
 * the right amount of data was transferred.
 *
 * To satisfy USB requirements (no onstack, vmalloc or in data segment
 * buffers), we copy the command to i2400m->bm_cmd_buf and send it from
 * there.
 *
 * @_cmd: command to send; may be NULL (opcode is reported as -1 and
 *     bm_cmd_buf is assumed to already hold the command -- presumably
 *     prepared by the caller; TODO confirm against callers)
 * @cmd_size: size of the command
 * @flags: pass thru from i2400m_bm_cmd()
 * @return: cmd_size if ok, < 0 errno code on error.
 */
ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
				const struct i2400m_bootrom_header *_cmd,
				size_t cmd_size, int flags)
{
	ssize_t result;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
	int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
	struct i2400m_bootrom_header *cmd;
	size_t cmd_size_a = ALIGN(cmd_size, 16);	/* USB restriction */

	d_fnstart(8, dev, "(i2400m %p cmd %p size %zu)\n",
		  i2400m, _cmd, cmd_size);
	result = -E2BIG;
	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
		goto error_too_big;
	/* Bounce to bm_cmd_buf unless the caller built the command
	 * there already; also guard against _cmd == NULL, which the
	 * opcode extraction above explicitly anticipates */
	if (_cmd != NULL && _cmd != i2400m->bm_cmd_buf)
		memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
	cmd = i2400m->bm_cmd_buf;
	if (cmd_size_a > cmd_size)			/* Zero pad space */
		memset(i2400m->bm_cmd_buf + cmd_size, 0,
		       cmd_size_a - cmd_size);
	if ((flags & I2400M_BM_CMD_RAW) == 0) {
		if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
			dev_warn(dev, "SW BUG: response_required == 0\n");
		i2400m_bm_cmd_prepare(cmd);
	}
	result = i2400mu_tx_bulk_out(i2400mu, i2400m->bm_cmd_buf, cmd_size);
	if (result < 0) {
		dev_err(dev, "boot-mode cmd %d: cannot send: %zd\n",
			opcode, result);
		goto error_cmd_send;
	}
	if (result != cmd_size) {		/* all was transferred? */
		/* result is ssize_t: print with %zd, not %zu */
		dev_err(dev, "boot-mode cmd %d: incomplete transfer "
			"(%zd vs %zu submitted)\n",  opcode, result, cmd_size);
		result = -EIO;
		goto error_cmd_size;
	}
error_cmd_size:
error_cmd_send:
error_too_big:
	d_fnend(8, dev, "(i2400m %p cmd %p size %zu) = %zd\n",
		i2400m, _cmd, cmd_size, result);
	return result;
}
/*
 * Reset a device at different levels (warm, cold or bus)
 *
 * @i2400ms: device descriptor
 * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
 *
 * FIXME: not tested -- need to confirm expected effects
 *
 * Warm and cold resets get an SDIO reset if they fail (unimplemented)
 *
 * Warm reset:
 *
 * The device will be fully reset internally, but won't be
 * disconnected from the bus (so no reenumeration will
 * happen). Firmware upload will be necessary.
 *
 * The device will send a reboot barker that will trigger the driver
 * to reinitialize the state via __i2400m_dev_reset_handle.
 *
 *
 * Cold and bus reset:
 *
 * The device will be fully reset internally, disconnected from the
 * bus and a reenumeration will happen. Firmware upload will be
 * necessary. Thus, we don't do any locking or struct
 * reinitialization, as we are going to be fully disconnected and
 * reenumerated.
 *
 * Note we need to return -ENODEV if a warm reset was requested and we
 * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
 * and wimax_dev->op_reset.
* * WARNING: no driver state saved/fixed */ static int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) { int result = 0; struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); struct device *dev = i2400m_dev(i2400m); static const __le32 i2400m_WARM_BOOT_BARKER[4] = { cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), cpu_to_le32(I2400M_WARM_RESET_BARKER), }; static const __le32 i2400m_COLD_BOOT_BARKER[4] = { cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), cpu_to_le32(I2400M_COLD_RESET_BARKER), }; if (rt == I2400M_RT_WARM) result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER, sizeof(i2400m_WARM_BOOT_BARKER)); else if (rt == I2400M_RT_COLD) result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER, sizeof(i2400m_COLD_BOOT_BARKER)); else if (rt == I2400M_RT_BUS) { do_bus_reset: i2400ms_bus_release(i2400m); /* Wait for the device to settle */ msleep(40); result = i2400ms_bus_setup(i2400m); } else BUG(); if (result < 0 && rt != I2400M_RT_BUS) { dev_err(dev, "%s reset failed (%d); trying SDIO reset\n", rt == I2400M_RT_WARM ? "warm" : "cold", result); rt = I2400M_RT_BUS; goto do_bus_reset; } return result; }
void i2400m_report_tlv_rf_switches_status( struct i2400m *i2400m, const struct i2400m_tlv_rf_switches_status *rfss) { struct device *dev = i2400m_dev(i2400m); enum i2400m_rf_switch_status hw, sw; enum wimax_st wimax_state; sw = le32_to_cpu(rfss->sw_rf_switch); hw = le32_to_cpu(rfss->hw_rf_switch); d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n", i2400m, rfss, hw, sw); wimax_state = wimax_state_get(&i2400m->wimax_dev); if (wimax_state < WIMAX_ST_RADIO_OFF) { d_printf(3, dev, "ignoring RF switches report, state %u\n", wimax_state); goto out; } switch (sw) { case I2400M_RF_SWITCH_ON: wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON); break; case I2400M_RF_SWITCH_OFF: wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF); break; default: dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw); } switch (hw) { case I2400M_RF_SWITCH_ON: wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON); break; case I2400M_RF_SWITCH_OFF: wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF); break; default: dev_err(dev, "HW BUG? Unknown RF HW state 0x%x\n", hw); } out: d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n", i2400m, rfss, hw, sw); }
/*
 * Parse a 'state report' and extract information
 *
 * @i2400m: device descriptor
 * @l3l4_hdr: pointer to message; it has been already validated for
 *     consistent size.
 * @size: size of the message (header + payload). The header length
 *     declaration is assumed to be congruent with @size (as in
 *     sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
 * @tag: tag string for log messages, passed through to the TLV parser
 *
 * Walk over the TLVs in a report state and act on them.
 */
static
void i2400m_report_state_hook(struct i2400m *i2400m,
			      const struct i2400m_l3l4_hdr *l3l4_hdr,
			      size_t size, const char *tag)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *tlv = NULL;
	size_t tlv_size = le16_to_cpu(l3l4_hdr->length);

	d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
		  i2400m, l3l4_hdr, size, tag);
	/* Visit each TLV in the payload and dispatch it */
	while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
					     tlv_size, tlv)) != NULL)
		i2400m_report_state_parse_tlv(i2400m, tlv, tag);
	d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
		i2400m, l3l4_hdr, size, tag);
}
/*
 * Accept bringing the interface up only once device initialization
 * has completed (i2400m->updown set under init_mutex).
 *
 * Returns: 0 if the device is up, -EBUSY otherwise.
 */
static
int i2400m_open(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int rc;

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	/* Make sure we wait until init is complete... */
	mutex_lock(&i2400m->init_mutex);
	rc = i2400m->updown ? 0 : -EBUSY;
	mutex_unlock(&i2400m->init_mutex);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
		net_dev, i2400m, rc);
	return rc;
}
/*
 * TX an skb to an idle device
 *
 * When the device is in basestation-idle mode, we need to wake it up
 * and then TX. So we queue a work_struct for doing so.
 *
 * We need to get an extra ref for the skb (so it is not dropped), as
 * well as be careful not to queue more than one request (won't help
 * at all). If more than one request comes or there are errors, we
 * just drop the packets (see i2400m_hard_start_xmit()).
 *
 * Returns: 0 if the skb was handed off to the wake-up work, -EBUSY if
 *     a wake&tx request was already pending (the skb is dropped by
 *     the caller in that case).
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* We hold a ref count for i2400m and skb, so when
	 * stopping() the device, we need to cancel that work
	 * and if pending, release those resources. */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	/* Only one wake&tx can be in flight: wake_tx_skb acts as the
	 * "busy" marker, released by i2400m_wake_tx_work() */
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		/* Device ref dropped by the work function via i2400m_put() */
		i2400m_get(i2400m);
		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		/* schedule_work() returning 0 here would mean the work
		 * was already queued despite wake_tx_skb being NULL */
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* Yes, this happens even if we stopped the
		 * queue -- blame the queue disciplines that
		 * queue without looking -- I guess there is a reason
		 * for that. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
/* * Request entering power save * * This command is (mainly) executed when the device indicates that it * is ready to go into powersave mode via a REPORT_POWERSAVE_READY. */ int i2400m_cmd_enter_powersave(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_cmd_enter_power_save *cmd; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE); cmd->tlv.length = cpu_to_le16(sizeof(cmd->val)); cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'Enter power save' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result == -EACCES) d_printf(1, dev, "Cannot enter power save mode\n"); else if (result < 0) dev_err(dev, "'Enter power save' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE, result, strerr); else d_printf(1, dev, "device ready to power save\n"); kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; }