void
mgt_unlatch_all(islpci_private *priv)
{
	u32 u;
	int rvalue = 0;

	if (islpci_get_state(priv) < PRV_STATE_INIT)
		return;

	u = DOT11_OID_SSID;
	rvalue = mgt_commit_list(priv, &u, 1);
	/* Necessary if in MANUAL RUN mode? */
#if 0
	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);

	u = DOT11_OID_MLMEAUTOLEVEL;
	rvalue |= mgt_commit_list(priv, &u, 1);

	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);
#endif

	if (rvalue)
		printk(KERN_DEBUG "%s: Unlatching OIDs failed\n",
		       priv->ndev->name);
}
int
mgt_commit(islpci_private *priv)
{
	int rvalue;
	enum oid_num_t u;

	if (islpci_get_state(priv) < PRV_STATE_INIT)
		return 0;

	rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1));

	if (priv->iw_mode != IW_MODE_MONITOR)
		rvalue |= mgt_commit_list(priv, commit_part2,
					  VEC_SIZE(commit_part2));

	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);
	rvalue |= mgt_update_addr(priv);

	if (rvalue) {
		/* some requests have failed. The device might be in an
		   incoherent state. We should reset it! */
		printk(KERN_DEBUG "%s: mgt_commit: failure\n",
		       priv->ndev->name);
	}
	return rvalue;
}
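/*
 * Context sketch (not part of the original listing): commit_part1 and
 * commit_part2 are arrays of enum oid_num_t that mgt_commit() pushes to the
 * device in bulk, and VEC_SIZE() is the usual element-count macro -- the
 * second mgt_commit() variant further below spells it out as
 * sizeof(x) / sizeof(x[0]).  The entries below are illustrative only; they
 * reuse OIDs that appear elsewhere in this listing, not the driver's actual
 * commit lists.
 */
#define VEC_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static enum oid_num_t commit_part1[] = {
	OID_INL_MODE,			/* interface operating mode */
	DOT11_OID_MLMEAUTOLEVEL,	/* MLME handling level */
	DOT11_OID_SSID,			/* latched last, see mgt_unlatch_all() */
};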
int
mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len)
{
	int ret = 0;
	struct islpci_mgmtframe *response;
	int response_op = PIMFOR_OP_ERROR;
	int dlen;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);

	dlen = isl_oid[n].size;
	oid = isl_oid[n].oid;

	mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data);

	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
					     data, dlen + extra_len, &response);
		if (!ret) {
			response_op = response->header->operation;
			islpci_mgt_release(response);
		}
		if (ret || response_op == PIMFOR_OP_ERROR)
			ret = -EIO;
	} else
		ret = -EIO;

	if (data)
		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);

	return ret;
}
int
mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
{
	int ret = 0;
	struct islpci_mgmtframe *response = NULL;
	int response_op = PIMFOR_OP_ERROR;
	int dlen;
	void *cache, *_data = data;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);
	BUG_ON(extra > isl_oid[n].range);

	if (!priv->mib)
		/* memory has been freed */
		return -1;

	dlen = isl_oid[n].size;
	cache = priv->mib[n];
	cache += (cache ? extra * dlen : 0);
	oid = isl_oid[n].oid + extra;

	if (_data == NULL)
		/* we are requested to re-set a cached value */
		_data = cache;
	else
		mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data);

	/* If we are going to write to the cache, we don't want anyone to read
	 * it -> acquire write lock.
	 * Else we could acquire a read lock to be sure we don't bother the
	 * commit process (which takes a write lock). But I'm not sure if it's
	 * needed. */
	if (cache)
		down_write(&priv->mib_sem);

	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
					     _data, dlen, &response);
		if (!ret) {
			response_op = response->header->operation;
			islpci_mgt_release(response);
		}
		if (ret || response_op == PIMFOR_OP_ERROR)
			ret = -EIO;
	} else if (!cache)
		ret = -EIO;

	if (cache) {
		if (!ret && data)
			memcpy(cache, _data, dlen);
		up_write(&priv->mib_sem);
	}

	/* re-set given data to what it was */
	if (data)
		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);

	return ret;
}
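/*
 * Usage sketch (illustrative, not from the original listing): a typical
 * caller updates a cached OID with mgt_set_request() and then flushes the
 * configuration to the firmware with mgt_commit().  The helper name
 * example_set_mode() is hypothetical; only mgt_set_request(), mgt_commit()
 * and OID_INL_MODE appear in the code above.
 */
static int example_set_mode(islpci_private *priv, u32 mode)
{
	int err;

	/* update the cached OID (and the device, if it is ready) */
	err = mgt_set_request(priv, OID_INL_MODE, 0, &mode);
	if (err)
		return err;

	/* push the full cached configuration to the firmware */
	return mgt_commit(priv);
}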
/* this one removes one(!!) instance only */
static void
prism54_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;

	BUG_ON(!priv);

	if (!__in_cleanup_module) {
		printk(KERN_DEBUG "%s: hot unplug detected\n", ndev->name);
		islpci_set_state(priv, PRV_STATE_OFF);
	}

	printk(KERN_DEBUG "%s: removing device\n", ndev->name);

	unregister_netdev(ndev);

	/* free the interrupt request */
	if (islpci_get_state(priv) != PRV_STATE_OFF) {
		isl38xx_disable_interrupts(priv->device_base);
		islpci_set_state(priv, PRV_STATE_OFF);
		/* This below causes a lockup at rmmod time. It might be
		 * because some interrupts still linger after rmmod time,
		 * see bug #17 */
		/* pci_set_power_state(pdev, 3);*/	/* try to power-off */
	}

	free_irq(pdev->irq, priv);

	/* free the PCI memory and unmap the remapped page */
	islpci_free_memory(priv);

	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;

	pci_clear_mwi(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
void
mgt_commit(islpci_private *priv)
{
	int rvalue;
	u32 u;
	union oid_res_t r;

	if (islpci_get_state(priv) < PRV_STATE_INIT)
		return;

	rvalue = mgt_commit_list(priv, commit_part1,
				 sizeof (commit_part1) / sizeof (commit_part1[0]));

	if (priv->iw_mode != IW_MODE_MONITOR)
		rvalue |= mgt_commit_list(priv, commit_part2,
					  sizeof (commit_part2) / sizeof (commit_part2[0]));

	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);

	if (rvalue) {
		/* some requests have failed. The device might be in an
		   incoherent state. We should reset it! */
		printk(KERN_DEBUG "%s: mgt_commit has failed. Restart the "
		       "device\n", priv->ndev->name);
	}

	/* update the MAC addr. As it's not cached, no lock will be acquired by
	 * the mgt_get_request */
	mgt_get_request(priv, GEN_OID_MACADDRESS, 0, NULL, &r);
	memcpy(priv->ndev->dev_addr, r.ptr, 6);
	kfree(r.ptr);
}
irqreturn_t
islpci_interrupt(int irq, void *config)
{
	u32 reg;
	islpci_private *priv = config;
	struct net_device *ndev = priv->ndev;
	void __iomem *device = priv->device_base;
	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* lock the interrupt handler */
	spin_lock(&priv->slock);

	/* received an interrupt request on a shared IRQ line
	 * first check whether the device is in sleep mode */
	reg = readl(device + ISL38XX_CTRL_STAT_REG);
	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE) {
		/* device is in sleep mode, IRQ was generated by someone else */
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* check whether there is any source of interrupt on the device */
	reg = readl(device + ISL38XX_INT_IDENT_REG);

	/* also check the contents of the Interrupt Enable Register, because this
	 * will filter out interrupt sources from other devices on the same irq ! */
	reg &= readl(device + ISL38XX_INT_EN_REG);
	reg &= ISL38XX_INT_SOURCES;

	if (reg != 0) {
		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

		/* reset the request bits in the Identification register */
		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_FUNCTION_CALLS,
		      "IRQ: Identification register 0x%p 0x%x \n", device, reg);
#endif

		/* check for each bit in the register separately */
		if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			/* Queue has been updated */
			DEBUG(SHOW_TRACING, "IRQ: Update flag \n");

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->driver_curr_frag[0]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[1]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[2]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[3]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[4]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[5])
			    );

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->device_curr_frag[0]),
			      le32_to_cpu(priv->control_block->device_curr_frag[1]),
			      le32_to_cpu(priv->control_block->device_curr_frag[2]),
			      le32_to_cpu(priv->control_block->device_curr_frag[3]),
			      le32_to_cpu(priv->control_block->device_curr_frag[4]),
			      le32_to_cpu(priv->control_block->device_curr_frag[5])
			    );
#endif

			/* cleanup the data low transmit queue */
			islpci_eth_cleanup_transmit(priv, priv->control_block);

			/* device is in active state, update the
			 * powerstate flag if necessary */
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

			/* check all three queues in priority order
			 * call the PIMFOR receive function until the
			 * queue is empty */
			if (isl38xx_in_queue(priv->control_block,
					     ISL38XX_CB_RX_MGMTQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Management Queue\n");
#endif
				islpci_mgt_receive(ndev);

				islpci_mgt_cleanup_transmit(ndev);

				/* Refill slots in receive queue */
				islpci_mgmt_rx_fill(ndev);

				/* no need to trigger the device, next
				   islpci_mgt_transaction does it */
			}

			while (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Data Low Queue \n");
#endif
				islpci_eth_receive(priv);
			}

			/* check whether the data transmit queues were full */
			if (priv->data_low_tx_full) {
				/* check whether the transmit is not full anymore */
				if (ISL38XX_CB_TX_QSIZE -
				    isl38xx_in_queue(priv->control_block,
						     ISL38XX_CB_TX_DATA_LQ) >=
				    ISL38XX_MIN_QTHRESHOLD) {
					/* nope, the driver is ready for more
					 * network frames */
					netif_wake_queue(priv->ndev);

					/* reset the full flag */
					priv->data_low_tx_full = 0;
				}
			}
		}

		if (reg & ISL38XX_INT_IDENT_INIT) {
			/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "IRQ: Init flag, device initialized \n");
#endif
			wake_up(&priv->reset_done);
		}

		if (reg & ISL38XX_INT_IDENT_SLEEP) {
			/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
#endif
			isl38xx_handle_sleep_request(priv->control_block,
						     &powerstate,
						     priv->device_base);
		}

		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
			/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
#endif
			isl38xx_handle_wakeup(priv->control_block,
					      &powerstate, priv->device_base);
		}
	} else {
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* sleep -> ready */
	if (islpci_get_state(priv) == PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
		islpci_set_state(priv, PRV_STATE_READY);

	/* !sleep -> sleep */
	if (islpci_get_state(priv) != PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
		islpci_set_state(priv, PRV_STATE_SLEEP);

	/* unlock the interrupt handler */
	spin_unlock(&priv->slock);

	return IRQ_HANDLED;
}
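/*
 * Registration sketch (illustrative, not from the original listing): the
 * handler above is written for a shared IRQ line (it returns IRQ_NONE when
 * the interrupt was not ours), and prism54_remove() frees the line with
 * free_irq(pdev->irq, priv), so registration presumably looks roughly like
 * the call below, with priv passed as the dev_id cookie.  The helper name
 * example_request_irq() is hypothetical.
 */
static int example_request_irq(struct pci_dev *pdev, islpci_private *priv)
{
	/* shared PCI interrupt line; islpci_interrupt() receives priv back
	 * as its second argument */
	return request_irq(pdev->irq, islpci_interrupt, IRQF_SHARED,
			   priv->ndev->name, priv);
}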
int
mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data,
		union oid_res_t *res)
{
	int ret = -EIO;
	int reslen = 0;
	struct islpci_mgmtframe *response = NULL;
	int dlen;
	void *cache, *_res = NULL;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);
	BUG_ON(extra > isl_oid[n].range);

	res->ptr = NULL;

	if (!priv->mib)
		return -1;

	dlen = isl_oid[n].size;
	cache = priv->mib[n];
	cache += cache ? extra * dlen : 0;
	oid = isl_oid[n].oid + extra;
	reslen = dlen;

	if (cache)
		down_read(&priv->mib_sem);

	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
					     oid, data, dlen, &response);
		if (ret || !response ||
		    response->header->operation == PIMFOR_OP_ERROR) {
			if (response)
				islpci_mgt_release(response);
			ret = -EIO;
		}
		if (!ret) {
			_res = response->data;
			reslen = response->header->length;
		}
	} else if (cache) {
		_res = cache;
		ret = 0;
	}

	if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32)
		res->u = ret ? 0 : le32_to_cpu(*(u32 *) _res);
	else {
		res->ptr = kmalloc(reslen, GFP_KERNEL);
		BUG_ON(res->ptr == NULL);
		if (ret)
			memset(res->ptr, 0, reslen);
		else {
			memcpy(res->ptr, _res, reslen);
			mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE,
				      res->ptr);
		}
	}

	if (cache)
		up_read(&priv->mib_sem);

	if (response && !ret)
		islpci_mgt_release(response);

	if (reslen > isl_oid[n].size)
		printk(KERN_DEBUG
		       "mgt_get_request(0x%x): received data length was bigger "
		       "than expected (%d > %d). Memory is probably corrupted...\n",
		       oid, reslen, isl_oid[n].size);

	return ret;
}
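/*
 * Usage sketch (illustrative, not from the original listing): callers pass a
 * union oid_res_t and read either res.u (for OID_TYPE_U32 OIDs) or res.ptr
 * (for buffer OIDs, which they must kfree), as the second mgt_commit()
 * variant above does for GEN_OID_MACADDRESS.  The helper name
 * example_read_mode() is hypothetical, and OID_INL_MODE being a u32-typed
 * OID is an assumption here.
 */
static u32 example_read_mode(islpci_private *priv)
{
	union oid_res_t r;

	/* u32-typed OID: the value comes back in r.u, nothing to free */
	mgt_get_request(priv, OID_INL_MODE, 0, NULL, &r);
	return r.u;
}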