/**
 * gcu_get_adapter
 *
 * gcu_get_adapter is used by the functions exported in gcu_if.c to get
 * access to the memory addresses needed to access the MMIO registers
 * of the GCU.
 *
 * On success the global adapter spinlock is held; the caller must pair
 * this call with gcu_release_adapter() to drop it. On failure (NULL
 * return) the lock is NOT held.
 **/
const struct gcu_adapter *
gcu_get_adapter(void)
{
    GCU_DBG("%s\n", __func__);

    /*
     * Take the lock BEFORE testing global_adapter. The old code tested
     * it unlocked, which raced with gcu_remove()/free_gcu_adapter()
     * clearing the pointer between the check and the lock acquisition.
     */
    spin_lock_irqsave(&global_adapter_spinlock, g_intflags);

    if (global_adapter == NULL) {
        GCU_DBG("global gcu_adapter is not available\n");
        spin_unlock_irqrestore(&global_adapter_spinlock, g_intflags);
        return NULL;
    }

    return global_adapter;
}
/** * gcu_suspend - device sleep function * @pdev: PCI device information struct * * gcu_supend is generally called to place a device in sleep mode, * however the GCU doesn't support power mangement. For this case, * it is part of the gcu_notify_reboot() call chain to quiese the * device before a reboot. **/ static int gcu_suspend(struct pci_dev *pdev, uint32_t state) { /*struct gcu_adapter *adapter = pci_get_drvdata(pdev); */ #if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ) struct net_device *netdev = pci_get_drvdata(pdev); struct gcu_adapter *adapter = netdev_priv(netdev); #endif GCU_DBG("%s\n", __func__); pci_save_state(pdev); pci_disable_device(pdev); state = (state > 0) ? 0 : 0; /* * GCU doesn't support power management, but want to * leave a hook incase that situation changes in the future * * pci_set_power_state(pdev, state); * */ return state; }
/**
 * gcu_exit_module - Driver Exit Cleanup Routine
 *
 * gcu_exit_module is called just before the driver is removed
 * from memory.
 *
 * Teardown order matters: the reboot notifier references this driver,
 * so it is unhooked before the PCI driver itself is unregistered.
 **/
static void __exit
gcu_exit_module(void)
{
    GCU_DBG("%s\n", __func__);

    /* stop receiving reboot events before the driver goes away */
    unregister_reboot_notifier(&gcu_notifier_reboot);

    /* detaches all bound devices, invoking gcu_remove() for each */
    pci_unregister_driver(&gcu_driver);
}
/*
 * gcu_iegbe_resume
 * @pdev: gcu pci_dev
 *
 * purpose - exported PM resume function used by the iegbe
 *           driver to enable the GCU device.
 */
void
gcu_iegbe_resume(struct pci_dev *pdev)
{
    GCU_DBG("%s\n", __func__);

    pci_restore_state(pdev);

    /*
     * pci_enable_device() can fail and is __must_check on modern
     * kernels; the old code silently ignored the result. The void
     * interface gives no way to propagate the error, so log it.
     */
    if (pci_enable_device(pdev)) {
        GCU_ERR("Unable to re-enable GCU PCI device on resume\n");
    }

    return;
}
/*
 * gcu_iegbe_suspend
 * @pdev: gcu pci_dev
 * @state: PM state (deliberately ignored; the GCU has no PM support)
 *
 * purpose - exported PM suspend function used by the iegbe
 *           driver to disable the GCU device.
 *
 * Returns 0 always.
 */
int
gcu_iegbe_suspend(struct pci_dev *pdev, uint32_t state)
{
    GCU_DBG("%s\n", __func__);

    pci_save_state(pdev);
    pci_disable_device(pdev);

    /* the original "state = (state > 0) ? 0 : 0;" always produced 0 */
    state = 0;

    return state;
}
/** * free_gcu_adapter * @adapter: gcu_adapter struct to be free'd * * free_gcu_adapter is a wrapper for the kfree call for the * device specific data block plus clears the global_adapter variable * * Note that this function assumes that the spinlock for the global * gcu_adapter struct as been acquired. **/ static void free_gcu_adapter(struct gcu_adapter *adapter) { GCU_DBG("%s\n", __func__); global_adapter = 0; if(adapter){ kfree(adapter); } }
/**
 * alloc_gcu_adapter
 *
 * alloc_gcu_adapter is a wrapper for the kmalloc call for the
 * device specific data block plus inits the global_adapter variable.
 *
 * Note that this function assumes that the spinlock for the global
 * gcu_adapter struct has been acquired.
 *
 * Returns the zeroed adapter, or NULL on allocation failure (in which
 * case global_adapter is also NULL).
 **/
static struct gcu_adapter *
alloc_gcu_adapter(void)
{
    struct gcu_adapter *adapter;

    GCU_DBG("%s\n", __func__);

    /* no cast needed on kmalloc's void* return in C */
    adapter = kmalloc(sizeof(*adapter), GFP_KERNEL);
    global_adapter = adapter;
    if (!adapter) {
        GCU_DBG("Unable to allocate space for global gcu_adapter\n");
        return NULL;
    }

    memset(adapter, 0, sizeof(*adapter));

    return adapter;
}
/** * gcu_remove - Device Removal Routine * @pdev: PCI device information struct * * gcu_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void gcu_remove(struct pci_dev *pdev) { struct gcu_adapter *adapter = pci_get_drvdata(pdev); GCU_DBG("%s\n", __func__); iounmap(adapter->hw_addr); pci_release_regions(pdev); free_gcu_adapter(adapter); pci_set_drvdata(pdev, NULL); }
/**
 * gcu_release_adapter
 * @adapter: handle previously obtained from gcu_get_adapter()
 *
 * gcu_release_adapter is used by the functions exported in gcu_if.c to
 * release the adapter spinlock and the handle to the adapter.
 *
 * NOTE(review): the spinlock is dropped even when @adapter is NULL;
 * callers must only invoke this after a successful gcu_get_adapter()
 * (which acquires the lock), otherwise this unlocks a lock that was
 * never taken — confirm no caller does that.
 **/
void
gcu_release_adapter(const struct gcu_adapter **adapter)
{
    GCU_DBG("%s\n", __func__);

    if(adapter == NULL) {
        GCU_ERR("global gcu_adapter handle is invalid\n");
    } else {
        /* clear the caller's handle so it cannot be used after release */
        *adapter = 0;
    }

    /* drop the lock taken by gcu_get_adapter() */
    spin_unlock_irqrestore(&global_adapter_spinlock, g_intflags);

    return;
}
static int gcu_notify_reboot(struct notifier_block *nb, unsigned long event, void *p) { struct pci_dev *pdev = NULL; GCU_DBG("%s\n", __func__); switch(event) { case SYS_DOWN: case SYS_HALT: case SYS_POWER_OFF: while((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { if(pci_dev_driver(pdev) == &gcu_driver){ gcu_suspend(pdev, 0x3); } } } return NOTIFY_DONE; }
/**
 * gcu_suspend - device sleep function
 * @pdev: PCI device information struct
 * @state: requested power state (deliberately ignored; see below)
 *
 * gcu_suspend is generally called to place a device in sleep mode,
 * however the GCU doesn't support power management. For this case,
 * it is part of the gcu_notify_reboot() call chain to quiesce the
 * device before a reboot.
 *
 * NOTE(review): this definition appears to duplicate an earlier
 * static gcu_suspend() in this file — confirm only one survives.
 *
 * Returns 0 always.
 **/
static int
gcu_suspend(struct pci_dev *pdev, uint32_t state)
{
    /* struct gcu_adapter *adapter = pci_get_drvdata(pdev); */

    GCU_DBG("%s\n", __func__);

    pci_save_state(pdev);
    pci_disable_device(pdev);

    /*
     * The original "state = (state > 0) ? 0 : 0;" always produced 0;
     * say so directly. The requested state is intentionally ignored:
     * the GCU doesn't support power management, but we want to leave
     * a hook in case that situation changes in the future:
     *
     *     pci_set_power_state(pdev, state);
     */
    state = 0;

    return state;
}
/** * gcu_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in gcu_pci_tbl * * Returns 0 on success, negative on failure * * gcu_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int gcu_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct gcu_adapter *adapter=0; uint32_t mmio_start, mmio_len; int err; GCU_DBG("%s\n", __func__); if((err = pci_enable_device(pdev))) { GCU_DBG("Unable to enable PCI Device\n"); return err; } if((err = pci_request_regions(pdev, gcu_driver_name))) { GCU_DBG("Unable to acquire requested memory regions\n"); return err; } /* * acquire the adapter spinlock. Once the module is loaded, it is possible for * someone to access the adapter struct via the interface functions exported * in gcu_if.c */ spin_lock(&global_adapter_spinlock); adapter = alloc_gcu_adapter(); if(!adapter) { gcu_probe_err(err_alloc_gcu_adapter, pdev, adapter); spin_unlock(&global_adapter_spinlock); return -ENOMEM; } pci_set_drvdata(pdev, adapter); adapter->pdev = pdev; adapter->msg_enable = (1 << debug) - 1; mmio_start = pci_resource_start(pdev, BAR_0); mmio_len = pci_resource_len(pdev, BAR_0); adapter->hw_addr = ioremap(mmio_start, mmio_len); if(!adapter->hw_addr) { GCU_DBG("Unable to map mmio\n"); gcu_probe_err(err_ioremap, pdev, adapter); spin_unlock(&global_adapter_spinlock); return -EIO; } strncpy(adapter->name, pci_name(pdev), sizeof(adapter->name)-1); adapter->mem_start = mmio_start; adapter->mem_end = mmio_start + mmio_len; adapter->vendor_id = pdev->vendor; adapter->device_id = pdev->device; adapter->subsystem_vendor_id = pdev->subsystem_vendor; adapter->subsystem_id = pdev->subsystem_device; pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->revision_id); pci_read_config_word(pdev, PCI_COMMAND, &adapter->pci_cmd_word); global_adapter = adapter; 
spin_unlock(&global_adapter_spinlock); DPRINTK(PROBE, INFO, "Intel(R) GCU Initialized\n"); return 0; }
/** * gcu_write_verify * @phy_num: phy we want to write to, either 0, 1, or 2 * @reg_addr: address in PHY's register space to write to * @phy_data: data to be checked * @adapter: pointer to global adapter struct * * This f(n) assumes that the spinlock acquired for adapter is * still in force. **/ int32_t gcu_write_verify(uint32_t phy_num, uint32_t reg_addr, uint16_t written_data, const struct gcu_adapter *adapter) { uint32_t data = 0; uint32_t timeoutCounter = 0; const uint32_t timeoutCounterMax = GCU_MAX_ATTEMPTS; uint32_t complete = 0; GCU_DBG("%s\n", __func__); if(!adapter) { GCU_ERR("Invalid adapter pointer\n"); return 0; } if(phy_num > MDIO_COMMAND_PHY_ADDR_MAX) { GCU_ERR("phy_num = %d, which is greater than " "MDIO_COMMAND_PHY_ADDR_MAX\n", phy_num); return 0; } if(reg_addr > MDIO_COMMAND_PHY_REG_MAX) { GCU_ERR("reg_addr = %d, which is greater than " "MDIO_COMMAND_PHY_REG_MAX\n", phy_num); return 0; } /* format the data to be written to MDIO_COMMAND_REG */ data |= (reg_addr << MDIO_COMMAND_PHY_REG_OFFSET); data |= (phy_num << MDIO_COMMAND_PHY_ADDR_OFFSET); data |= MDIO_COMMAND_GO_MASK; /* * We write to MDIO_COMMAND_REG initially, then read that * same register until its MDIO_GO bit is cleared. 
When cleared, * the transaction is complete */ iowrite32(data, adapter->hw_addr + MDIO_COMMAND_REG); do { timeoutCounter++; udelay(0x32); /* 50 microsecond delay */ data = ioread32(adapter->hw_addr + MDIO_COMMAND_REG); complete = (data & MDIO_COMMAND_GO_MASK) >> MDIO_COMMAND_GO_OFFSET; } while(complete && timeoutCounter < timeoutCounterMax); if(timeoutCounter == timeoutCounterMax && !complete) { GCU_ERR("Reached maximum number of retries" " accessing MDIO_COMMAND_REG\n"); return 0; } /* we retrieve the data from the MDIO_STATUS_REGISTER */ data = ioread32(adapter->hw_addr + MDIO_STATUS_REG); if((data & MDIO_STATUS_STATUS_MASK) != 0) { GCU_ERR("Unable to retrieve data from MDIO_STATUS_REG\n"); return 0; } return written_data == (uint16_t) (data & MDIO_STATUS_READ_DATA_MASK); }
/** * gcu_read_eth_phy * @phy_num: phy we want to write to, either 0, 1, or 2 * @reg_addr: address in PHY's register space to write to * @phy_data: data to be written * * interface function for other modules to access the GCU **/ int32_t gcu_read_eth_phy(uint32_t phy_num, uint32_t reg_addr, uint16_t *phy_data) { const struct gcu_adapter *adapter; uint32_t data = 0; uint32_t timeoutCounter = 0; const uint32_t timeoutCounterMax = GCU_MAX_ATTEMPTS; uint32_t complete = 0; #if defined(CONFIG_UTM2000) || defined(CONFIG_UTM3000) static int once = 0; if (once++ == 0) { vt_enable_port(1); mvl_enable_ports(2); mvl_enable_ports(4); //mii_dump(); } #endif /* CONFIG_UTM2000 || CONFIG_UTM3000 */ GCU_DBG("%s\n", __func__); #if defined(CONFIG_UTM2000) || defined(CONFIG_UTM3000) /* * For the Marvell 88E6161, we use an indirect addressing mode * to get at all the registers. We map PHY addresses over 32 to * these devices. ie. Device has phy address 2: to access, use a * virtual phy address range of 64 to 96, which will be mapped into * the switch. 
*/ if (phy_num >= MVL_PHYS_PHY_MAX) { int32_t mphy, phyid, result; adapter = gcu_get_adapter(); if(!adapter) { GCU_ERR("gcu_adapter not available, cannot access MMIO\n"); return -1; } mphy = phy_num / MVL_PHYS_PHY_MAX; phyid = phy_num % MVL_PHYS_PHY_MAX; if (phyid < 6) result = mvl_mii_phy_read(adapter, mphy, phyid, reg_addr, phy_data); else result = mvl_mii_read(adapter, mphy, phyid, reg_addr, phy_data); gcu_release_adapter(&adapter); if (result == -1) { GCU_ERR("Error reading from Marvell phy register\n"); return result; } return 0; } #endif /* CONFIG_UTM2000 || CONFIG_UTM3000 */ if(phy_num > MDIO_COMMAND_PHY_ADDR_MAX) { GCU_ERR("phy_num = %d, which is greater than " "MDIO_COMMAND_PHY_ADDR_MAX\n", phy_num); return -1; } if(reg_addr > MDIO_COMMAND_PHY_REG_MAX) { GCU_ERR("reg_addr = %d, which is greater than " "MDIO_COMMAND_PHY_REG_MAX\n", phy_num); return -1; } /* format the data to be written to MDIO_COMMAND_REG */ data |= (reg_addr << MDIO_COMMAND_PHY_REG_OFFSET); data |= (phy_num << MDIO_COMMAND_PHY_ADDR_OFFSET); data |= MDIO_COMMAND_GO_MASK; /* * this call contains a spinlock, so this may pause for a bit */ adapter = gcu_get_adapter(); if(!adapter) { GCU_ERR("gcu_adapter not available, cannot access MMIO\n"); return -1; } /* * We write to MDIO_COMMAND_REG initially, then read that * same register until its MDIO_GO bit is cleared. 
When cleared, * the transaction is complete */ iowrite32(data, adapter->hw_addr + MDIO_COMMAND_REG); do { timeoutCounter++; udelay(0x32); /* 50 microsecond delay */ data = ioread32(adapter->hw_addr + MDIO_COMMAND_REG); complete = (data & MDIO_COMMAND_GO_MASK) >> MDIO_COMMAND_GO_OFFSET; } while(complete && timeoutCounter < timeoutCounterMax); /* KAD !complete to complete */ if(timeoutCounter == timeoutCounterMax && !complete) { GCU_ERR("Reached maximum number of retries" " accessing MDIO_COMMAND_REG\n"); gcu_release_adapter(&adapter); return -1; } /* we retrieve the data from the MDIO_STATUS_REGISTER */ data = ioread32(adapter->hw_addr + MDIO_STATUS_REG); if((data & MDIO_STATUS_STATUS_MASK) != 0) { GCU_ERR("Unable to retrieve data from MDIO_STATUS_REG\n"); gcu_release_adapter(&adapter); return -1; } *phy_data = (uint16_t) (data & MDIO_STATUS_READ_DATA_MASK); gcu_release_adapter(&adapter); return 0; }
/** * gcu_write_eth_phy * @phy_num: phy we want to write to, either 0, 1, or 2 * @reg_addr: address in PHY's register space to write to * @phy_data: data to be written * * interface function for other modules to access the GCU **/ int32_t gcu_write_eth_phy(uint32_t phy_num, uint32_t reg_addr, uint16_t phy_data) { const struct gcu_adapter *adapter; uint32_t data = 0; uint32_t timeoutCounter = 0; const uint32_t timeoutCounterMax = GCU_MAX_ATTEMPTS; uint32_t complete; GCU_DBG("%s\n", __func__); #if defined(CONFIG_UTM2000) || defined(CONFIG_UTM3000) /* * For the Marvell 88E6161, we use an indirect addressing mode * to get at all the registers. We map PHY addresses over 32 to * these devices. ie. Device has phy address 2: to access, use a * virtual phy address range of 64 to 96, which will be mapped into * the switch. */ if (phy_num >= MVL_PHYS_PHY_MAX) { int32_t mphy, phyid, result; adapter = gcu_get_adapter(); if(!adapter) { GCU_ERR("gcu_adapter not available, cannot access MMIO\n"); return -1; } mphy = phy_num / MVL_PHYS_PHY_MAX; phyid = phy_num % MVL_PHYS_PHY_MAX; if (phyid < 6) result = mvl_mii_phy_write(adapter, mphy, phyid, reg_addr, phy_data); else result = mvl_mii_write(adapter, mphy, phyid, reg_addr, phy_data); gcu_release_adapter(&adapter); if (result == -1) { GCU_ERR("Error writing to Marvell phy register\n"); return result; } return 0; } #endif /* CONFIG_UTM2000 || CONFIG_UTM3000 */ if(phy_num > MDIO_COMMAND_PHY_ADDR_MAX) { GCU_ERR("phy_num = %d, which is greater than " "MDIO_COMMAND_PHY_ADDR_MAX\n", phy_num); return -1; } if(reg_addr > MDIO_COMMAND_PHY_REG_MAX) { GCU_ERR("reg_addr = %d, which is greater than " "MDIO_COMMAND_PHY_REG_MAX\n", phy_num); return -1; } /* format the data to be written to the MDIO_COMMAND_REG */ data = phy_data; data |= (reg_addr << MDIO_COMMAND_PHY_REG_OFFSET); data |= (phy_num << MDIO_COMMAND_PHY_ADDR_OFFSET); data |= MDIO_COMMAND_OPER_MASK | MDIO_COMMAND_GO_MASK; /* * get_gcu_adapter contains a spinlock, this may pause for a 
bit */ adapter = gcu_get_adapter(); if(!adapter) { GCU_ERR("gcu_adapter not available, cannot access MMIO\n"); return -1; } /* * We write to MDIO_COMMAND_REG initially, then read that * same register until its MDIO_GO bit is cleared. When cleared, * the transaction is complete */ iowrite32(data, adapter->hw_addr + MDIO_COMMAND_REG); do { timeoutCounter++; udelay(0x32); /* 50 microsecond delay */ data = ioread32(adapter->hw_addr + MDIO_COMMAND_REG); complete = (data & MDIO_COMMAND_GO_MASK) >> MDIO_COMMAND_GO_OFFSET; } while(complete && timeoutCounter < timeoutCounterMax); /* KAD !complete to complete */ if(timeoutCounter == timeoutCounterMax && !complete) { GCU_ERR("Reached maximum number of retries" " accessing MDIO_COMMAND_REG\n"); gcu_release_adapter(&adapter); return -1; } /* validate the write during debug */ #ifdef DBG if(!gcu_write_verify(phy_num, reg_addr, phy_data, adapter)) { GCU_ERR("Write verification failed for PHY=%d and addr=%d\n", phy_num, reg_addr); gcu_release_adapter(&adapter); return -1; } #endif gcu_release_adapter(&adapter); return 0; }