static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();
	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
/**
 * Shut down a memory pool and validate that it had all of
 * the buffers originally placed in it.
 *
 * @pool:   Pool to shut down
 * Returns Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
{
	uint64_t errors = 0;
	uint64_t count = 0;
	uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
	uint64_t finish = base + cvmx_fpa_pool_info[pool].size *
		cvmx_fpa_pool_info[pool].starting_element_count;
	void *ptr;
	uint64_t address;

	count = 0;
	do {
		ptr = cvmx_fpa_alloc(pool);
		if (ptr)
			address = cvmx_ptr_to_phys(ptr);
		else
			address = 0;
		if (address) {
			if ((address >= base) && (address < finish) &&
			    (((address - base) %
			      cvmx_fpa_pool_info[pool].size) == 0)) {
				count++;
			} else {
				cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
					     (unsigned long long)address,
					     cvmx_fpa_pool_info[pool].name,
					     (int)pool);
				errors++;
			}
		}
	} while (address);

#ifdef CVMX_ENABLE_PKO_FUNCTIONS
	if (pool == 0)
		cvmx_ipd_free_ptr();
#endif

	if (errors) {
		cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
			     cvmx_fpa_pool_info[pool].name, (int)pool,
			     (unsigned long long)base,
			     (unsigned long long)finish,
			     (unsigned long long)cvmx_fpa_pool_info[pool].size);
		return -errors;
	} else
		return 0;
}
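The documented return convention packs three outcomes into one uint64_t, so a caller must reinterpret the value as signed to distinguish "missing buffers" from "bad pointers". A minimal caller-side sketch of that check follows; the helper name and log text are hypothetical illustrations (assuming the kernel's pr_* logging macros), not part of the driver source:

/*
 * Hypothetical illustration of consuming cvmx_fpa_shutdown_pool()'s
 * documented return convention; not part of the driver source.
 */
static void example_check_pool_shutdown(void)
{
	/* Reinterpret as signed: the function returns -errors on failure. */
	int64_t result = (int64_t)cvmx_fpa_shutdown_pool(CVMX_FPA_PACKET_POOL);

	if (result == 0)
		pr_info("FPA packet pool shut down cleanly\n");
	else if (result > 0)
		pr_warn("FPA packet pool: %lld buffers missing\n",
			(long long)result);
	else
		pr_err("FPA packet pool: %lld illegal or excess pointers\n",
		       (long long)-result);
}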
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();
	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}