/** * Shutdown and free resources required by packet output. */ void cvmx_pko_shutdown(void) { union cvmx_pko_mem_queue_ptrs config; int queue; cvmx_pko_disable(); for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) { config.u64 = 0; config.s.tail = 1; config.s.index = 0; config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID; config.s.queue = queue & 0x7f; config.s.qos_mask = 0; config.s.buf_ptr = 0; if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) { union cvmx_pko_reg_queue_ptrs1 config1; config1.u64 = 0; config1.s.qid7 = queue >> 7; cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64); } cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64); cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue)); } __cvmx_pko_reset(); }
/*
 * Platform-driver remove callback for the Octeon ethernet driver.
 *
 * Teardown sequence (order matters): first silence the POW work-queue
 * interrupt and disable packet input (IPD) so no new work arrives, free
 * the IRQ, stop the refill worker, then drain the RX and TX paths.
 * Only after every net_device has been unregistered and freed are the
 * PKO queues shut down and the hardware FPA buffer pools emptied.
 * Always returns 0.
 */
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	/* Stop the periodic refill work before tearing down RX. */
	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/* Per-port worker must be idle before the netdev
			 * goes away.
			 */
			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
static int cvm_oct_remove(struct platform_device *pdev) { int port; cvmx_ipd_disable(); atomic_inc_return(&cvm_oct_poll_queue_stopping); cancel_delayed_work_sync(&cvm_oct_rx_refill_work); cvm_oct_rx_shutdown(); cvm_oct_tx_shutdown(); cvmx_pko_disable(); /* Free the ethernet devices */ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { if (cvm_oct_device[port]) { struct net_device *dev = cvm_oct_device[port]; struct octeon_ethernet *priv = netdev_priv(dev); cancel_delayed_work_sync(&priv->port_periodic_work); cvm_oct_tx_shutdown_dev(dev); unregister_netdev(dev); free_netdev(dev); cvm_oct_device[port] = NULL; } } cvmx_pko_shutdown(); cvmx_ipd_free_ptr(); /* Free the HW pools */ cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers); cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers); if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128); return 0; }