int octeon_phy_mem_named_block_free(cvmx_bootmem_desc_t *bootmem_desc_ptr, char *name)
{
    cvmx_bootmem_named_block_desc_t *named_block_ptr;


    if (bootmem_desc_ptr->major_version != 3)
    {
	printf("ERROR: Incompatible bootmem descriptor version: %d.%d\n",
	       bootmem_desc_ptr->major_version, bootmem_desc_ptr->minor_version);
	return(0);
    }
    /* Take lock here, as name lookup/block free/name free need to be atomic */
    octeon_lock(CAST64(&(bootmem_desc_ptr->lock)));
    named_block_ptr = octeon_phy_mem_named_block_find_internal(bootmem_desc_ptr, name);
    if (named_block_ptr)
    {
	octeon_phy_mem_block_free(bootmem_desc_ptr, named_block_ptr->base_addr, named_block_ptr->size);
	/* Set size to zero to indicate the block is not used. */
	named_block_ptr->size = 0;
    }
    octeon_unlock(CAST64(&(bootmem_desc_ptr->lock)));
    return(!!named_block_ptr);  /* 0 on failure, 1 on success */

}
uint64_t octeon_phy_mem_named_block_alloc(cvmx_bootmem_desc_t *bootmem_desc_ptr, uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name)
{
    uint64_t addr_allocated;
    cvmx_bootmem_named_block_desc_t *named_block_desc_ptr;

    if (bootmem_desc_ptr->major_version != 3)
    {
	printf("ERROR: Incompatible bootmem descriptor version: %d.%d\n",
	       bootmem_desc_ptr->major_version, bootmem_desc_ptr->minor_version);
	return(0);
    }


    /* Take lock here, as name lookup/block alloc/name add need to be atomic */
    octeon_lock(CAST64(&(bootmem_desc_ptr->lock)));


    /* Get pointer to first available named block descriptor */
    named_block_desc_ptr = octeon_phy_mem_named_block_find_internal(bootmem_desc_ptr, NULL);

    /* Check whether the name is already in use; return an error if the
    ** name is taken or there are no free named-block descriptors.
    */
    if (octeon_phy_mem_named_block_find_internal(bootmem_desc_ptr, name) || !named_block_desc_ptr)
    {
	octeon_unlock(CAST64(&(bootmem_desc_ptr->lock)));
	return(0);
    }


    /* Round size up to a multiple of the minimum alignment.
    ** We need the actual size allocated so that blocks can be coalesced
    ** when they are freed.  The alloc routine does the same rounding up
    ** on all allocations. */
    size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

    addr_allocated = octeon_phy_mem_block_alloc(bootmem_desc_ptr, size, min_addr, max_addr, alignment);
    if (addr_allocated)
    {
	named_block_desc_ptr->base_addr = addr_allocated;
	named_block_desc_ptr->size = size;
	strncpy(named_block_desc_ptr->name, name, bootmem_desc_ptr->named_block_name_len);
	named_block_desc_ptr->name[bootmem_desc_ptr->named_block_name_len - 1] = 0;
    }
    else
    {
	printf("octeon_phy_mem_named_block_alloc: alloc failed!\n");
    }


    octeon_unlock(CAST64(&(bootmem_desc_ptr->lock)));
    return(addr_allocated);

}
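/* Illustrative usage sketch (not part of the original source): "desc" stands
** for an already-initialized cvmx_bootmem_desc_t pointer and the block name
** "pkt_buffers" is made up. It shows the intended pairing of the alloc and
** free routines above; callers do not take the descriptor lock themselves. */
static void example_named_block_usage(cvmx_bootmem_desc_t *desc)
{
    /* Request 1 MB from the first 1 GB of memory, minimally aligned. */
    uint64_t base = octeon_phy_mem_named_block_alloc(desc, 1ull << 20,
                                                     0, 1ull << 30,
                                                     CVMX_BOOTMEM_ALIGNMENT_SIZE,
                                                     "pkt_buffers");
    if (!base)
        return;   /* name already used, no free descriptors, or out of memory */

    /* ... use the physical range [base, base + 1 MB) ... */

    /* Release both the memory and the name. */
    octeon_phy_mem_named_block_free(desc, "pkt_buffers");
}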
void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr)
{
    uint64_t address;
    octeon_lock(CAST64(&(cvmx_bootmem_desc->lock)));
    address = octeon_phy_mem_block_alloc(cvmx_bootmem_desc, size, min_addr, max_addr, alignment);
    octeon_unlock(CAST64(&(cvmx_bootmem_desc->lock)));

    if (address)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
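/* Illustrative call (values invented): request a 64 KB buffer aligned to
** 128 bytes, restricted to the first 256 MB of physical memory. A NULL
** return means the bootmem allocator could not satisfy the request. */
static void *example_alloc_dma_buffer(void)
{
    void *buf = cvmx_bootmem_alloc_range(64 * 1024, 128, 0, 256ull << 20);
    if (buf == NULL)
        printf("bootmem allocation failed\n");
    return buf;
}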
/**
 * Finds a named memory block by name.
 *
 * @param bootmem_desc_ptr
 *               Pointer to bootmem memory descriptor block (cvmx_bootmem_desc_t).
 * @param name   Name of memory block to find.
 *               If NULL pointer given, then finds unused descriptor, if available.
 *
 * @return Pointer to memory block descriptor, NULL if not found.
 *         If NULL returned when name parameter is NULL, then no memory
 *         block descriptors are available.
 */
cvmx_bootmem_named_block_desc_t * octeon_phy_mem_named_block_find(cvmx_bootmem_desc_t *bootmem_desc_ptr, char *name)
{
    cvmx_bootmem_named_block_desc_t *block_ptr;

    /* Lock the structure to make sure that it is not being changed while we are
    ** examining it.
    */
    octeon_lock(CAST64(&(bootmem_desc_ptr->lock)));
    block_ptr = octeon_phy_mem_named_block_find_internal(bootmem_desc_ptr, name);
    octeon_unlock(CAST64(&(bootmem_desc_ptr->lock)));
    return(block_ptr);

}
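/* Sketch only (not from the original file): look up a named block with the
** locked find routine above and convert its physical base address into a
** directly usable pointer. "desc" is a hypothetical initialized descriptor. */
static void *example_lookup_named_block(cvmx_bootmem_desc_t *desc, char *name)
{
    cvmx_bootmem_named_block_desc_t *block =
        octeon_phy_mem_named_block_find(desc, name);
    if (block == NULL)
        return NULL;                            /* no block with that name */
    return cvmx_phys_to_ptr(block->base_addr);  /* physical -> pointer */
}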
Example #5
/**
 * Function to display a POW internal queue to the user
 *
 * @param name       User visible name for the queue
 * @param name_param Parameter for printf in creating the name
 * @param valid      Set if the queue contains any elements
 * @param has_one    Set if the queue contains exactly one element
 * @param head       The head pointer
 * @param tail       The tail pointer
 */
static void __cvmx_pow_display_list(const char *name, int name_param, int valid, int has_one, uint64_t head, uint64_t tail)
{
    printf(name, name_param);
    printf(": ");
    if (valid)
    {
        if (has_one)
            printf("One element index=%llu(0x%llx)\n", CAST64(head), CAST64(head));
        else
            printf("Multiple elements head=%llu(0x%llx) tail=%llu(0x%llx)\n", CAST64(head), CAST64(head), CAST64(tail), CAST64(tail));
    }
    else
        printf("Empty\n");
}
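/* Example call (numbers invented): the name string is a printf format and
** name_param fills it in, matching how the dump routines below use it. */
static void example_show_queue(void)
{
    __cvmx_pow_display_list("Queue %d Memory", 3, /* valid */ 1,
                            /* has_one */ 0, /* head */ 17, /* tail */ 42);
    /* Prints: Queue 3 Memory: Multiple elements head=17(0x11) tail=42(0x2a) */
}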
/**
 * Setup access to the CONFIG_CAVIUM_RESERVE32 memory section
 * created by the kernel. This memory is used for shared
 * hardware buffers with 32 bit userspace applications.
 */
static void setup_reserve32(void)
{
    if (linux_mem32_min && linux_mem32_max)
    {
        int fd = open("/dev/mem", O_RDWR);
        if (fd < 0)
        {
            perror("ERROR opening /dev/mem");
            exit(-1);
        }

        void *linux_mem32_base_ptr = mmap(NULL,
                                          linux_mem32_max - linux_mem32_min,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED,
                                          fd,
                                          linux_mem32_min);

        close(fd);

        if (MAP_FAILED == linux_mem32_base_ptr)
        {
            perror("Error mapping reserve32");
            exit(-1);
        }

        linux_mem32_offset = CAST64(linux_mem32_base_ptr) - linux_mem32_min;
    }
}
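/* Sketch of how the offset computed above is meant to be used (an assumption
** based on the assignment of linux_mem32_offset, not code from the original
** file): translate a physical address inside the reserved region into a
** pointer within the mmap'ed window. */
static void *example_reserve32_phys_to_ptr(uint64_t phys)
{
    if (phys < linux_mem32_min || phys >= linux_mem32_max)
        return NULL;                      /* outside the reserved window */
    return (void *)(unsigned long)(phys + linux_mem32_offset);
}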
Example #7
int cvmx_bootmem_init(void *mem_desc_ptr)
{
	/* Here we set the global pointer to the bootmem descriptor
	 * block.  This pointer will be used directly, so we will set
	 * it up to be directly usable by the application.  It is set
	 * up as follows for the various runtime/ABI combinations:
	 *
	 * Linux 64 bit: Set XKPHYS bit
	 * Linux 32 bit: use mmap to create mapping, use virtual address
	 * CVMX 64 bit:  use physical address directly
	 * CVMX 32 bit:  use physical address directly
	 *
	 * Note that the CVMX environment assumes the use of 1-1 TLB
	 * mappings so that the physical addresses can be used
	 * directly
	 */
	if (!cvmx_bootmem_desc) {
#if   defined(CVMX_ABI_64)
		/* Set XKPHYS bit */
		cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
		cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
	}

	return 0;
}
Example #8
/**
 * This routine deprecates the cvmx_app_hotplug_register method. It
 * registers the application for hotplug and installs the CPU hotplug
 * callbacks specified in cb; see cvmx_app_hotplug_callbacks_t for a
 * description of the individual callbacks.
 *
 * This routine only needs to be called once per application.
 *
 * @param cb      Callback routines from the application.
 * @param arg     Argument passed to the application callback routines.
 * @param app_shutdown   When set to 1 the application will invoke core_shutdown
 *                       on each core. When set to 0 core shutdown will be
 *                       invoked automatically after invoking the
 *                       application callback.
 * @return        Return 0 on success, -1 on failure
 *
 */
int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *cb, void* arg,
                                 int app_shutdown)
{
    cvmx_app_hotplug_info_t *app_info;

    /* Find the list of applications launched by bootoct utility. */
    app_info = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask);
    cvmx_app_hotplug_info_ptr = app_info;
    if (!app_info)
    {
        /* Application not launched by bootoct? */
        printf("ERROR: cmvx_app_hotplug_register() failed\n");
        return -1;
    }
    /* Register the callback */
    app_info->data = CAST64(arg);
    app_info->shutdown_callback  = CAST64(cb->shutdown_callback);
    app_info->cores_added_callback = CAST64(cb->cores_added_callback);
    app_info->cores_removed_callback = CAST64(cb->cores_removed_callback);
    app_info->unplug_callback = CAST64(cb->unplug_core_callback);
    app_info->hotplug_start = CAST64(cb->hotplug_start);
    app_info->app_shutdown = app_shutdown;
#ifdef DEBUG
    printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
           app_info->coremask, app_info->valid);
#endif

    cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
    return 0;

}
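/* Usage sketch (handler names and bodies invented; the callback signatures
** are assumed to be void (*)(void *), matching the simpler
** cvmx_app_hotplug_register() variant shown later): fill in the callback
** table whose fields are copied out of "cb" above and register it. */
static void my_shutdown_cb(void *arg)   { /* quiesce this core */ }
static void my_unplug_cb(void *arg)     { /* a core is being removed */ }

static int example_register_hotplug(void)
{
    cvmx_app_hotplug_callbacks_t cb;

    memset(&cb, 0, sizeof(cb));
    cb.shutdown_callback    = my_shutdown_cb;
    cb.unplug_core_callback = my_unplug_cb;
    /* cores_added_callback, cores_removed_callback and hotplug_start are
       left NULL in this sketch. */

    /* app_shutdown = 0: core shutdown is invoked automatically after the
       application callback, per the comment above. */
    return cvmx_app_hotplug_register_cb(&cb, NULL, 0);
}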
int cvmx_bootmem_init(void *mem_desc_ptr)
{
    if (!cvmx_bootmem_desc) {
#if   defined(CVMX_ABI_64)

        cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
        cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
    }

    return 0;
}
Example #10
/**
 * @INTERNAL
 * Virtual sbrk, assigning virtual address in a global virtual address space.
 *
 * @param alignment   alignment requirement in bytes
 * @param size        size in bytes
 *
 * @return Assigned virtual address, or NULL if the virtual region is exhausted.
 */
static inline void *__cvmx_shmem_vsbrk_64(uint64_t alignment, uint64_t size)
{
    uint64_t nbase_64 = CAST64(__smdr->break64);
    void *nbase = NULL;

    /* Skip unaligned bytes */
    if (nbase_64 & alignment)
        nbase_64 += ~(nbase_64 & alignment) + 1;

    if (nbase_64 + size  <  CVMX_SHMEM_VADDR64_END)
    {
        nbase = CASTPTR(void *, nbase_64);
        __smdr->break64 = nbase + size;
    }

    return nbase;
}
Example #11
/**
 * This routine registers an application for hotplug. It installs a handler for
 * any incoming shutdown request. It also registers a callback routine from the
 * application. This callback is invoked when the application receives a 
 * shutdown notification. 
 *
 * This routine only needs to be called once per application. 
 *
 * @param fn      Callback routine from the application. 
 * @param arg     Argument to the application callback routine. 
 * @return        Return 0 on success, -1 on failure
 *
 */
int cvmx_app_hotplug_register(void(*fn)(void*), void* arg)
{
    /* Find the list of applications launched by bootoct utility. */

    if (!(cvmx_app_hotplug_info_ptr = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask)))
    {
        /* Application not launched by bootoct? */
        printf("ERROR: cmvx_app_hotplug_register() failed\n");
        return -1;
    }

    /* Register the callback */
    cvmx_app_hotplug_info_ptr->data = CAST64(arg);
    cvmx_app_hotplug_info_ptr->shutdown_callback = CAST64(fn);

#ifdef DEBUG
    printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n", 
                  cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid);
#endif

    cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);

    return 0;
}
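/* Usage sketch: the handler name and body are invented; its signature
** matches the fn parameter above. */
static void app_shutdown_handler(void *arg)
{
    /* Drain queues and flush state before the core is shut down. */
}

static int example_register_shutdown(void)
{
    return cvmx_app_hotplug_register(app_shutdown_handler, NULL);
}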
Example #12
/**
 * @INTERNAL
 * Probe a RGMII interface and determine the number of ports
 * connected to it. The RGMII interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_agl_probe(int interface)
{
	int port = cvmx_helper_agl_get_port(interface);
	union cvmx_agl_gmx_bist gmx_bist;
	union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
	union cvmx_agl_prtx_ctl agl_prtx_ctl;
	uint64_t clock_scale;
	int result;

	result = __cvmx_helper_agl_enumerate(interface);
	if (result == 0)
		return 0;

	/* Check BIST status */
	gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (gmx_bist.u64)
		cvmx_warn("Managment port AGL failed BIST (0x%016llx) on AGL%d\n",
			  CAST64(gmx_bist.u64), port);

	/* Disable the external input/output */
	gmx_prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	gmx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);

	/* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. Default value
	   is 0 (RGMII REFCLK). Recommended to use RGMII RXC(1) or sclk/4 (2)
	   to save cost.
	 */

	/* MII clock counts are based on the 125 MHz reference, so our
	 * delays need to be scaled to match the core clock rate. The
	 * "+1" is to make sure rounding always waits a little too
	 * long. FIXME.
	 */
	clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;

	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.clkrst = 0;
	agl_prtx_ctl.s.dllrst = 0;
	agl_prtx_ctl.s.clktx_byp = 0;


	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		agl_prtx_ctl.s.refclk_sel = 0;
		agl_prtx_ctl.s.clkrx_set =
			cvmx_helper_get_agl_rx_clock_skew(interface, port);
		agl_prtx_ctl.s.clkrx_byp =
			cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
								  port);
	}
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	/*
	 * Wait for the DLL to lock. External 125 MHz reference clock must be
	 * stable at this point.
	 */
	cvmx_wait(256 * clock_scale);

	/* Enable the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.drv_byp = 0;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	if (!OCTEON_IS_OCTEON3()) {
		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	}

	/* Enable the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.comp = 1;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	/* Wait for the compensation state to lock. */
	cvmx_wait(1024 * clock_scale);

	return result;
}
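/* Worked example of the clock scaling used above (hypothetical 1 GHz core
** clock; the 125 MHz reference comes from the comment in the probe routine):
** clock_scale = 1000000000 / 125000000 + 1 = 9, so the DLL wait becomes
** 256 * 9 = 2304 core cycles and the compensation wait 1024 * 9 = 9216. */
static uint64_t example_agl_dll_wait_cycles(uint64_t core_clock_hz)
{
    uint64_t clock_scale = core_clock_hz / 125000000 + 1;  /* 1 GHz -> 9 */
    return 256 * clock_scale;          /* >= 256 reference-clock periods */
}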
int cvmx_bootmem_init(void *mem_desc_ptr)
{
    /* Here we set the global pointer to the bootmem descriptor block.  This pointer will
    ** be used directly, so we will set it up to be directly usable by the application.
    ** It is set up as follows for the various runtime/ABI combinations:
    ** Linux 64 bit: Set XKPHYS bit
    ** Linux 32 bit: use mmap to create mapping, use virtual address
    ** CVMX 64 bit:  use physical address directly
    ** CVMX 32 bit:  use physical address directly
    ** Note that the CVMX environment assumes the use of 1-1 TLB mappings so that the physical addresses
    ** can be used directly
    */
    if (!cvmx_bootmem_desc)
    {
#if defined(__linux__) && defined(CVMX_ABI_N32)
        /* For 32 bit, we need to use mmap to create a mapping for the bootmem descriptor */
        int dm_fd = open("/dev/mem", O_RDWR);
        if (dm_fd < 0)
        {
            cvmx_dprintf("ERROR opening /dev/mem for boot descriptor mapping\n");
            return(-1);
        }

        void *base_ptr = mmap(NULL, 
                              sizeof(cvmx_bootmem_desc_t) + sysconf(_SC_PAGESIZE), 
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED, 
                              dm_fd, 
                              ((off_t)mem_desc_ptr) & ~(sysconf(_SC_PAGESIZE) - 1));

        if (MAP_FAILED == base_ptr)
        {
            cvmx_dprintf("Error mapping bootmem descriptor!\n");
            close(dm_fd);
            return(-1);
        }

        /* Adjust pointer to point to bootmem_descriptor, rather than start of page it is in */
        cvmx_bootmem_desc =  base_ptr + (((off_t)mem_desc_ptr) & (sysconf(_SC_PAGESIZE) - 1));

        /* Also setup mapping for named memory block desc. while we are at it.  Here we must keep another
        ** pointer around, as the value in the bootmem descriptor is shared with other applications. */
        base_ptr = mmap(NULL, 
                              sizeof(cvmx_bootmem_named_block_desc_t) * cvmx_bootmem_desc->named_block_num_blocks + sysconf(_SC_PAGESIZE), 
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED, 
                              dm_fd, 
                              ((off_t)cvmx_bootmem_desc->named_block_array_addr) & ~(sysconf(_SC_PAGESIZE) - 1));


        close(dm_fd);

        if (MAP_FAILED == base_ptr)
        {
            cvmx_dprintf("Error mapping named block descriptor!\n");
            return(-1);
        }

        /* Adjust pointer to point to named block array, rather than start of page it is in */
        linux32_named_block_array_ptr =  base_ptr + (((off_t)cvmx_bootmem_desc->named_block_array_addr) & (sysconf(_SC_PAGESIZE) - 1));


#elif defined(__linux__) && defined(CVMX_ABI_64)
        /* Set XKPHYS bit */
        cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));

#else
        /* Not linux, just copy pointer */
        cvmx_bootmem_desc = mem_desc_ptr;
#endif
    }


    return(0);
}
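/* Worked example (made-up descriptor address, 4 KB pages) of the page-offset
** split performed by the mmap calls above: the map starts at a page-aligned
** file offset and the descriptor lives at the remaining in-page offset. */
static void example_page_split(void)
{
    const uint64_t page_size = 4096;
    uint64_t desc_phys   = 0x01fffe10ull;                /* hypothetical */
    uint64_t mmap_offset = desc_phys & ~(page_size - 1); /* 0x01fff000 */
    uint64_t in_page_off = desc_phys &  (page_size - 1); /* 0x00000e10 */
    printf("map page at 0x%llx, descriptor at +0x%llx inside it\n",
           (unsigned long long)mmap_offset, (unsigned long long)in_page_off);
}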
Example #14
/**
 * Process incoming packets. 
 */
int inic_data_loop(void)
{
		cvm_common_wqe_t *swp = NULL;
		cvm_tcp_in_endpoints_t conn;
		cvm_tcp_tcphdr_t *th = NULL;
		cvm_ip_ip_t *ih = NULL;
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
		uint64_t cpu_clock_hz = sys_info_ptr->cpu_clock_hz;
		uint64_t tick_cycle = cvmx_get_cycle();
		uint64_t tick_step;
		uint32_t idle_processing_interval_ticks = (CVM_COMMON_IDLE_PROCESSING_INTERVAL)*(1000*1000)/(CVM_COMMON_TICK_LEN_US);
		uint32_t idle_processing_last_ticks = 0;
#ifdef INET6
		struct cvm_ip6_ip6_hdr *ip6 = NULL;
#ifdef CVM_ENET_TUNNEL
		struct cvm_ip6_ip6_hdr *i6h = NULL;
#endif
#endif


#ifdef CVM_CLI_APP
		uint64_t idle_cycle_start_value;
#endif

		/* for the simulator */
		if (cpu_clock_hz == 0)
		{
				cpu_clock_hz = 333000000;
		}

		tick_step = (CVM_COMMON_TICK_LEN_US * cpu_clock_hz) / 1000000;
		cvm_debug_print_interval = cpu_clock_hz;

#ifndef REAL_HW
		/* for the simulator, set the debug interval to be 3M cycles */
		cvm_debug_print_interval = 3000000;
#endif

#ifdef DUTY_CYCLE
		start_cycle = cvmx_get_cycle();
		process_count = 0;
#endif

		if (cvmx_coremask_first_core(coremask_data)) 
		{
				/* Initiate a timer transaction for arp entry timeouts */
				//if(cvm_enet_arp_timeout_init() != CVMX_TIM_STATUS_SUCCESS)
				//{
				//		printf("Failed init of cvm_ip_arp_timeout_init\n");
				//}
		}

#if defined(CVM_COMBINED_APP_STACK)
		/* Flush the packets sent by main_global and main_local */
		/*
		printf("before cvm_send_packet () \n ");
		if (out_swp)
		{
				cvm_send_packet ();
		}
		printf("after cvm_send_packet () \n ");
		*/
		uint64_t app_timeout = cvmx_get_cycle ();
#endif




		/* start the main loop */
		while (1)
		{


#ifdef DUTY_CYCLE
				end_cycle = cvmx_get_cycle();

				/* check the wrap around case */
				if (end_cycle < start_cycle) end_cycle += cpu_clock_hz;

				if ((end_cycle - start_cycle) > cvm_debug_print_interval)
				{
						inic_do_per_second_duty_cycle_processing();
				}
#endif /* DUTY_CYCLE */

				cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, 1);

				/* update the ticks variable */
				while (cvmx_get_cycle() - tick_cycle > tick_step)
				{
						tick_cycle += tick_step;
						cvm_tcp_ticks++;
						if (!(cvm_tcp_ticks & 0x1f)) CVM_COMMON_HISTORY_SET_CYCLE();
				}


				/* do common idle processing */
				if ( (cvm_tcp_ticks - idle_processing_last_ticks) > idle_processing_interval_ticks)
				{
						if (cvmx_coremask_first_core(coremask_data)) 
						{
								cvm_common_do_idle_processing();
						}

						idle_processing_last_ticks = cvm_tcp_ticks;
				}


#ifdef CVM_CLI_APP
				idle_cycle_start_value = cvmx_get_cycle();
#endif

				/* get work entry */
				swp = (cvm_common_wqe_t *)cvmx_pow_work_response_async(CVMX_SCR_WORK);
				if (swp == NULL)
				{
						idle_counter++;

						if(core_id == highest_core_id)
						{
								cvm_enet_check_link_status();
						}

#ifdef CVM_CLI_APP
						cvmx_fau_atomic_add64(core_idle_cycles[core_id], (cvmx_get_cycle()-idle_cycle_start_value) );
#endif
						continue;
				}

				CVM_COMMON_EXTRA_STATS_ADD64 (CVM_FAU_REG_WQE_RCVD, 1);

#ifdef WORK_QUEUE_ENTRY_SIZE_128 // {
				CVMX_PREFETCH0(swp);
#else
				/* Prefetch work-queue entry */
				CVMX_PREFETCH0(swp);
				CVMX_PREFETCH128(swp);
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

				out_swp = 0;
				out_swp_tail = 0;


#ifdef DUTY_CYCLE
				/* we are about to start processing the packet - remember the cycle count */
				process_start_cycle = cvmx_get_cycle();
#endif


				/* Short cut the common case */
				if (cvmx_likely(swp->hw_wqe.unused == 0))
				{
						goto packet_from_the_wire;
				}
				printf("Get work with unused is %X\n", swp->hw_wqe.unused);

				{
						{

packet_from_the_wire:

#if CVM_PKO_DONTFREE
								swp->hw_wqe.packet_ptr.s.i = 0;
#endif

#ifdef SANITY_CHECKS
								/* we have a work queue entry - do input sanity checks */
								ret = cvm_common_input_sanity_and_buffer_count_update(swp);
#endif

								if (cvmx_unlikely(swp->hw_wqe.word2.s.rcv_error))
								{
										goto discard_swp; /* Receive error */
								}

#ifndef WORK_QUEUE_ENTRY_SIZE_128 // {
								{
										/* Make sure pre-fetch completed */
										uint64_t dp = *(volatile uint64_t*)&swp->next;
								}
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

								{
										/* Initialize SW portion of the work-queue entry */
										uint64_t *dptr = (uint64_t*)(&swp->next);
										dptr[0] = 0;
										dptr[1] = 0;
										dptr[2] = 0;
										dptr[3] = 0;
								}

								if(cvmx_unlikely(swp->hw_wqe.word2.s.not_IP))
								{
										goto output;
								}

								/* Shortcut classification to avoid multiple lookups */
								if(
#ifndef INET6
												swp->hw_wqe.word2.s.is_v6 || 
#endif
												swp->hw_wqe.word2.s.is_bcast 
#ifndef INET6
												|| swp->hw_wqe.word2.s.is_mcast
#endif
								  )
								{
										goto discard_swp; /* Receive error */
								}


								/* Packet is unicast IPv4, without L2 errors */
								/* (All IP exceptions are dropped.  This currently includes
								 *  IPv4 options and IPv6 extension headers.)
								 */
								if(cvmx_unlikely(swp->hw_wqe.word2.s.IP_exc))
								{
										goto discard_swp;
								}

								/* Packet is Ipv4 (and no IP exceptions) */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.is_frag || !swp->hw_wqe.word2.s.tcp_or_udp))
								{
										goto output;
								}

#ifdef ANVL_RFC_793_COMPLIANCE
								/* RFC 793 says that:
								   - We should send a RST out when we get a packet with FIN set 
								   without the ACK bit set in the flags field. 
								   - We should send a RST out when we get a packet with no flag set.
								   Hence, let TCP stack handle these conditions.
								 */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error &&
														(cvmx_pip_l4_err_t)(swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG8_ERR) &&
														(cvmx_pip_l4_err_t)(swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG9_ERR)))
#else
										if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error))
#endif
										{
												cvm_tcp_handle_error(swp);
												goto discard_swp;
										}

								/* Packet is not fragmented, TCP/UDP, no IP exceptions/L4 errors */
								/* We can try an L4 lookup now, but we need all the information */
								ih = ((cvm_ip_ip_t *)&(swp->hw_wqe.packet_data[CVM_COMMON_PD_ALIGN]));

								if (!swp->hw_wqe.word2.s.is_v6)
								{
										/* for IPv4, the L4 header starts at CVM_COMMON_PD_ALIGN plus the IP header length within the packet data */
										swp->l4_offset = ((uint16_t)(ih->ip_hl) << 2) + CVM_COMMON_PD_ALIGN;
										swp->l4_prot = ih->ip_p;
								}
#ifdef INET6
								else
								{
										ip6 = (struct cvm_ip6_ip6_hdr *) &swp->hw_wqe.packet_data[CVM_COMMON_IP6_PD_ALIGN];

										CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5, 
														"%s: %d Packet trace Src: %s/%d Dest: %s/%d prot: %d len: %d\n", 
														__FUNCTION__, __LINE__, 
														cvm_ip6_ip6_sprintf (&ip6->ip6_dst), conn.ie_fport, 
														cvm_ip6_ip6_sprintf (&ip6->ip6_src), conn.ie_lport,
														swp->l4_prot, swp->hw_wqe.len);
										/* for IPv6, the L4 header follows the fixed-length IPv6 header (CVM_IP6_IP6_HDRLEN) */
										swp->l4_offset = CVM_IP6_IP6_HDRLEN;
										swp->l4_prot = ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt;

								}
#endif

								th = ((cvm_tcp_tcphdr_t *)&(swp->hw_wqe.packet_data[swp->l4_offset]));

								/* check if it is a TCP packet */
								if (swp->l4_prot == CVM_IP_IPPROTO_TCP)
								{
										process_handle(swp);
#ifdef INET6
										if (!swp->hw_wqe.word2.s.is_v6)
#endif
										{
												CVM_TCP_TCP_DUMP ((void*)ih);

												/* assume IPv4 for now */
												conn.ie_laddr = ih->ip_dst.s_addr;
												conn.ie_faddr = ih->ip_src.s_addr;
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

										}
#ifdef INET6
										else
										{
												/* IPv6: copy the 128-bit source and destination addresses */
												memcpy (&conn.ie6_laddr, &ip6->ip6_dst, sizeof (struct cvm_ip6_in6_addr));
												memcpy (&conn.ie6_faddr, &ip6->ip6_src, sizeof (struct cvm_ip6_in6_addr));
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

												/* do a TCP lookup */
												swp->tcb = cvm_tcp6_lookup (swp);

												CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5, "%s: %d TCPv6 lookup Src: %s/%d Dest: %s/%d ret_tcb: 0x%llx\n", 
																__FUNCTION__, __LINE__, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_faddr), conn.ie_fport, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_laddr), conn.ie_lport, 
																CAST64(swp->tcb));
										}
#endif // INET6
								}


								goto output;
						} /* packet from wire */
				} /* switch */


output:
				CVMX_SYNCWS;

				/* Send packet out */
				if (out_swp)
				{
						cvm_send_packet();
				}

				if(swp != NULL)
				{
						S3_send_packet((cvmx_wqe_t *)swp);
						swp = NULL;
				}
#ifdef DUTY_CYCLE
				process_end_cycle = cvmx_get_cycle();
				process_count += (process_end_cycle - process_start_cycle);
#endif
		}

		return (0);


discard_swp:
		/* Free the chained buffers */
		cvm_common_packet_free(swp);

		/* Free the work queue entry */
		cvm_common_free_fpa_buffer(swp, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		swp = NULL;
		goto output;

} /* inic_data_loop */
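/* Numeric sketch of the tick bookkeeping above (values hypothetical, since
** the CVM_COMMON_* macros are defined elsewhere): with a 1000 us tick and
** the 333 MHz simulator clock the loop falls back to, one tick corresponds
** to (1000 * 333000000) / 1000000 = 333000 core cycles. */
static uint64_t example_tick_step(void)
{
    const uint64_t tick_len_us  = 1000;       /* stand-in for CVM_COMMON_TICK_LEN_US */
    const uint64_t cpu_clock_hz = 333000000;  /* simulator default used above */

    return (tick_len_us * cpu_clock_hz) / 1000000;   /* = 333000 */
}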
Example #15
void __cvmx_pow_display_v2(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int num_cores;
    int core;
    int index;
    uint8_t entry_list[2048];

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_dump: Buffer too small, pow_dump_t = 0x%x, buffer_size = 0x%x\n", (int)sizeof(__cvmx_pow_dump_t), buffer_size);
        return;
    }

    memset(entry_list, 0, sizeof(entry_list));
    num_cores = cvmx_octeon_num_cores();

    /* Print the free list info */
    {
        int valid[3], has_one[3], head[3], tail[3], qnum_head, qnum_tail;
        int idx;

        valid[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_val;
        valid[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_val;
        valid[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_val;
        has_one[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_one;
        has_one[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_one;
        has_one[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_one;
        head[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_head;
        head[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_head;
        head[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_head;
        tail[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_tail;
        tail[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_tail;
        tail[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_tail;
        qnum_head = dump->sindexload[0][4].sindexload1_cn68xx.qnum_head;
        qnum_tail = dump->sindexload[0][4].sindexload1_cn68xx.qnum_tail;

        printf("Free List: qnum_head=%d, qnum_tail=%d\n", qnum_head, qnum_tail);
        printf("Free0: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[0], has_one[0], CAST64(head[0]), CAST64(tail[0]));
        printf("Free1: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[1], has_one[1], CAST64(head[1]), CAST64(tail[1]));
        printf("Free2: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[2], has_one[2], CAST64(head[2]), CAST64(tail[2]));
        
        idx=qnum_head;
        while (valid[0] || valid[1] || valid[2])
        {
            int qidx = idx % 3;

            if (head[qidx] == tail[qidx])
                valid[qidx] = 0;

            if (__cvmx_pow_entry_mark_list(head[qidx], CVMX_POW_LIST_FREE, entry_list))   
                break;
            head[qidx] = dump->smemload[head[qidx]][4].s_smemload3_cn68xx.fwd_index;
            //printf("qidx = %d, idx = %d, head[qidx] = %d\n", qidx, idx, head[qidx]);
            idx++;
        }
    }
            
    /* Print the core state */
    for (core = 0; core < num_cores; core++)
    {
        int pendtag = 1;
        int pendwqp = 2;
        int tag = 3;
        int wqp = 4;
        int links = 5;

        printf("Core %d State: tag=%s,0x%08x", core, 
               OCT_TAG_TYPE_STRING(dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type),
               dump->sstatus[core][tag].s_sstatus2_cn68xx.tag);
        if (dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
        {
            __cvmx_pow_entry_mark_list(dump->sstatus[core][tag].s_sstatus2_cn68xx.index, CVMX_POW_LIST_CORE + core, entry_list);
            printf(" grp=%d",                   dump->sstatus[core][tag].s_sstatus2_cn68xx.grp);
            printf(" wqp=0x%016llx",            CAST64(dump->sstatus[core][wqp].s_sstatus3_cn68xx.wqp));
            printf(" index=%d",                 dump->sstatus[core][tag].s_sstatus2_cn68xx.index);
            if (dump->sstatus[core][links].s_sstatus4_cn68xx.head)
                printf(" head");
            else
                printf(" prev=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.revlink_index);
            if (dump->sstatus[core][links].s_sstatus4_cn68xx.tail)
                printf(" tail");
            else
                printf(" next=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.link_index);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
        {
            printf(" pend_switch=%d",           dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch);
        }
                                                                                
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched)
        {
            printf(" pend_desched=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched);
            printf(" pend_nosched=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work)
        {
            if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work_wait)
                printf(" (Waiting for work)");
            else
                printf(" (Getting work)");
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we)
            printf(" pend_alloc_we=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we);
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr)
        {
            printf(" pend_nosched_clr=%d",      dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr);
            printf(" pend_index=%d",            dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_index);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
        {
            printf(" pending tag=%s,0x%08x",
                   OCT_TAG_TYPE_STRING(dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_type),
                   dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_tag);
        }
        if (dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_nosched_clr)
            printf(" pend_wqp=0x%016llx\n",     CAST64(dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_wqp));
        printf("\n");
    }

    /* Print out the state of the nosched list and the 16 deschedule lists. */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_val,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_one,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_head,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_tail);
    for (index=0; index<64; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_tail);
    }

    /* Print out the state of the 8 internal input queues */
    for (index=0; index<8; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
    }

    /* Print out the state of the 16 memory queues */
    for (index=0; index<8; index++)
    {
        const char *name;
        if (dump->sindexload[index][1].sindexload0_cn68xx.queue_head)
            name = "Queue %da Memory (is head)";
        else
            name = "Queue %da Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
        if (dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head)
            name = "Queue %db Memory (is head)";
        else
            name = "Queue %db Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_tail);
    }

    /* Print out each of the internal POW entries. Each entry has a tag, group,
       wqe, and possibly a next pointer. The next pointer is only valid if this
       entry isn't marked as a tail. */
    for (index=0; index<num_pow_entries; index++)
    {
        printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
               __cvmx_pow_list_names[entry_list[index]],
               OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload0_cn68xx.tag_type),
               dump->smemload[index][1].s_smemload0_cn68xx.tag,
               dump->smemload[index][2].s_smemload1_cn68xx.grp,
               CAST64(dump->smemload[index][2].s_smemload1_cn68xx.wqp));
        if (dump->smemload[index][1].s_smemload0_cn68xx.tail)
            printf(" tail");
        else
            printf(" next=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
        if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
        {
            printf(" prev=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
            printf(" nosched=%d", dump->smemload[index][1].s_smemload1_cn68xx.nosched);
            if (dump->smemload[index][3].s_smemload2_cn68xx.pend_switch)
            {
                printf(" pending tag=%s,0x%08x",
                       OCT_TAG_TYPE_STRING(dump->smemload[index][3].s_smemload2_cn68xx.pend_type),
                       dump->smemload[index][3].s_smemload2_cn68xx.pend_tag);
            }
        }
        printf("\n");
    }
}
Example #16
void __cvmx_pow_display_v1(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int num_cores;
    int core;
    int index;
    uint8_t entry_list[2048];

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_dump: Buffer too small\n");
        return;
    }

    memset(entry_list, 0, sizeof(entry_list));
    num_cores = cvmx_octeon_num_cores();

    /* Print the free list info */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_FREE, dump, entry_list,
                                     dump->sindexload[0][0].sindexload0.free_val,
                                     dump->sindexload[0][0].sindexload0.free_one,
                                     dump->sindexload[0][0].sindexload0.free_head,
                                     dump->sindexload[0][0].sindexload0.free_tail);

    /* Print the core state */
    for (core=0; core<num_cores; core++)
    {
        const int bit_rev = 1;
        const int bit_cur = 2;
        const int bit_wqp = 4;
        printf("Core %d State:  tag=%s,0x%08x", core,
               OCT_TAG_TYPE_STRING(dump->sstatus[core][bit_cur].s_sstatus2.tag_type),
               dump->sstatus[core][bit_cur].s_sstatus2.tag);
        if (dump->sstatus[core][bit_cur].s_sstatus2.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
        {
            __cvmx_pow_entry_mark_list(dump->sstatus[core][bit_cur].s_sstatus2.index, CVMX_POW_LIST_CORE + core, entry_list);
            printf(" grp=%d",                   dump->sstatus[core][bit_cur].s_sstatus2.grp);
            printf(" wqp=0x%016llx",            CAST64(dump->sstatus[core][bit_cur|bit_wqp].s_sstatus4.wqp));
            printf(" index=%d",                 dump->sstatus[core][bit_cur].s_sstatus2.index);
            if (dump->sstatus[core][bit_cur].s_sstatus2.head)
                printf(" head");
            else
                printf(" prev=%d", dump->sstatus[core][bit_cur|bit_rev].s_sstatus3.revlink_index);
            if (dump->sstatus[core][bit_cur].s_sstatus2.tail)
                printf(" tail");
            else
                printf(" next=%d", dump->sstatus[core][bit_cur].s_sstatus2.link_index);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_switch)
        {
            printf(" pend_switch=%d",           dump->sstatus[core][0].s_sstatus0.pend_switch);
            printf(" pend_switch_full=%d",      dump->sstatus[core][0].s_sstatus0.pend_switch_full);
            printf(" pend_switch_null=%d",      dump->sstatus[core][0].s_sstatus0.pend_switch_null);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_desched)
        {
            printf(" pend_desched=%d",          dump->sstatus[core][0].s_sstatus0.pend_desched);
            printf(" pend_desched_switch=%d",   dump->sstatus[core][0].s_sstatus0.pend_desched_switch);
            printf(" pend_nosched=%d",          dump->sstatus[core][0].s_sstatus0.pend_nosched);
            if (dump->sstatus[core][0].s_sstatus0.pend_desched_switch)
                printf(" pend_grp=%d",              dump->sstatus[core][0].s_sstatus0.pend_grp);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_new_work)
        {
            if (dump->sstatus[core][0].s_sstatus0.pend_new_work_wait)
                printf(" (Waiting for work)");
            else
                printf(" (Getting work)");
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_null_rd)
            printf(" pend_null_rd=%d",          dump->sstatus[core][0].s_sstatus0.pend_null_rd);
        if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
        {
            printf(" pend_nosched_clr=%d",      dump->sstatus[core][0].s_sstatus0.pend_nosched_clr);
            printf(" pend_index=%d",            dump->sstatus[core][0].s_sstatus0.pend_index);
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_switch ||
            (dump->sstatus[core][0].s_sstatus0.pend_desched &&
            dump->sstatus[core][0].s_sstatus0.pend_desched_switch))
        {
            printf(" pending tag=%s,0x%08x",
                   OCT_TAG_TYPE_STRING(dump->sstatus[core][0].s_sstatus0.pend_type),
                   dump->sstatus[core][0].s_sstatus0.pend_tag);
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
            printf(" pend_wqp=0x%016llx\n",     CAST64(dump->sstatus[core][bit_wqp].s_sstatus1.pend_wqp));
        printf("\n");
    }

    /* Print out the state of the nosched list and the 16 deschedule lists. */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
                            dump->sindexload[0][2].sindexload1.nosched_val,
                            dump->sindexload[0][2].sindexload1.nosched_one,
                            dump->sindexload[0][2].sindexload1.nosched_head,
                            dump->sindexload[0][2].sindexload1.nosched_tail);
    for (index=0; index<16; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
                                dump->sindexload[index][2].sindexload1.des_val,
                                dump->sindexload[index][2].sindexload1.des_one,
                                dump->sindexload[index][2].sindexload1.des_head,
                                dump->sindexload[index][2].sindexload1.des_tail);
    }

    /* Print out the state of the 8 internal input queues */
    for (index=0; index<8; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
                                dump->sindexload[index][0].sindexload0.loc_val,
                                dump->sindexload[index][0].sindexload0.loc_one,
                                dump->sindexload[index][0].sindexload0.loc_head,
                                dump->sindexload[index][0].sindexload0.loc_tail);
    }

    /* Print out the state of the 16 memory queues */
    for (index=0; index<8; index++)
    {
        const char *name;
        if (dump->sindexload[index][1].sindexload2.rmt_is_head)
            name = "Queue %da Memory (is head)";
        else
            name = "Queue %da Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index][1].sindexload2.rmt_val,
                                dump->sindexload[index][1].sindexload2.rmt_one,
                                dump->sindexload[index][1].sindexload2.rmt_head,
                                dump->sindexload[index][3].sindexload3.rmt_tail);
        if (dump->sindexload[index+8][1].sindexload2.rmt_is_head)
            name = "Queue %db Memory (is head)";
        else
            name = "Queue %db Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index+8][1].sindexload2.rmt_val,
                                dump->sindexload[index+8][1].sindexload2.rmt_one,
                                dump->sindexload[index+8][1].sindexload2.rmt_head,
                                dump->sindexload[index+8][3].sindexload3.rmt_tail);
    }

    /* Print out each of the internal POW entries. Each entry has a tag, group,
        wqe, and possibly a next pointer. The next pointer is only valid if this
        entry isn't marked as a tail. */
    for (index=0; index<num_pow_entries; index++)
    {
        printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
               __cvmx_pow_list_names[entry_list[index]],
               OCT_TAG_TYPE_STRING(dump->smemload[index][0].s_smemload0.tag_type),
               dump->smemload[index][0].s_smemload0.tag,
               dump->smemload[index][0].s_smemload0.grp,
               CAST64(dump->smemload[index][2].s_smemload1.wqp));
        if (dump->smemload[index][0].s_smemload0.tail)
            printf(" tail");
        else
            printf(" next=%d", dump->smemload[index][0].s_smemload0.next_index);
        if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
        {
            printf(" nosched=%d", dump->smemload[index][1].s_smemload2.nosched);
            if (dump->smemload[index][1].s_smemload2.pend_switch)
            {
                printf(" pending tag=%s,0x%08x",
                       OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload2.pend_type),
                       dump->smemload[index][1].s_smemload2.pend_tag);
            }
        }
        printf("\n");
    }
}