Code Example #1
/**
 * Initialize the DMA engines for use
 *
 * @return Zero on success, negative on failure
 */
int cvmx_dma_engine_initialize(void)
{
    int engine;

    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        cvmx_cmd_queue_result_t result;
        result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(engine),
                                           0, CVMX_FPA_OUTPUT_BUFFER_POOL,
                                           CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
        if (result != CVMX_CMD_QUEUE_SUCCESS)
            return -1;
        if (octeon_has_feature(OCTEON_FEATURE_NPEI))
        {
            cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr;
            dmax_ibuff_saddr.u64 = 0;
            dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
            cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64);
        }
        else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
        {
            cvmx_dpi_dmax_ibuff_saddr_t dpi_dmax_ibuff_saddr;
            dpi_dmax_ibuff_saddr.u64 = 0;
            dpi_dmax_ibuff_saddr.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
            dpi_dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
            cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), dpi_dmax_ibuff_saddr.u64);
        }
        else
        {
Code Example #2
File: cvmx-hfa.c  Project: 2asoft/freebsd
/**
 * Initialize the DFA block
 *
 * @return Zero on success, negative on failure
 */
int cvmx_hfa_initialize(void)
{
    cvmx_dfa_difctl_t control;
    cvmx_cmd_queue_result_t result;
    void *initial_base_address;
    int cmdsize;

    cmdsize = ((CVMX_FPA_DFA_POOL_SIZE - 8) / sizeof (cvmx_dfa_command_t)) *
        sizeof (cvmx_dfa_command_t);
    result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DFA, 0,
                                       CVMX_FPA_DFA_POOL, cmdsize + 8);
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return -1;

    control.u64 = 0;
    control.s.dwbcnt = CVMX_FPA_DFA_POOL_SIZE / 128;
    control.s.pool = CVMX_FPA_DFA_POOL;
    control.s.size = cmdsize / sizeof(cvmx_dfa_command_t);
    CVMX_SYNCWS;
    cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
    initial_base_address = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DFA);
    CVMX_SYNCWS;
    cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
    cvmx_read_csr(CVMX_DFA_DIFRDPTR); /* Read to make sure setup is complete */
    return 0;
}
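
The initializers above only document their return convention (zero on success, negative on failure), so here is a minimal, hedged call-site sketch; the error handling is illustrative and not taken from the SDK:

/* Hedged usage sketch: bring up the DFA/HFA command queue once during boot.
 * The error message below is illustrative only. */
if (cvmx_hfa_initialize() != 0) {
    cvmx_dprintf("ERROR: DFA block initialization failed\n");
    /* abort or fall back as appropriate for the application */
}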
Code Example #3
/**
 * Initialize the DFA block
 *
 * @return Zero on success, negative on failure
 */
int cvmx_hfa_initialize(void)
{
	cvmx_dfa_difctl_t control;
	cvmx_cmd_queue_result_t result;
	void *initial_base_address;
	int cmdsize;
	int dfa_pool = (int)cvmx_fpa_get_dfa_pool();
	uint64_t dfa_pool_size = cvmx_fpa_get_dfa_pool_block_size();

#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
	cvmx_fpa_global_initialize();
	if(dfa_config.dfa_pool.buffer_count != 0)
		__cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_pool_size,
			dfa_config.dfa_pool.buffer_count, "Dfa Cmd Buffers");
#endif
	cmdsize = ((dfa_pool_size - 8) / sizeof(cvmx_dfa_command_t)) * sizeof(cvmx_dfa_command_t);
	result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DFA, 0, dfa_pool, cmdsize + 8);
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return -1;

	control.u64 = 0;
	control.cn31xx.dwbcnt = dfa_pool_size / 128;
	control.s.pool = dfa_pool;
	control.s.size = cmdsize / sizeof(cvmx_dfa_command_t);
	CVMX_SYNCWS;
	cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
	initial_base_address = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DFA);
	CVMX_SYNCWS;
	cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
	cvmx_read_csr(CVMX_DFA_DIFRDPTR);	/* Read to make sure setup is complete */
	return 0;
}
Code Example #4
/**
 * Initialize the RAID block
 *
 * @param polynomial Coefficients for the RAID polynomial
 *
 * @return Zero on success, negative on failure
 */
int cvmx_raid_initialize(cvmx_rad_reg_polynomial_t polynomial)
{
    cvmx_cmd_queue_result_t result;
    cvmx_rad_reg_cmd_buf_t rad_reg_cmd_buf;
    int outputbuffer_pool = (int)cvmx_fpa_get_raid_pool();
    uint64_t outputbuffer_pool_size = cvmx_fpa_get_raid_pool_block_size();

    cvmx_write_csr(CVMX_RAD_REG_POLYNOMIAL, polynomial.u64);

    /* Initialize FPA pool for RAID command queue buffers */
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
    cvmx_fpa_global_initialize();
    if(raid_config.command_queue_pool.buffer_count != 0)
        __cvmx_helper_initialize_fpa_pool(outputbuffer_pool, outputbuffer_pool_size,
                                          raid_config.command_queue_pool.buffer_count, "Raid Cmd Bufs");
#endif
    result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_RAID, 0, outputbuffer_pool, outputbuffer_pool_size);
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return -1;

    rad_reg_cmd_buf.u64 = 0;
    rad_reg_cmd_buf.cn52xx.dwb = outputbuffer_pool_size / 128;
    rad_reg_cmd_buf.cn52xx.pool = outputbuffer_pool;
    rad_reg_cmd_buf.s.size = outputbuffer_pool_size / 8;
    rad_reg_cmd_buf.s.ptr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_RAID)) >> 7;
    cvmx_write_csr(CVMX_RAD_REG_CMD_BUF, rad_reg_cmd_buf.u64);
    return 0;
}
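
The RAID initializer above takes the polynomial as a register-image union (its u64 member is written straight to CVMX_RAD_REG_POLYNOMIAL), so a hedged call might look like the following; the coefficient value is left open and purely illustrative:

/* Hedged usage sketch: program the RAID block with an illustrative
 * polynomial value, then check the documented return convention. */
cvmx_rad_reg_polynomial_t poly;
poly.u64 = 0;                       /* start from a cleared register image */
/* ... fill in the desired polynomial coefficients here ... */
if (cvmx_raid_initialize(poly) != 0)
    cvmx_dprintf("ERROR: RAID block initialization failed\n");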
Code Example #5
/**
 * Initialize the ZIP QUEUE buffer
 *
 * @param queue : ZIP instruction queue
 * @param zcoremask : ZIP coremask to use for this queue
 *
 * @return Zero on success, negative on failure
 */
int cvmx_zip_queue_initialize(int queue, int zcoremask)
{
    cvmx_zip_quex_buf_t zip_que_buf;
    cvmx_cmd_queue_result_t result;
    cvmx_zip_quex_map_t que_map;
    cvmx_zip_que_ena_t que_ena;
    cvmx_zip_int_reg_t int_reg;

    /* Previous Octeon models have only one instruction queue; call
       cvmx_zip_initialize() to initialize the ZIP block */

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return -1;

    result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_ZIP_QUE(queue), 0,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return -1;

    /* 1. Program ZIP_QUE0/1_BUF to have the correct buffer pointer and
          size configured for each instruction queue */
    zip_que_buf.u64 = 0;
    zip_que_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
    zip_que_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
    zip_que_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
    zip_que_buf.s.ptr =  cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_ZIP_QUE(queue)))>>7;
    cvmx_write_csr(CVMX_ZIP_QUEX_BUF(queue), zip_que_buf.u64);

    /* 2. Change the queue-to-ZIP core mapping by programming ZIP_QUE0/1_MAP. */ 
    que_map.u64 = cvmx_read_csr(CVMX_ZIP_QUEX_MAP(queue));
    que_map.s.zce = zcoremask;
    cvmx_write_csr(CVMX_ZIP_QUEX_MAP(queue), que_map.u64); 

    /* Enable the queue */
    que_ena.u64 = cvmx_read_csr(CVMX_ZIP_QUE_ENA);
    que_ena.s.ena |= (1<<queue);
    cvmx_write_csr(CVMX_ZIP_QUE_ENA, que_ena.u64);

    /* Use round robin to have equal priority for each instruction queue */ 
    cvmx_write_csr(CVMX_ZIP_QUE_PRI, 0x3);

    int_reg.u64 = cvmx_read_csr(CVMX_ZIP_INT_REG);
    if (queue)
        int_reg.s.doorbell1 = 1;
    else
        int_reg.s.doorbell0 = 1;

    cvmx_write_csr(CVMX_ZIP_INT_REG, int_reg.u64);
    /* Read back to make sure the setup is complete */
    cvmx_read_csr(CVMX_ZIP_QUEX_BUF(queue));
    return 0;
}
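
Per the comments above, only CN68XX exposes two instruction queues, each with its own core mapping; a hedged sketch of setting both up follows (the coremask values are illustrative, not a recommended assignment):

/* Hedged usage sketch for CN68XX: map each instruction queue to its own
 * ZIP core via the zcoremask argument.  Mask values are illustrative. */
if (cvmx_zip_queue_initialize(0, 0x1) != 0 ||   /* queue 0 -> ZIP core 0 */
    cvmx_zip_queue_initialize(1, 0x2) != 0)     /* queue 1 -> ZIP core 1 */
    cvmx_dprintf("ERROR: ZIP queue initialization failed\n");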
Code Example #6
/**
 * Initialize the ZIP block
 *
 * @return Zero on success, negative on failure
 */
int cvmx_zip_initialize(void)
{
    cvmx_zip_cmd_buf_t zip_cmd_buf;
    cvmx_cmd_queue_result_t result;
    result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_ZIP, 0,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return -1;

    zip_cmd_buf.u64 = 0;
    zip_cmd_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
    zip_cmd_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
    zip_cmd_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
    zip_cmd_buf.s.ptr =  cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_ZIP))>>7;
    cvmx_write_csr(CVMX_ZIP_CMD_BUF, zip_cmd_buf.u64);
    cvmx_write_csr(CVMX_ZIP_ERROR, 1);
    cvmx_read_csr(CVMX_ZIP_CMD_BUF); /* Read to make sure setup is complete */
    return 0;
}
Code Example #7
File: cvmx-pko.c  Project: 3sOx/asuswrt-merlin
/**
 * Configure an output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
 * @priority:   Array of priority levels for each queue. Values are
 *                   allowed to be 0-8. A value of 8 gets 8 times the traffic
 *                   of a value of 1.  A value of 0 indicates that the queue
 *                   does not participate in any rounds. These priorities can
 *                   be changed on the fly while the PKO is enabled. A priority
 *                   of 9 indicates that static priority should be used.  If
 *                   static priority is used, all queues with static priority
 *                   must be contiguous starting at the base_queue, and lower
 *                   numbered queues have higher priority than higher numbered
 *                   queues.  There must be num_queues elements in the array.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous.  Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					(int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
	}
	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue
	 * numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packets don't
		 * get grouped up.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				(unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr =
			    (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
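
To make the priority rules from the comment block concrete, here is a hedged call-site sketch: one static-priority queue (which must sit at the base of a contiguous run) followed by weighted round-robin queues. Port and queue numbers are illustrative:

/* Hedged usage sketch: port 0 gets four queues starting at queue 0.
 * Queue 0 uses static priority; queues 1-3 share bandwidth 4:2:1. */
const uint64_t prio[4] = { CVMX_PKO_QUEUE_STATIC_PRIORITY, 4, 2, 1 };
cvmx_pko_status_t status = cvmx_pko_config_port(0, 0, 4, prio);
if (status != CVMX_PKO_SUCCESS)
    cvmx_dprintf("ERROR: PKO port configuration failed (%d)\n", (int)status);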
Code Example #8
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		
		for (queue = 0; queue < num_queues; queue++) {
			
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				
				static_priority_end = queue;
			
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					(int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			static_priority_base, static_priority_end);
#endif
	}
	

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				(unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr =
			    (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}