コード例 #1
0
/*
 * Fill an FPA hardware pool with kmalloc'ed buffers.
 *
 * Returns the number of buffers actually placed in the pool, which may
 * be fewer than requested if an allocation fails.
 */
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
	int remaining;

	for (remaining = elements; remaining != 0; remaining--) {
		char *raw;
		char *aligned;

		/*
		 * FPA memory must be 128 byte aligned.  Since we are
		 * aligning we need to save the original pointer so we
		 * can feed it to kfree when the memory is returned to
		 * the kernel.
		 *
		 * We allocate an extra 256 bytes to allow for
		 * alignment and space for the original pointer saved
		 * just before the block.
		 */
		raw = kmalloc(size + 256, GFP_ATOMIC);
		if (unlikely(raw == NULL)) {
			pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
				   elements * size, pool);
			break;
		}
		aligned = (char *)(((unsigned long)raw + 256) & ~0x7fUL);
		((char **)aligned)[-1] = raw;
		cvmx_fpa_free(aligned, pool, 0);
	}
	return elements - remaining;
}
コード例 #2
0
/*
 * Transmit the packet described by a work queue entry through the PKO.
 *
 * @work:    Work queue entry describing the packet to send.
 * @outport: Output port the packet is queued on.
 *
 * On send failure the packet data buffers are released; in all cases
 * the work queue entry itself is returned to the wqe pool at the end.
 */
void oct_tx_process_hw_work(cvmx_wqe_t *work, uint32_t outport)
{
	uint64_t queue = cvmx_pko_get_base_queue(outport);
	cvmx_pko_command_word0_t pko_command;
	cvmx_pko_return_value_t send_status;

	cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Build a PKO command describing this packet */
	pko_command.u64 = 0;
	pko_command.s.segs = work->word2.s.bufs;
	pko_command.s.total_bytes = cvmx_wqe_get_len(work);

	/* Send the packet */
	send_status = cvmx_pko_send_packet_finish(outport, queue, pko_command,
						  work->packet_ptr,
						  CVMX_PKO_LOCK_CMD_QUEUE);
	if (send_status != CVMX_PKO_SUCCESS) {
		/* Fix: the old message blamed "cvmx_pko_send_packet2",
		 * a function this code never calls. */
		printf("Failed to send packet using cvmx_pko_send_packet_finish\n");
		cvmx_helper_free_packet_data(work);
		STAT_TX_HW_SEND_ERR;
	} else {
		STAT_TX_SEND_OVER;
	}

	cvmx_fpa_free(work, wqe_pool, 0);
}
コード例 #3
0
ファイル: ethernet.c プロジェクト: hmatyschok/MeshBSD
/**
 * Free a work queue entry received in a intercept callback.
 *
 * @param work_queue_entry
 *               Work queue entry to free
 * @return Zero on success, Negative on failure.
 */
/*
 * Free a work queue entry received in an intercept callback: return
 * every packet segment (unless its "don't free" bit is set) and then
 * the wqe itself to their FPA pools.  Always returns zero.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;
	cvmx_buf_ptr_t segment = work->packet_ptr;
	int remaining;

	for (remaining = work->word2.s.bufs; remaining > 0; remaining--) {
		/* Read the link to the next segment before this one is freed */
		cvmx_buf_ptr_t next =
		    *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment.s.addr - 8);

		if (__predict_false(!segment.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment),
			    segment.s.pool,
			    DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE / 128));
		segment = next;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
コード例 #4
0
ファイル: ethernet-mem.c プロジェクト: 325116067/semc-qsd8x50
/**
 * This function fills a hardware pool with memory. Depending
 * on the config defines, this memory might come from the
 * kernel or global 32bit memory allocated with
 * cvmx_bootmem_alloc.
 *
 * @pool:     Pool to populate
 * @size:     Size of each buffer in the pool
 * @elements: Number of buffers to allocate
 */
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
	char *memory;
	int freed = elements;

	/* Two allocation strategies: carve buffers out of the reserved
	 * 32-bit shared region, or kmalloc each buffer individually. */
	if (USE_32BIT_SHARED) {
		extern uint64_t octeon_reserve32_memory;

		/* One contiguous, 128-byte-aligned range for all buffers. */
		memory =
		    cvmx_bootmem_alloc_range(elements * size, 128,
					     octeon_reserve32_memory,
					     octeon_reserve32_memory +
					     (CONFIG_CAVIUM_RESERVE32 << 20) -
					     1);
		if (memory == NULL)
			panic("Unable to allocate %u bytes for FPA pool %d\n",
			      elements * size, pool);

		pr_notice("Memory range %p - %p reserved for "
			  "hardware\n", memory,
			  memory + elements * size - 1);

		/* Hand the range to the FPA one block at a time. */
		while (freed) {
			cvmx_fpa_free(memory, pool, 0);
			memory += size;
			freed--;
		}
	} else {
		while (freed) {
			/* We need to force alignment to 128 bytes here */
			memory = kmalloc(size + 127, GFP_ATOMIC);
			if (unlikely(memory == NULL)) {
				pr_warning("Unable to allocate %u bytes for "
					   "FPA pool %d\n",
				     elements * size, pool);
				break;
			}
			/* Round up to the next 128 byte boundary.
			 * NOTE(review): the original kmalloc pointer is
			 * discarded here, so these buffers can never be
			 * kfree'd -- confirm this pool is never torn down. */
			memory = (char *)(((unsigned long)memory + 127) & -128);
			cvmx_fpa_free(memory, pool, 0);
			freed--;
		}
	}
	/* Number of buffers actually placed into the pool. */
	return elements - freed;
}
コード例 #5
0
/*
 * Release the work queue entry held by a fragmentation context, if any:
 * free its packet data, return the wqe to the wqe pool and clear the
 * reference so it cannot be freed twice.
 */
void octeon_se_fastpath_fragc_uninit(SeFastpathCoreContext core,
                                     SeFastpath fastpath,
                             	     SeFastpathFragmentContext fragc)
{
  if (fragc->pc == NULL)
    return;

  /* Packet buffers first, then the work queue entry itself. */
  cvmx_helper_free_packet_data(fragc->pc->wqe);
  cvmx_fpa_free(fragc->pc->wqe, CVMX_FPA_WQE_POOL, 0);
  fragc->pc->wqe = NULL;
}
コード例 #6
0
ファイル: ethernet.c プロジェクト: 020gzh/linux
/**
 * cvm_oct_free_work- Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
コード例 #7
0
ファイル: oct-rxtx.c プロジェクト: lanxbrad/sec-fw
/*
 *  alloc a mbuf which can be used to describe the packet
 *  if work is error , return NULL
 *  then free wqe, reurn mbuf
 */
/*
 *  Alloc a mbuf which can be used to describe the packet.
 *  If the work has an error, free it and return NULL;
 *  otherwise free the wqe and return the mbuf.
 */
void *
oct_rx_process_work(cvmx_wqe_t *wq)
{
	void *pkt_virt;
	mbuf_t *m;

	if (wq->word2.s.rcv_error || cvmx_wqe_get_bufs(wq) > 1) {
		/*
		 * Work has error, so drop.
		 * Jumbo (multi-buffer) packets are not supported yet.
		 */
		printf("recv error\n");
		oct_packet_free(wq, wqe_pool);
		STAT_RECV_ERR;
		return NULL;
	}

	pkt_virt = (void *) cvmx_phys_to_ptr(wq->packet_ptr.s.addr);
	if (NULL == pkt_virt) {
		/* Fix: the original returned without releasing the work
		 * entry, leaking the wqe and its packet buffer. */
		oct_packet_free(wq, wqe_pool);
		STAT_RECV_ADDR_ERR;
		return NULL;
	}

#ifdef SEC_RXTX_DEBUG
	printf("Received %u byte packet.\n", cvmx_wqe_get_len(wq));
	printf("Processing packet\n");
	cvmx_helper_dump_packet(wq);
#endif

	m = (mbuf_t *)MBUF_ALLOC();
	if (NULL == m) {
		/* Fix: the original memset a potentially NULL pointer.
		 * Drop the packet instead of crashing. */
		oct_packet_free(wq, wqe_pool);
		STAT_RECV_ERR;
		return NULL;
	}

	memset((void *)m, 0, sizeof(mbuf_t));

	m->magic_flag = MBUF_MAGIC_NUM;
	PKTBUF_SET_HW(m);

	/* The mbuf takes over the packet buffer described by the wqe. */
	m->packet_ptr.u64 = wq->packet_ptr.u64;

	m->input_port = cvmx_wqe_get_port(wq);

	m->pkt_totallen = cvmx_wqe_get_len(wq);
	m->pkt_ptr = pkt_virt;

	/* Only the wqe is returned; the packet data now belongs to m. */
	cvmx_fpa_free(wq, wqe_pool, 0);

	STAT_RECV_PC_ADD;
	STAT_RECV_PB_ADD(m->pkt_totallen);

	STAT_RECV_OK;
	return (void *)m;
}
コード例 #8
0
/* IN PROGRESS */
void send_packet()
{
  uint8_t *buf, *pbuf; 
  uint64_t queue, length, buf_phys_addr;
  cvmx_pko_command_word0_t pko_command;
  cvmx_pko_return_value_t status;
  cvmx_buf_ptr_t hw_buffer;

  buf = (uint8_t *) cvmx_fpa_alloc(packet_pool);

  if (buf == NULL) {
    printf("ERROR: allocation from pool %" PRIu64 " failed!\n", packet_pool);
    return;
  } else {
    printf("Packet allocation successful!\n");
  }

  pbuf = build_packet(buf, PAYLOAD_SIZE);
  length = (uint64_t) (pbuf - buf);

  printf("buf : %p\n", buf);
  printf("pbuf: %p\n", pbuf);
  printf("diff: %" PRIu64 "\n", length);

  pko_command.u64 = 0;
  pko_command.s.segs = 1;
  pko_command.s.total_bytes = length;
  pko_command.s.dontfree = 1;

  buf_phys_addr = cvmx_ptr_to_phys(buf); 
  printf("buf_phys_addr: %" PRIu64 "\n", buf_phys_addr);

  hw_buffer.s.i = 0;
  hw_buffer.s.back = 0;
  hw_buffer.s.pool = packet_pool; // the pool that the buffer came from
  hw_buffer.s.size = length; // the size of the segment pointed to by addr (in bytes)
  hw_buffer.s.addr = cvmx_ptr_to_phys(buf); // pointer to the first byte of the data

  queue = cvmx_pko_get_base_queue(xaui_ipd_port);
  printf("queue: %" PRIu64 "\n", queue);

  cvmx_pko_send_packet_prepare(xaui_ipd_port, queue, CVMX_PKO_LOCK_NONE);

  // THROWS EXCEPTION HERE
  status = cvmx_pko_send_packet_finish(xaui_ipd_port, queue, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE);

  if (status == CVMX_PKO_SUCCESS) {
    printf("Succesfully sent packet!\n");
    cvmx_fpa_free(buf, packet_pool, 0);
  }
}
コード例 #9
0
ファイル: ethernet-mem.c プロジェクト: JabirTech/Source
/**
 * Fill the supplied hardware pool with mbufs
 *
 * @param pool     Pool to allocate an mbuf for
 * @param size     Size of the buffer needed for the pool
 * @param elements Number of buffers to allocate
 */
/*
 * Fill the supplied hardware pool with mbuf clusters.
 *
 * @pool     Pool to fill
 * @size     Size of the buffer needed for the pool
 * @elements Number of buffers to allocate
 *
 * Returns the number of buffers actually added to the pool.
 */
int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
	int remaining;

	for (remaining = elements; remaining != 0; remaining--) {
		struct mbuf *m;

		KASSERT(size <= MCLBYTES - 128, ("mbuf clusters are too small"));

		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m == NULL)) {
			printf("Failed to allocate mbuf for hardware pool %d\n", pool);
			break;
		}

		/* Advance m_data to a 128 byte boundary and stash the mbuf
		 * pointer in the word just before the aligned block. */
		m->m_data += 128 - (((uintptr_t)m->m_data) & 0x7f);
		*(struct mbuf **)(m->m_data - sizeof(void *)) = m;
		cvmx_fpa_free(m->m_data, pool, DONT_WRITEBACK(size / 128));
	}
	return (elements - remaining);
}
コード例 #10
0
ファイル: ethernet-mem.c プロジェクト: 325116067/semc-qsd8x50
/**
 * Fill the supplied hardware pool with skbuffs
 *
 * @pool:     Pool to allocate an skbuff for
 * @size:     Size of the buffer needed for the pool
 * @elements: Number of buffers to allocate
 */
/*
 * Fill the supplied hardware pool with skbuffs.
 *
 * Returns the number of buffers actually added; may be fewer than
 * requested if skb allocation fails.
 */
static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
{
	int remaining;

	for (remaining = elements; remaining != 0; remaining--) {
		struct sk_buff *skb = dev_alloc_skb(size + 128);

		if (unlikely(skb == NULL)) {
			pr_warning
			    ("Failed to allocate skb for hardware pool %d\n",
			     pool);
			break;
		}

		/* Align skb->data to a 128 byte boundary and save the
		 * sk_buff pointer in the word just before it. */
		skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f));
		*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
		cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
	}
	return elements - remaining;
}
コード例 #11
0
/*
 * Fill an FPA hardware pool with kmalloc'ed buffers.
 *
 * Returns the number of buffers actually placed into the pool, which
 * may be fewer than @elements if an allocation fails.
 */
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
	char *memory;
	char *fpa;
	int freed = elements;

	while (freed) {
		/*
		 * FPA blocks must be 128 byte aligned, which kmalloc
		 * does not guarantee.  Over-allocate by 256 bytes so
		 * there is room both to align down to a 128 byte
		 * boundary and to stash the original pointer just
		 * before the block for a later kfree.
		 */
		memory = kmalloc(size + 256, GFP_ATOMIC);
		if (unlikely(memory == NULL)) {
			pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
				   elements * size, pool);
			break;
		}
		/* Align down; fpa lands within (memory, memory + 256]. */
		fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
		/* Save the kmalloc pointer in the word before the block. */
		*((char **)fpa - 1) = memory;
		cvmx_fpa_free(fpa, pool, 0);
		freed--;
	}
	return elements - freed;
}
コード例 #12
0
/**
 * Setup a FPA pool to control a new block of memory. The
 * buffer pointer must be a physical address.
 *
 * @pool:       Pool to initialize
 *                   0 <= pool < 8
 * @name:       Constant character string to name this pool.
 *                   String is not copied.
 * @buffer:     Pointer to the block of memory to use. This must be
 *                   accessible by all processors and external hardware.
 * @block_size: Size for each block controlled by the FPA
 * @num_blocks: Number of blocks
 *
 * Returns 0 on Success,
 *         -1 on failure
 */
int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
			uint64_t block_size, uint64_t num_blocks)
{
	char *ptr;
	if (!buffer) {
		cvmx_dprintf
		    ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
		return -1;
	}
	if (pool >= CVMX_FPA_NUM_POOLS) {
		cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
		return -1;
	}

	if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
		cvmx_dprintf
		    ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
		return -1;
	}

	if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
		cvmx_dprintf
		    ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
		return -1;
	}

	cvmx_fpa_pool_info[pool].name = name;
	cvmx_fpa_pool_info[pool].size = block_size;
	cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
	cvmx_fpa_pool_info[pool].base = buffer;

	/* Carve the memory range into blocks and hand each to the FPA. */
	ptr = (char *)buffer;
	while (num_blocks--) {
		cvmx_fpa_free(ptr, pool, 0);
		ptr += block_size;
	}
	return 0;
}
コード例 #13
0
ファイル: cvmx-ipd.c プロジェクト: 2asoft/freebsd
/*
 * Drain every packet and WQE pointer cached inside the IPD block --
 * the prefetch "valid" registers plus the internal pointer FIFOs --
 * and return each buffer to its FPA pool (pre-CN68XX register layout).
 * NOTE(review): the cena=0 / raddr write-then-read sequence is the
 * hardware debug read-out mechanism for these FIFOs; confirm the
 * exact protocol against the Octeon hardware reference manual before
 * changing the statement order.
 */
static void __cvmx_ipd_free_ptr_v1(void)
{
    /* Only CN38XXp{1,2} cannot read pointer out of the IPD */
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
	int no_wptr = 0;
	cvmx_ipd_ptr_count_t ipd_ptr_count;
	ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

	/* Handle Work Queue Entry in cn56xx and cn52xx */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
	    cvmx_ipd_ctl_status_t ipd_ctl_status;
	    ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	    if (ipd_ctl_status.s.no_wptr)
		no_wptr = 1;
	}

	/* Free the prefetched WQE */
	if (ipd_ptr_count.s.wqev_cnt) {
	    cvmx_ipd_wqe_ptr_valid_t ipd_wqe_ptr_valid;
	    ipd_wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
	    /* With no_wptr set the WQE lives in the packet pool */
	    if (no_wptr)
	        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    else
	        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
	}

	/* Free all WQE in the fifo */
	if (ipd_ptr_count.s.wqe_pcnt) {
	    int i;
	    cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
	    ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
	    for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
		ipd_pwp_ptr_fifo_ctl.s.cena = 0;
		ipd_pwp_ptr_fifo_ctl.s.raddr = ipd_pwp_ptr_fifo_ctl.s.max_cnts + (ipd_pwp_ptr_fifo_ctl.s.wraddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
		ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		if (no_wptr)
		    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
		else
		    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
	    }
	    /* Re-enable normal FIFO access */
	    ipd_pwp_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
	}

	/* Free the prefetched packet */
	if (ipd_ptr_count.s.pktv_cnt) {
	    cvmx_ipd_pkt_ptr_valid_t ipd_pkt_ptr_valid;
	    ipd_pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
	    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pkt_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	}

	/* Free the per port prefetched packets */
	if (1) {
	    int i;
	    cvmx_ipd_prc_port_ptr_fifo_ctl_t ipd_prc_port_ptr_fifo_ctl;
	    ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; i++) {
		ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
		ipd_prc_port_ptr_fifo_ctl.s.raddr = i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
		ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
	}

	/* Free all packets in the holding fifo */
	if (ipd_ptr_count.s.pfif_cnt) {
	    int i;
	    cvmx_ipd_prc_hold_ptr_fifo_ctl_t ipd_prc_hold_ptr_fifo_ctl;

	    ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
		ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
		ipd_prc_hold_ptr_fifo_ctl.s.raddr = (ipd_prc_hold_ptr_fifo_ctl.s.praddr + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
		ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
	}

	/* Free all packets in the fifo */
	if (ipd_ptr_count.s.pkt_pcnt) {
	    int i;
	    cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
	    ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
		ipd_pwp_ptr_fifo_ctl.s.cena = 0;
		ipd_pwp_ptr_fifo_ctl.s.raddr = (ipd_pwp_ptr_fifo_ctl.s.praddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
		ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_pwp_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
	}
    }
}
コード例 #14
0
/*
 * Drain every packet and WQE pointer cached inside the IPD block and
 * return each buffer to its FPA pool (CN68XX register layout: the
 * FREE/NEXT/HOLD/PORT pointer CSR names replace the v1 PWP/PRC ones).
 * Pool numbers come from the runtime configuration rather than the
 * CVMX_FPA_*_POOL compile-time constants used by the v1 routine.
 * NOTE(review): the cena=0 / raddr write-then-read sequence is the
 * hardware debug read-out mechanism for these FIFOs; confirm against
 * the Octeon hardware reference manual before reordering statements.
 */
static void __cvmx_ipd_free_ptr_v2(void)
{
	int no_wptr = 0;
	int i;
	union cvmx_ipd_port_ptr_fifo_ctl port_ptr_fifo;
	union cvmx_ipd_ptr_count ptr_count;
	int packet_pool = (int)cvmx_fpa_get_packet_pool();
	int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
	ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

	/* Handle Work Queue Entry in cn68xx */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		union cvmx_ipd_ctl_status ctl_status;
		ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		if (ctl_status.s.no_wptr)
			no_wptr = 1;
	}

	/* Free the prefetched WQE */
	if (ptr_count.s.wqev_cnt) {
		union cvmx_ipd_next_wqe_ptr next_wqe_ptr;
		next_wqe_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_WQE_PTR);
		/* With no_wptr set the WQE lives in the packet pool */
		if (no_wptr)
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)next_wqe_ptr.s.ptr << 7),
				      packet_pool, 0);
		else
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)next_wqe_ptr.s.ptr << 7),
				      wqe_pool, 0);
	}

	/* Free all WQE in the fifo */
	if (ptr_count.s.wqe_pcnt) {
		union cvmx_ipd_free_ptr_fifo_ctl free_fifo;
		union cvmx_ipd_free_ptr_value free_ptr_value;
		free_fifo.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
		for (i = 0; i < ptr_count.s.wqe_pcnt; i++) {
			free_fifo.s.cena = 0;
			free_fifo.s.raddr = free_fifo.s.max_cnts + (free_fifo.s.wraddr + i) % free_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL,
				       free_fifo.u64);
			free_fifo.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
			free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
			if (no_wptr)
				cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)free_ptr_value.s.ptr << 7),
					      packet_pool, 0);
			else
				cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)free_ptr_value.s.ptr << 7),
					      wqe_pool, 0);
		}
		/* Re-enable normal FIFO access */
		free_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
	}

	/* Free the prefetched packet */
	if (ptr_count.s.pktv_cnt) {
		union cvmx_ipd_next_pkt_ptr next_pkt_ptr;
		next_pkt_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_PKT_PTR);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)next_pkt_ptr.s.ptr << 7),
			      packet_pool, 0);
	}

	/* Free the per port prefetched packets */
	port_ptr_fifo.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);

	for (i = 0; i < port_ptr_fifo.s.max_pkt; i++) {
		port_ptr_fifo.s.cena = 0;
		port_ptr_fifo.s.raddr = i % port_ptr_fifo.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, port_ptr_fifo.u64);
		port_ptr_fifo.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)port_ptr_fifo.s.ptr << 7),
			      packet_pool, 0);
	}
	port_ptr_fifo.s.cena = 1;
	cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, port_ptr_fifo.u64);

	/* Free all packets in the holding fifo */
	if (ptr_count.s.pfif_cnt) {
		union cvmx_ipd_hold_ptr_fifo_ctl hold_ptr_fifo;

		hold_ptr_fifo.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pfif_cnt; i++) {
			hold_ptr_fifo.s.cena = 0;
			hold_ptr_fifo.s.raddr = (hold_ptr_fifo.s.praddr + i) % hold_ptr_fifo.s.max_pkt;
			cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL,
				       hold_ptr_fifo.u64);
			hold_ptr_fifo.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)hold_ptr_fifo.s.ptr << 7),
				      packet_pool, 0);
		}
		hold_ptr_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, hold_ptr_fifo.u64);
	}

	/* Free all packets in the fifo */
	if (ptr_count.s.pkt_pcnt) {
		union cvmx_ipd_free_ptr_fifo_ctl free_fifo;
		union cvmx_ipd_free_ptr_value free_ptr_value;
		free_fifo.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pkt_pcnt; i++) {
			free_fifo.s.cena = 0;
			free_fifo.s.raddr = (free_fifo.s.praddr + i) % free_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL,
				       free_fifo.u64);
			free_fifo.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
			free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)free_ptr_value.s.ptr << 7),
				      packet_pool, 0);
		}
		free_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
	}
}
コード例 #15
0
/*
 * Allocate a work queue entry plus a packet buffer chain for one IP
 * fragment and initialize frag_pc from orig_pc (offsets, lengths,
 * version, output port and media header).
 *
 * Returns 0 on success, 1 on allocation failure; on chain-allocation
 * failure the already-allocated wqe is returned to its pool.
 */
static uint32_t 
octeon_se_fastpath_fragc_helper_alloc(SeFastpathFragmentContext fragc,
				      SeFastpathPacketContext orig_pc,
				      SeFastpathPacketContext frag_pc,
				      uint16_t data_len)
{
  cvmx_wqe_t *wqe;
  cvmx_buf_ptr_t fragment;
  uint64_t num_segments = 0;
  uint32_t len;
  size_t alignment;

  wqe = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
  if (cvmx_unlikely(wqe == NULL))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating wqe for fragment.\n");
      return 1;
    }
  
  /* Total IP length of this fragment: payload plus fragment header. */
  len = data_len + fragc->frag_hlen;
  /* Padding needed so the end of the IP header region falls on a
   * 64 byte boundary; depends on v4 vs v6 header sizes. */
  if (cvmx_unlikely(orig_pc->s->ip_version_6))
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP6_HDRLEN
				    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP6_HDRLEN
		    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN));
  else
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP4_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP4_HDRLEN));

  fragment.u64 = 
    octeon_se_fastpath_alloc_packet_chain(len + orig_pc->s->ip_offset,
					  alignment, &num_segments);
  if (cvmx_unlikely(fragment.u64 == 0))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating fragments.\n");
      /* Don't leak the wqe allocated above. */
      cvmx_fpa_free(wqe, CVMX_FPA_WQE_POOL, 0);
      return 1;
    }
  /* Attach the buffer chain to the new wqe. */
  wqe->packet_ptr.u64 = fragment.u64;
  wqe->len = len + orig_pc->s->ip_offset;
  wqe->word2.s.bufs = num_segments;




  /* Copy forwarding state from the original packet context. */
  frag_pc->wqe = wqe;
  frag_pc->s->ip_offset = orig_pc->s->ip_offset;
  frag_pc->s->ip_len = len;
  frag_pc->s->ip_version_6 = orig_pc->s->ip_version_6;

  frag_pc->mtu = orig_pc->mtu;
  frag_pc->oport = orig_pc->oport;
  frag_pc->nh_index = orig_pc->nh_index;
  frag_pc->media_hdrlen = orig_pc->media_hdrlen;
  memcpy(frag_pc->media_hdr.data, 
	 orig_pc->media_hdr.data, frag_pc->media_hdrlen);

  return 0;
}
コード例 #16
0
/*
 * Drain every packet and WQE pointer cached inside the IPD block and
 * return each buffer to its FPA pool (pre-CN68XX register layout).
 * Refactored variant: pool numbers come from the runtime config, and
 * when the NO_WPTR feature is active the wqe pool is simply collapsed
 * onto the packet pool instead of branching at every free site.
 * NOTE(review): the cena=0 / raddr write-then-read sequence is the
 * hardware debug read-out mechanism for these FIFOs; confirm against
 * the Octeon hardware reference manual before reordering statements.
 */
static void __cvmx_ipd_free_ptr_v1(void)
{
	unsigned wqe_pool = cvmx_fpa_get_wqe_pool();
	int i;
	union cvmx_ipd_ptr_count ptr_count;
	union cvmx_ipd_prc_port_ptr_fifo_ctl prc_port_fifo;
        int packet_pool = (int)cvmx_fpa_get_packet_pool();

	/* Only CN38XXp{1,2} cannot read pointer out of the IPD */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		return;

	ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

	/* Handle Work Queue Entry in cn56xx and cn52xx */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		union cvmx_ipd_ctl_status ctl_status;
		ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		if (ctl_status.s.no_wptr)
			wqe_pool = packet_pool;
	}

	/* Free the prefetched WQE */
	if (ptr_count.s.wqev_cnt) {
		union cvmx_ipd_wqe_ptr_valid wqe_ptr_valid;
		wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)wqe_ptr_valid.s.ptr << 7),
			      wqe_pool, 0);
	}

	/* Free all WQE in the fifo */
	if (ptr_count.s.wqe_pcnt) {
		int i;
		union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
		pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		for (i = 0; i < ptr_count.s.wqe_pcnt; i++) {
			pwp_fifo.s.cena = 0;
			pwp_fifo.s.raddr = pwp_fifo.s.max_cnts + (pwp_fifo.s.wraddr + i) % pwp_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
			pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pwp_fifo.s.ptr << 7),
				      wqe_pool, 0);
		}
		/* Re-enable normal FIFO access */
		pwp_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
	}

	/* Free the prefetched packet */
	if (ptr_count.s.pktv_cnt) {
		union cvmx_ipd_pkt_ptr_valid pkt_ptr_valid;
		pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pkt_ptr_valid.s.ptr << 7),
			      packet_pool, 0);
	}

	/* Free the per port prefetched packets */
	prc_port_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

	for (i = 0; i < prc_port_fifo.s.max_pkt; i++) {
		prc_port_fifo.s.cena = 0;
		prc_port_fifo.s.raddr = i % prc_port_fifo.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
			       prc_port_fifo.u64);
		prc_port_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)prc_port_fifo.s.ptr << 7),
			      packet_pool, 0);
	}
	prc_port_fifo.s.cena = 1;
	cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, prc_port_fifo.u64);

	/* Free all packets in the holding fifo */
	if (ptr_count.s.pfif_cnt) {
		int i;
		union cvmx_ipd_prc_hold_ptr_fifo_ctl prc_hold_fifo;

		prc_hold_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pfif_cnt; i++) {
			prc_hold_fifo.s.cena = 0;
			prc_hold_fifo.s.raddr = (prc_hold_fifo.s.praddr + i) % prc_hold_fifo.s.max_pkt;
			cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
				       prc_hold_fifo.u64);
			prc_hold_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)prc_hold_fifo.s.ptr << 7),
				      packet_pool, 0);
		}
		prc_hold_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
			       prc_hold_fifo.u64);
	}

	/* Free all packets in the fifo */
	if (ptr_count.s.pkt_pcnt) {
		int i;
		union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
		pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pkt_pcnt; i++) {
			pwp_fifo.s.cena = 0;
			pwp_fifo.s.raddr = (pwp_fifo.s.praddr + i) % pwp_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
			pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pwp_fifo.s.ptr << 7),
				      packet_pool, 0);
		}
		pwp_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
	}
}
コード例 #17
0
ファイル: cvmx-ipd.c プロジェクト: 2asoft/freebsd
/*
 * Drain every packet and WQE pointer cached inside the IPD block and
 * return each buffer to its FPA pool (CN68XX register layout, using
 * the compile-time CVMX_FPA_*_POOL constants).
 * NOTE(review): the cena=0 / raddr write-then-read sequence is the
 * hardware debug read-out mechanism for these FIFOs; confirm against
 * the Octeon hardware reference manual before reordering statements.
 */
static void __cvmx_ipd_free_ptr_v2(void)
{
    int no_wptr = 0;
    int i;
    cvmx_ipd_port_ptr_fifo_ctl_t ipd_port_ptr_fifo_ctl;
    cvmx_ipd_ptr_count_t ipd_ptr_count;
    ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

    /* Handle Work Queue Entry in cn68xx */
    if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
        cvmx_ipd_ctl_status_t ipd_ctl_status;
        ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
        if (ipd_ctl_status.s.no_wptr)
            no_wptr = 1;
    }

    /* Free the prefetched WQE */
    if (ipd_ptr_count.s.wqev_cnt) {
        cvmx_ipd_next_wqe_ptr_t ipd_next_wqe_ptr;
        ipd_next_wqe_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_WQE_PTR);
        /* With no_wptr set the WQE lives in the packet pool */
        if (no_wptr)
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        else
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
    }


    /* Free all WQE in the fifo */
    if (ipd_ptr_count.s.wqe_pcnt) {
        cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
        cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
        ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
        for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
            ipd_free_ptr_fifo_ctl.s.cena = 0;
            ipd_free_ptr_fifo_ctl.s.raddr = ipd_free_ptr_fifo_ctl.s.max_cnts + (ipd_free_ptr_fifo_ctl.s.wraddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
            cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
            ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
            ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
            if (no_wptr)
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            else
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
        }
        /* Re-enable normal FIFO access */
        ipd_free_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
    }

    /* Free the prefetched packet */
    if (ipd_ptr_count.s.pktv_cnt) {
        cvmx_ipd_next_pkt_ptr_t ipd_next_pkt_ptr;
        ipd_next_pkt_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_PKT_PTR);
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_pkt_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
    }

    /* Free the per port prefetched packets */
    ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);

    for (i = 0; i < ipd_port_ptr_fifo_ctl.s.max_pkt; i++) {
        ipd_port_ptr_fifo_ctl.s.cena = 0;
        ipd_port_ptr_fifo_ctl.s.raddr = i % ipd_port_ptr_fifo_ctl.s.max_pkt;
        cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);
        ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
    }
    ipd_port_ptr_fifo_ctl.s.cena = 1;
    cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);

    /* Free all packets in the holding fifo */
    if (ipd_ptr_count.s.pfif_cnt) {
        cvmx_ipd_hold_ptr_fifo_ctl_t ipd_hold_ptr_fifo_ctl;

        ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);

        for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
            ipd_hold_ptr_fifo_ctl.s.cena = 0;
            ipd_hold_ptr_fifo_ctl.s.raddr = (ipd_hold_ptr_fifo_ctl.s.praddr + i) % ipd_hold_ptr_fifo_ctl.s.max_pkt;
            cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
            ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        }
        ipd_hold_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
    }

    /* Free all packets in the fifo */
    if (ipd_ptr_count.s.pkt_pcnt) {
        cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
        cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
        ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);

        for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
            ipd_free_ptr_fifo_ctl.s.cena = 0;
            ipd_free_ptr_fifo_ctl.s.raddr = (ipd_free_ptr_fifo_ctl.s.praddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
            cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
            ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
            ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        }
        ipd_free_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
    }
}