static inline SeFastpathRet
octeon_se_fastpath_transform_check_pmtu(SeFastpathPacketContext pc,
			                size_t packet_out_len)
{
  if (cvmx_unlikely(packet_out_len > 65535))
    {
      OCTEON_SE_DEBUG(3, "Dropping packet because of overflow\n");
      return OCTEON_SE_FASTPATH_RET_DROP;
    }

  if (cvmx_unlikely((packet_out_len > pc->mtu)
#ifdef OCTEON_SE_FASTPATH_FRAGMENTATION
		    && (pc->s->ipv4_df || pc->s->ip_version_6)
#endif /* OCTEON_SE_FASTPATH_FRAGMENTATION */
		    ))
    {
      OCTEON_SE_DEBUG(5, "Need to send ICMP error message, "
		      "passing to slowpath\n");
      return OCTEON_SE_FASTPATH_RET_SLOWPATH;
    }
  
  return OCTEON_SE_FASTPATH_RET_OK;
}
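Caller-side dispatch on the three return values, sketched after the call site in octeon_se_fastpath_transform_out() later in this file:

  ret = octeon_se_fastpath_transform_check_pmtu(pc, packet_out_len);
  if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_DROP))
    goto drop;
  else if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_SLOWPATH))
    goto slowpath;
  /* OCTEON_SE_FASTPATH_RET_OK: continue on the fast path. */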
Example #2
/**
 * Get clock rate based on the clock type.
 *
 * @param node  - CPU node number
 * @param clock - Enumeration of the clock type.
 * @return      - return the clock rate.
 */
uint64_t cvmx_clock_get_rate_node(int node, cvmx_clock_t clock)
{
	const uint64_t REF_CLOCK = 50000000;

#ifdef CVMX_BUILD_FOR_UBOOT
	uint64_t rate_eclk = 0;
	uint64_t rate_sclk = 0;
	uint64_t rate_dclk = 0;
#else
	static uint64_t rate_eclk = 0;
	static uint64_t rate_sclk = 0;
	static uint64_t rate_dclk = 0;
#endif

	if (cvmx_unlikely(!rate_eclk)) {
		/* Note: The order of these checks is important.
		 ** octeon_has_feature(OCTEON_FEATURE_PCIE) is true for both 6XXX
		 ** and 52XX/56XX, so OCTEON_FEATURE_NPEI _must_ be checked first */
		if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
			cvmx_npei_dbg_data_t npei_dbg_data;
			npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
			rate_eclk = REF_CLOCK * npei_dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		} else if (OCTEON_IS_OCTEON3()) {
			cvmx_rst_boot_t rst_boot;
			rst_boot.u64 = cvmx_read_csr_node(node, CVMX_RST_BOOT);
			rate_eclk = REF_CLOCK * rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * rst_boot.s.pnr_mul;
		} else if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
			cvmx_mio_rst_boot_t mio_rst_boot;
			mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
			rate_eclk = REF_CLOCK * mio_rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * mio_rst_boot.s.pnr_mul;
		} else {
			cvmx_dbg_data_t dbg_data;
			dbg_data.u64 = cvmx_read_csr(CVMX_DBG_DATA);
			rate_eclk = REF_CLOCK * dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		}
	}

	switch (clock) {
	case CVMX_CLOCK_SCLK:
	case CVMX_CLOCK_TIM:
	case CVMX_CLOCK_IPD:
		return rate_sclk;

	case CVMX_CLOCK_RCLK:
	case CVMX_CLOCK_CORE:
		return rate_eclk;

	case CVMX_CLOCK_DDR:
#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_TOOLCHAIN)
		if (cvmx_unlikely(!rate_dclk))
			rate_dclk = cvmx_sysinfo_get()->dram_data_rate_hz;
#endif
		return rate_dclk;
	}

	cvmx_dprintf("cvmx_clock_get_rate: Unknown clock type\n");
	return 0;
}
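A minimal usage sketch (node 0 assumed):

	uint64_t core_hz = cvmx_clock_get_rate_node(0, CVMX_CLOCK_CORE);
	uint64_t sclk_hz = cvmx_clock_get_rate_node(0, CVMX_CLOCK_SCLK);
	cvmx_dprintf("core=%llu Hz, sclk=%llu Hz\n",
		     (unsigned long long)core_hz, (unsigned long long)sclk_hz);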
Example #3
/**
 * @INTERNAL
 * Calls the user supplied callback when an event happens.
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param reason Reason for the callback
 * @param endpoint_num
 *               Endpoint number
 * @param bytes_transferred
 *               Bytes transferred
 */
static void __cvmx_usbd_callback(cvmx_usbd_state_t *usb, cvmx_usbd_callback_t reason, int endpoint_num, int bytes_transferred)
{
    if (usb->callback[reason])
    {
        if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
            cvmx_dprintf("%s: Calling callback reason=%d endpoint=%d bytes=%d func=%p data=%p\n",
                __FUNCTION__, reason, endpoint_num, bytes_transferred, usb->callback[reason], usb->callback_data[reason]);
        usb->callback[reason](reason, endpoint_num, bytes_transferred, usb->callback_data[reason]);
    }
    else
    {
        if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
            cvmx_dprintf("%s: No callback for reason=%d endpoint=%d bytes=%d\n",
                __FUNCTION__, reason, endpoint_num, bytes_transferred);
    }
}
Example #4
/**
 * @INTERNAL
 * Poll a device mode endpoint for status
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint to poll
 *
 * @return Zero on success
 */
static int __cvmx_usbd_poll_in_endpoint(cvmx_usbd_state_t *usb, int endpoint_num)
{
    cvmx_usbcx_diepintx_t usbc_diepint;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);

    usbc_diepint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index));
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index), usbc_diepint.u32);

    if (usbc_diepint.s.epdisbld)
    {
        /* Endpoint Disabled Interrupt (EPDisbld)
            This bit indicates that the endpoint is disabled per the
            application's request. */
        /* Nothing to do */
    }
    if (usbc_diepint.s.xfercompl)
    {
        cvmx_usbcx_dieptsizx_t usbc_dieptsiz;
        int bytes_transferred;
        /* Transfer Completed Interrupt (XferCompl)
            Indicates that the programmed transfer is complete on the AHB
            as well as on the USB, for this endpoint. */
        usbc_dieptsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index));
        bytes_transferred = usb->endpoint[endpoint_num].buffer_length - usbc_dieptsiz.s.xfersize;
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_IN_COMPLETE, endpoint_num, bytes_transferred);
    }
    return 0;
}
Example #5
/**
 * Register a callback function to process USB events
 *
 * @param usb       USB device state populated by
 *                  cvmx_usbd_initialize().
 * @param reason    The reason this callback should be called
 * @param func      Function to call
 * @param user_data User supplied data for the callback
 *
 * @return Zero on success, negative on failure
 */
int cvmx_usbd_register(cvmx_usbd_state_t *usb, cvmx_usbd_callback_t reason, cvmx_usbd_callback_func_t func, void *user_data)
{
    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Register reason=%d func=%p data=%p\n",
            __FUNCTION__, reason, func, user_data);
    usb->callback[reason] = func;
    usb->callback_data[reason] = user_data;
    return 0;
}
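A hedged usage sketch: the handler signature is inferred from the invocation in __cvmx_usbd_callback() above, and the handler name is hypothetical.

/* Hypothetical handler; signature inferred from the call in
   __cvmx_usbd_callback(): (reason, endpoint, bytes transferred, user data). */
static void out_complete_handler(cvmx_usbd_callback_t reason, int endpoint_num,
                                 int bytes_transferred, void *user_data)
{
    cvmx_dprintf("OUT complete: ep=%d bytes=%d\n",
                 endpoint_num, bytes_transferred);
}

static void register_handlers(cvmx_usbd_state_t *usb)
{
    cvmx_usbd_register(usb, CVMX_USBD_CALLBACK_OUT_COMPLETE,
                       out_complete_handler, NULL);
}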
Example #6
/**
 * Disable an OUT endpoint
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint number to disable
 *
 * @return Zero on success, negative on failure
 */
int cvmx_usbd_out_endpoint_disable(cvmx_usbd_state_t *usb, int endpoint_num)
{
    cvmx_usbcx_doepctlx_t usbc_doepctl;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);

    usbc_doepctl.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPCTLX(endpoint_num, usb->index));
    if (usbc_doepctl.s.epena && !usbc_doepctl.s.epdis)
    {
        usbc_doepctl.s.epdis = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(endpoint_num, usb->index), usbc_doepctl.u32);
    }
    return 0;
}
Example #7
/**
 * Shutdown a USB port after a call to cvmx_usbd_initialize().
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 *
 * @return Zero or a negative on error.
 */
int cvmx_usbd_shutdown(cvmx_usbd_state_t *usb)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    /* Disable the clocks and put them in power on reset */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.enable = 1;
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hclk_rst = 1;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hrst = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    return 0;
}
Example #8
void S3_send_packet(cvmx_wqe_t * work)
{
		uint64_t        port;
		cvmx_buf_ptr_t  packet_ptr;
		cvmx_pko_command_word0_t pko_command;
		/* Build a PKO pointer to this packet */
		pko_command.u64 = 0;


		/* Errata PKI-100 fix. We need to fix chain pointers on segmented
		   packets. Although the size is also wrong on a single buffer packet,
		   PKO doesn't care so we ignore it */
		if (cvmx_unlikely(work->word2.s.bufs > 1))
				cvmx_helper_fix_ipd_packet_chain(work);

		port = work->ipprt;
		if( port >= portbase + portnum)
				port = work->ipprt - portnum;
		else
				port = work->ipprt + portnum;

		int queue = cvmx_pko_get_base_queue(port);
		cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_ATOMIC_TAG);

		pko_command.s.total_bytes = work->len;
		pko_command.s.segs = work->word2.s.bufs;
		pko_command.s.ipoffp1 = 14 + 1;
		packet_ptr = work->packet_ptr;
		//cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
		cvm_common_free_fpa_buffer(work, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		work = NULL;

		/*
		 * Send the packet and wait for the tag switch to complete before
		 * accessing the output queue. This ensures the locking required
		 * for the queue.
		 *
		 */
		if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet_ptr, CVMX_PKO_LOCK_ATOMIC_TAG))
		{
				printf("Failed to send packet using cvmx_pko_send_packet_finish\
								n");
		}
}
Example #9
/**
 * @INTERNAL
 * Poll a device mode endpoint for status
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint to poll
 *
 * @return Zero on success
 */
static int __cvmx_usbd_poll_out_endpoint(cvmx_usbd_state_t *usb, int endpoint_num)
{
    cvmx_usbcx_doepintx_t usbc_doepint;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d\n", __FUNCTION__, endpoint_num);

    usbc_doepint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index));
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPINTX(endpoint_num, usb->index), usbc_doepint.u32);

    if (usbc_doepint.s.setup)
    {
        /* SETUP Phase Done (SetUp)
            Applies to control OUT endpoints only.
            Indicates that the SETUP phase for the control endpoint is
            complete and no more back-to-back SETUP packets were
            received for the current control transfer. On this interrupt, the
            application can decode the received SETUP data packet. */
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_DEVICE_SETUP, endpoint_num, 0);
    }
    if (usbc_doepint.s.epdisbld)
    {
        /* Endpoint Disabled Interrupt (EPDisbld)
            This bit indicates that the endpoint is disabled per the
            application's request. */
        /* Nothing to do */
    }
    if (usbc_doepint.s.xfercompl)
    {
        cvmx_usbcx_doeptsizx_t usbc_doeptsiz;
        int bytes_transferred;
        /* Transfer Completed Interrupt (XferCompl)
            Indicates that the programmed transfer is complete on the AHB
            as well as on the USB, for this endpoint. */
        usbc_doeptsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DOEPTSIZX(endpoint_num, usb->index));
        bytes_transferred = usb->endpoint[endpoint_num].buffer_length - usbc_doeptsiz.s.xfersize;
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_OUT_COMPLETE, endpoint_num, bytes_transferred);
    }

    return 0;
}
uint32_t 
octeon_se_fastpath_fragc_init(SeFastpathCoreContext core,
			      SeFastpath fastpath,
			      SeFastpathFragmentContext fragc,
			      SeFastpathPacketContext pc,
			      size_t mtu,
			      uint8_t df_on_first_fragment)
{
  uint8_t * header;
  size_t packet_len = pc->s->ip_len;

  /* Initialize common fields in the fragment context. */
  fragc->pc = pc;
  fragc->mtu = mtu;
  fragc->offset = 0;

  /* Get a pointer to the packet to be fragmented. */
  header = 
    (uint8_t *)cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset;

  if (pc->s->ip_version_6)
    {
      uint16_t frag_hlen;
      uint16_t frag_data_len;

      fragc->total_len = packet_len - OCTEON_SE_FASTPATH_IP6_HDRLEN;

      /* Compute fragments' header and data lengths. */
      frag_hlen = 
	OCTEON_SE_FASTPATH_IP6_HDRLEN + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN;
      frag_data_len = ((size_t) (mtu - frag_hlen)) & (size_t) ~7;
      OCTEON_SE_ASSERT((frag_data_len > 0) && 
		       (frag_data_len <= (65535 - frag_hlen)));

      /* Store that information into the fragmentation context. */
      fragc->frag_hlen = frag_hlen;
      fragc->frag_data_len = frag_data_len;

      OCTEON_SE_FASTPATH_GET_NEXT_IPV6_FRAG_ID(core, fragc->u.ipv6.id);
      memcpy(fragc->u.ipv6.frag_hdr, header, OCTEON_SE_FASTPATH_IP6_HDRLEN);
      octeon_se_fastpath_packet_buffer_create(fragc->original_pkt,
                                              pc->wqe->packet_ptr,
                                              pc->s->ip_offset + 
					      OCTEON_SE_FASTPATH_IP6_HDRLEN,
                                              fragc->total_len,
                                              pc->wqe->word2.s.bufs);
    }
  else
    {
      /* Check if the packet has DF bit set. */
      if (cvmx_unlikely(pc->s->ipv4_df))
        {
	  OCTEON_SE_DEBUG(7, "Cannot fragment packet. DF bit is set\n");
	  return 1;
        }

      fragc->total_len = packet_len - OCTEON_SE_FASTPATH_IP4_HDRLEN;
      fragc->frag_hlen = OCTEON_SE_FASTPATH_IP4_HDRLEN;

      /* Compute amount of data to go in fragments. */
      fragc->frag_data_len = ((size_t)(mtu - OCTEON_SE_FASTPATH_IP4_HDRLEN)) &
                                      (size_t) ~7;
      
      OCTEON_SE_ASSERT(fragc->frag_data_len > 0 && 
		       fragc->frag_data_len < 65535);

      fragc->u.ipv4.df_on_first_fragment = df_on_first_fragment;
      
      /* Store computed values into the fragmentation context. */
      memcpy(fragc->u.ipv4.frag_hdr, header, OCTEON_SE_FASTPATH_IP4_HDRLEN);
      octeon_se_fastpath_packet_buffer_create(fragc->original_pkt,
                                              pc->wqe->packet_ptr,
                                              pc->s->ip_offset + 
					      OCTEON_SE_FASTPATH_IP4_HDRLEN,
                                              fragc->total_len,
                                              pc->wqe->word2.s.bufs);
    }
  
  return 0;
}
static uint32_t 
octeon_se_fastpath_fragc_helper_alloc(SeFastpathFragmentContext fragc,
				      SeFastpathPacketContext orig_pc,
				      SeFastpathPacketContext frag_pc,
				      uint16_t data_len)
{
  cvmx_wqe_t *wqe;
  cvmx_buf_ptr_t fragment;
  uint64_t num_segments = 0;
  uint32_t len;
  size_t alignment;

  wqe = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
  if (cvmx_unlikely(wqe == NULL))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating wqe for fragment.\n");
      return 1;
    }
  
  len = data_len + fragc->frag_hlen;
  if (cvmx_unlikely(orig_pc->s->ip_version_6))
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP6_HDRLEN
				    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP6_HDRLEN
		    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN));
  else
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP4_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP4_HDRLEN));

  fragment.u64 = 
    octeon_se_fastpath_alloc_packet_chain(len + orig_pc->s->ip_offset,
					  alignment, &num_segments);
  if (cvmx_unlikely(fragment.u64 == 0))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating fragments.\n");
      cvmx_fpa_free(wqe, CVMX_FPA_WQE_POOL, 0);
      return 1;
    }
  wqe->packet_ptr.u64 = fragment.u64;
  wqe->len = len + orig_pc->s->ip_offset;
  wqe->word2.s.bufs = num_segments;

  frag_pc->wqe = wqe;
  frag_pc->s->ip_offset = orig_pc->s->ip_offset;
  frag_pc->s->ip_len = len;
  frag_pc->s->ip_version_6 = orig_pc->s->ip_version_6;

  frag_pc->mtu = orig_pc->mtu;
  frag_pc->oport = orig_pc->oport;
  frag_pc->nh_index = orig_pc->nh_index;
  frag_pc->media_hdrlen = orig_pc->media_hdrlen;
  memcpy(frag_pc->media_hdr.data, 
	 orig_pc->media_hdr.data, frag_pc->media_hdrlen);

  return 0;
}
SeFastpathPacketContext 
octeon_se_fastpath_fragc_next(SeFastpathCoreContext core,
                              SeFastpath fastpath,
                              SeFastpathFragmentContext fragc)
{
  SeFastpathPacketContext frag;
  SeFastpathPacketBufferStruct fragment_buffer[1];
  uint8_t * header;
  cvmx_buf_ptr_t packet_out;
  uint16_t hlen, data_len, len, offset_orig;
  uint16_t fragoff_orig, fragoff, checksum;
  uint8_t is_last_frag;

  /* If an error caused pc to be freed, return NULL to indicate we are done. */
  if (fragc->pc == NULL || fragc->offset >= fragc->total_len)
    return NULL;
  
  hlen = fragc->frag_hlen;
  data_len = fragc->frag_data_len;

  /* Determine the length of the data section of the fragment. */
  if (fragc->offset + data_len < fragc->total_len)
    len = data_len;
  else
    len = fragc->total_len - fragc->offset;
  
  if (fragc->offset + len == fragc->total_len)
    is_last_frag = TRUE;
  else
    is_last_frag = FALSE;

  OCTEON_SE_DEBUG(7, "Sending fragment offset=%d, len=%d\n",
		  fragc->offset, len);

  /* Allocate packet context and state for the fragment. */
  frag = &core->fragment.s;
  memset(frag, 0, sizeof(SeFastpathPacketContextStruct));
  frag->s = &core->fragment_state.s;
  memset(frag->s, 0, sizeof(SeFastpathPacketStateStruct));

  /* Create a new work queue entry and then copy extra state into the new pc. */
  if (cvmx_unlikely(octeon_se_fastpath_fragc_helper_alloc(fragc,
                                                          fragc->pc,
					                  frag,
                                                          len)))
    {
      OCTEON_SE_DEBUG(3, "Unable to create fragment\n");
      return NULL;
    }

  /* For local reference. */ 
  packet_out.u64 = frag->wqe->packet_ptr.u64;

  header = 
    ((uint8_t *)cvmx_phys_to_ptr(packet_out.s.addr)) + frag->s->ip_offset;

  if (frag->s->ip_version_6)
    {
      uint8_t nh;
      OCTEON_SE_DEBUG(9, "Building IPv6 fragment\n");

      /* Assert that headers fit into the first segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > 
		       (frag->s->ip_offset +
			OCTEON_SE_FASTPATH_IP6_HDRLEN + 
			OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN));

      memcpy(header, fragc->u.ipv6.frag_hdr, OCTEON_SE_FASTPATH_IP6_HDRLEN);
      
      OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, len + 
				      OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN);
      OCTEON_SE_FASTPATH_IPH6_NH(header, nh);
      OCTEON_SE_FASTPATH_IPH6_SET_NH(header, 
                                     OCTEON_SE_FASTPATH_IPPROTO_IPV6FRAG);

      /* Create the fragment header and copy it to its place. */
      header += OCTEON_SE_FASTPATH_IP6_HDRLEN;

      header[0] = nh;

      header[SSH_IP6_EXT_FRAGMENT_OFS_RESERVED1] = 0;
      OCTEON_SE_PUT_16BIT(header + 
			  OCTEON_SE_FASTPATH_IP6_EXT_FRAGMENT_OFS_OFFSET,
			  (fragc->offset | (is_last_frag ? 0 : 1)));
      OCTEON_SE_PUT_32BIT(header + 
			  OCTEON_SE_FASTPATH_IP6_EXT_FRAGMENT_OFS_ID,
			  fragc->u.ipv6.id);

      /* Finally, copy the payload. */
      octeon_se_fastpath_packet_buffer_create(fragment_buffer,
                                              packet_out,
                                              frag->s->ip_offset +
					      OCTEON_SE_FASTPATH_IP6_HDRLEN + 
				       OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN,
                                              len,
                                              frag->wqe->word2.s.bufs);
      octeon_se_fastpath_buffer_copy(fragment_buffer,
                                     fragc->original_pkt,
				     len);
    }
  else
    {
      /* Copy packet header to the fragment buffer. */
      OCTEON_SE_DEBUG(9, "Build IPv4 fragment\n");

      /* Asseet that header fits into the first segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > (frag->s->ip_offset +
					    OCTEON_SE_FASTPATH_IP4_HDRLEN));

      /* Copy in the IPv4 header first */
      memcpy(header, fragc->u.ipv4.frag_hdr,OCTEON_SE_FASTPATH_IP4_HDRLEN);

      /* Copy data from the original packet to the fragment data part. */
      octeon_se_fastpath_packet_buffer_create(fragment_buffer,
                                              packet_out,
					      frag->s->ip_offset +
					      OCTEON_SE_FASTPATH_IP4_HDRLEN, 
					      len, 
					      frag->wqe->word2.s.bufs); 
      octeon_se_fastpath_buffer_copy(fragment_buffer,
                                     fragc->original_pkt,
				     len);

      /* Compute new values for fragment offset and flag bits. */
      OCTEON_SE_FASTPATH_IPH4_FRAG(header, fragoff_orig);

      offset_orig = (fragoff_orig & OCTEON_SE_FASTPATH_IP4_FRAG_MASK) << 3;
      fragoff = fragoff_orig & OCTEON_SE_FASTPATH_IPH4_FRAGOFF_RF;
      if (fragc->offset + data_len < fragc->total_len ||
          (fragoff_orig & OCTEON_SE_FASTPATH_IPH4_FRAGOFF_MF))
        fragoff |= OCTEON_SE_FASTPATH_IPH4_FRAGOFF_MF;

      /* If df_on_first_fragment is set and this is the first fragment,
         set DF bit */
      if (fragc->offset == 0 && fragc->u.ipv4.df_on_first_fragment)
        fragoff |= OCTEON_SE_FASTPATH_IPH4_FRAGOFF_DF;

      OCTEON_SE_ASSERT((fragc->offset & 7) == 0); 
      OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header,
                           (fragoff | ((fragc->offset + offset_orig) >> 3)));
      OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, hlen + len);
      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, 0);
      
      checksum = octeon_se_fastpath_ip_cksum(header, hlen);
      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, checksum);
    }

  /* Update next fragment offset. */
  fragc->offset += len;

  /* Return the fragment. */
  return frag;
}
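The three fragc functions above form an iterator: initialize once, then call octeon_se_fastpath_fragc_next() until it returns NULL. A hedged driver sketch (the *Struct declaration follows the naming convention used elsewhere in this file, and the send routine is hypothetical):

  SeFastpathFragmentContextStruct fragc[1];
  SeFastpathPacketContext frag;

  if (octeon_se_fastpath_fragc_init(core, fastpath, fragc, pc,
                                    pc->mtu, 0) != 0)
    return; /* e.g. IPv4 packet with DF set: cannot fragment */

  while ((frag = octeon_se_fastpath_fragc_next(core, fastpath, fragc)) != NULL)
    se_fastpath_send_fragment(frag); /* hypothetical output routine */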
Example #13
/**
 * Enable an endpoint to respond to an IN transaction
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint number to enable
 * @param transfer_type
 *               Transfer type for the endpoint
 * @param max_packet_size
 *               Maximum packet size for the endpoint
 * @param buffer Buffer to send
 * @param buffer_length
 *               Length of the buffer in bytes
 *
 * @return Zero on success, negative on failure
 */
int cvmx_usbd_in_endpoint_enable(cvmx_usbd_state_t *usb,
    int endpoint_num, cvmx_usbd_transfer_t transfer_type,
    int max_packet_size, uint64_t buffer, int buffer_length)
{
    cvmx_usbcx_diepctlx_t usbc_diepctl;
    cvmx_usbcx_dieptsizx_t usbc_dieptsiz;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d buffer=0x%llx length=%d\n",
            __FUNCTION__, endpoint_num, (ULL)buffer, buffer_length);

    usb->endpoint[endpoint_num].buffer_length = buffer_length;

    CVMX_SYNCW; /* Flush out pending writes before enable */

    /* Clear any pending interrupts */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index),
        __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index)));

    usbc_dieptsiz.u32 = 0;
    usbc_dieptsiz.s.mc = 1;
    if (buffer)
    {
        cvmx_write_csr(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + endpoint_num*8, buffer);
        usbc_dieptsiz.s.pktcnt = (buffer_length + max_packet_size - 1) / max_packet_size;
        if (usbc_dieptsiz.s.pktcnt == 0)
            usbc_dieptsiz.s.pktcnt = 1;
        usbc_dieptsiz.s.xfersize = buffer_length;
    }
    else
    {
        usbc_dieptsiz.s.pktcnt = 0;
        usbc_dieptsiz.s.xfersize = 0;
    }
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index), usbc_dieptsiz.u32);

    usbc_diepctl.u32 = 0;
    usbc_diepctl.s.epena = (buffer != 0);
    usbc_diepctl.s.setd1pid = 0;
    usbc_diepctl.s.setd0pid = (buffer == 0);
    usbc_diepctl.s.cnak = 1;
    usbc_diepctl.s.txfnum = endpoint_num;
    usbc_diepctl.s.eptype = transfer_type;
    usbc_diepctl.s.usbactep = 1;
    usbc_diepctl.s.nextep = endpoint_num;
    if (endpoint_num == 0)
    {
        switch (max_packet_size)
        {
            case 8:
                usbc_diepctl.s.mps = 3;
                break;
            case 16:
                usbc_diepctl.s.mps = 2;
                break;
            case 32:
                usbc_diepctl.s.mps = 1;
                break;
            default:
                usbc_diepctl.s.mps = 0;
                break;
        }
    }
    else
        usbc_diepctl.s.mps = max_packet_size;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index), usbc_diepctl.u32);

    return 0;
}
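A hedged usage sketch: queue a buffer for an IN (device-to-host) transfer. The endpoint number and the 512 byte high-speed bulk packet size are illustrative assumptions, CVMX_USBD_TRANSFER_BULK is assumed to be an enumerator of cvmx_usbd_transfer_t, and cvmx_ptr_to_phys() converts the buffer pointer to the physical address the DMA engine expects.

/* Hedged sketch: queue 'len' bytes from 'data' on an assumed bulk IN
   endpoint 2 with an assumed 512 byte max packet size. */
static int queue_in_transfer(cvmx_usbd_state_t *usb, void *data, int len)
{
    return cvmx_usbd_in_endpoint_enable(usb, 2, CVMX_USBD_TRANSFER_BULK,
                                        512, cvmx_ptr_to_phys(data), len);
}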
Example #14
/**
 * Poll the USB block for status and call all needed callback
 * handlers. This function is meant to be called in the interrupt
 * handler for the USB controller. It can also be called
 * periodically in a loop for non-interrupt based operation.
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 *
 * @return Zero or negative on error.
 */
int cvmx_usbd_poll(cvmx_usbd_state_t *usb)
{
    cvmx_usbcx_gintsts_t usbc_gintsts;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    /* Read the pending interrupts */
    usbc_gintsts.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
    usbc_gintsts.u32 &= __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));

    /* Clear the interrupts now that we know about them */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);

    if (usbc_gintsts.s.usbsusp)
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_SUSPEND, 0, 0);

    if (usbc_gintsts.s.enumdone)
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_ENUM_COMPLETE, 0, 0);

    if (usbc_gintsts.s.usbrst)
    {
        /* USB Reset (USBRst)
            The core sets this bit to indicate that a reset is
            detected on the USB. */
        __cvmx_usbd_device_reset_complete(usb);
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_RESET, 0, 0);
    }

    if (usbc_gintsts.s.oepint || usbc_gintsts.s.iepint)
    {
        cvmx_usbcx_daint_t usbc_daint;
        usbc_daint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DAINT(usb->index));
        if (usbc_daint.s.inepint)
        {
            int active_endpoints = usbc_daint.s.inepint;

            while (active_endpoints)
            {
                int endpoint;
                CVMX_CLZ(endpoint, active_endpoints);
                endpoint = 31 - endpoint;
                __cvmx_usbd_poll_in_endpoint(usb, endpoint);
                active_endpoints ^= 1<<endpoint;
            }
        }
        if (usbc_daint.s.outepint)
        {
            int active_endpoints = usbc_daint.s.outepint;

            while (active_endpoints)
            {
                int endpoint;
                CVMX_CLZ(endpoint, active_endpoints);
                endpoint = 31 - endpoint;
                __cvmx_usbd_poll_out_endpoint(usb, endpoint);
                active_endpoints ^= 1<<endpoint;
            }
        }
    }

    return 0;
}
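As the doc comment notes, this can drive the device stack without interrupts. A minimal polling-loop sketch:

/* Minimal non-interrupt operation: poll in a loop; all event handling
   happens in the callbacks registered via cvmx_usbd_register(). */
static void usb_poll_loop(cvmx_usbd_state_t *usb)
{
    for (;;)
        cvmx_usbd_poll(usb);
}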
/** Execute outbound transforms */
SeFastpathRet
octeon_se_fastpath_transform_out(SeFastpathCoreContext core,
				 SeFastpath fastpath,
				 SeFastpathPacketContext pc)
{
  cvmx_buf_ptr_t packet_out;
  uint64_t packet_out_num_segs;
  size_t packet_out_len;
  SeFastpathTransformData se_trd;
  SeFastpathCombinedTransform combined;
  SeFastpathPacketBufferStruct src, dst;
  SeFastpathEspExtraInfoStruct extra_info[1];
  SeFastpathMacExtraInfoStruct mac_info[1];
  SeFastpathRet ret;
  uint8_t *header;
  uint32_t trd_i, tos, flow_label;
  uint64_t ipsec_seq;
  uint16_t csum, prefix_ofs;
  uint16_t esp_ah_ofs, prefix_len = 0, trailer_len = 0, pad_len = 0;
  uint8_t esp_ah_nh;
  uint64_t icv[OCTEON_SE_FASTPATH_MAX_HASH_WORDS] = { 0 };
  size_t i;
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  size_t icv_pad_len = 0;
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
  uint32_t run_time;
  size_t alignment = 0;
#ifdef OCTEON_SE_FASTPATH_STATISTICS
  size_t out_octets;
#endif /* OCTEON_SE_FASTPATH_STATISTICS */
  
  OCTEON_SE_DEBUG(9, "Execute transform out\n");

  packet_out.u64 = 0;

  OCTEON_SE_ASSERT(pc->transform_index != OCTEON_SE_FASTPATH_INVALID_INDEX);
  trd_i = pc->transform_index & 0x00ffffff;
  OCTEON_SE_ASSERT(trd_i < OCTEON_SE_FASTPATH_TRD_TABLE_SIZE);

  se_trd = OCTEON_SE_FASTPATH_TRD(fastpath, trd_i);
  OCTEON_SE_FASTPATH_TRD_READ_LOCK(fastpath, trd_i, se_trd);

  OCTEON_SE_FASTPATH_PREFETCH_TRD(se_trd);
  
  /* If transform is complex, pass packet to slowpath. */
  if (cvmx_unlikely(se_trd->is_special))
    {
      OCTEON_SE_DEBUG(9, "Special transform %08x, passing to slowpath\n",
		      se_trd->transform);
      goto slowpath;
    }

  combined = octeon_se_fastpath_get_combined_transform(se_trd->transform,
                                                   se_trd->mac_key_size);
  if (cvmx_unlikely(combined == NULL))
    {
      OCTEON_SE_DEBUG(9, "Unsupported transform %08x, passing to slowpath\n",
		      se_trd->transform);
      goto slowpath;
    }
  
  /* Update trd output timestamp. */
  run_time = cvmx_fau_fetch_and_add32(OCTEON_SE_FASTPATH_FAU_RUNTIME, 0);
  cvmx_atomic_set32((int32_t *) &se_trd->last_out_packet_time,
		    (int32_t) run_time);

  (*combined->init)(core->transform_context,
                    se_trd->keymat + OCTEON_MAX_KEYMAT_LEN /2,
		    se_trd->cipher_key_size,
		    se_trd->keymat + OCTEON_MAX_KEYMAT_LEN /2 
		    + OCTEON_MAX_ESP_KEY_BITS /8,
		    se_trd->mac_key_size);
  
  prefix_ofs = pc->s->ip_offset;

  /* Check ttl. */
  if (cvmx_unlikely(pc->s->ttl == 0))
    {
      OCTEON_SE_DEBUG(3, "Zero TTL, dropping\n");
      goto corrupt;
    }

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_unlikely(!se_trd->tunnel_mode))
    {
      /* In transport mode insert the ESP/AH header between IP 
	 and transport headers. */
      prefix_ofs += pc->s->tr_offset;
      esp_ah_nh = pc->s->ipproto;
      prefix_len = 0;
    }
  else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    {      
      /* In tunnel mode insert IP and ESP/AH headers before IP header. */
      if (se_trd->ip_version_6)
	prefix_len = OCTEON_SE_FASTPATH_IP6_HDRLEN;
      else
	prefix_len = OCTEON_SE_FASTPATH_IP4_HDRLEN;
      
      if (pc->s->ip_version_6)
	esp_ah_nh = OCTEON_SE_FASTPATH_IPPROTO_IPV6;
      else
	esp_ah_nh = OCTEON_SE_FASTPATH_IPPROTO_IPIP;
    }
  
  /* Calculate IPsec overhead. */
  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_NATT
  /* Reserve space for UDP NAT-T. */
  if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_NATT)
    prefix_len += OCTEON_SE_FASTPATH_UDP_HDRLEN;
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_NATT */

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH))
    {
      prefix_len += OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2
      if (cvmx_unlikely((se_trd->ip_version_6 == 1) &&
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
        {
          icv_pad_len = 4;
          prefix_len += 4; /* Align AH header to 64 bit boundary */
        }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */

      trailer_len = 0;
      pad_len = 0;
    }
  else if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_ESP)
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
    {
      prefix_len += (OCTEON_SE_FASTPATH_ESP_HDRLEN + combined->cipher_iv_len);
      trailer_len = 2 + combined->icv_len;
      
      pad_len = (pc->s->ip_len + pc->s->ip_offset - prefix_ofs
		 + 2) % combined->pad_boundary;
      if (pad_len != 0)
	pad_len = combined->pad_boundary - pad_len;
    }
    
  /* The actual length of the packet */
  packet_out_len = pc->s->ip_len + prefix_len + pad_len + trailer_len;
  OCTEON_SE_DEBUG(9, "Resultant packet len is %d\n", (int) packet_out_len);

  /* Check result packet length. */
  if (cvmx_unlikely(se_trd->pmtu_received && pc->mtu > se_trd->pmtu_received))
    pc->mtu = se_trd->pmtu_received;
  
  ret = octeon_se_fastpath_transform_check_pmtu(pc, packet_out_len);
  if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_DROP))
    goto drop;
  else if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_SLOWPATH))
    goto slowpath;

  /* In tunnel mode decrement ttl of inner header. */
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_likely(se_trd->tunnel_mode))
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    {      
      header = cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset;

      if (pc->s->ip_version_6)
	{
	  /* Assert that header is in the first packet segment */
	  OCTEON_SE_ASSERT(pc->wqe->packet_ptr.s.size 
			   >= OCTEON_SE_FASTPATH_IP6_HDRLEN);	  
	  OCTEON_SE_FASTPATH_IPH6_SET_HL(header, pc->s->ttl - 1);
	}
      else
	{
	  /* Assert that header is in the first packet segment */
	  OCTEON_SE_ASSERT(pc->wqe->packet_ptr.s.size
			   >= OCTEON_SE_FASTPATH_IP4_HDRLEN);	  
	  OCTEON_SE_FASTPATH_IPH4_SET_TTL(header, pc->s->ttl - 1);
	  OCTEON_SE_FASTPATH_IPH4_CHECKSUM(header, csum);
	  csum = octeon_se_fastpath_csum_update_byte(csum, SSH_IPH4_OFS_TTL, 
						     pc->s->ttl,
						     pc->s->ttl - 1);
	  OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
	}
    }

  /* Save df bit processing state */
  pc->s->df_bit_processing = se_trd->df_bit_processing;

  /* Allocate packet buffer chain for result packet.
     Request that crypto result offset is 8 byte aligned. */
  alignment =
    OCTEON_SE_ALIGN_64(prefix_ofs + prefix_len) - (prefix_ofs + prefix_len);
  
  packet_out.u64 = 
    octeon_se_fastpath_alloc_packet_chain(packet_out_len + pc->s->ip_offset,
					  alignment,
					  &packet_out_num_segs);
  
  if (cvmx_unlikely(packet_out.u64 == 0))
    {
      OCTEON_SE_DEBUG(3, "Result packet allocation failed\n");
      goto drop;
    }

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  /* In case of transport mode copy the l3 header.*/
  if (cvmx_unlikely(prefix_ofs > pc->s->ip_offset))
    {
      OCTEON_SE_DEBUG(9, "Copying headers to %p\n",
		      cvmx_phys_to_ptr(packet_out.s.addr) + pc->s->ip_offset);
    
      /* Assert that l3 headers are in the first packet segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > prefix_ofs);
      memcpy(cvmx_phys_to_ptr(packet_out.s.addr) + pc->s->ip_offset, 
	     cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset, 
	     prefix_ofs - pc->s->ip_offset);
    }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */

  /* Prepare Source buffer */
  octeon_se_fastpath_packet_buffer_create(&src, pc->wqe->packet_ptr, 
					  prefix_ofs,
					  pc->s->ip_len + pc->s->ip_offset 
					  - prefix_ofs,
					  pc->wqe->word2.s.bufs);

  /* Count the number of bytes input to crypto processing. */
  OCTEON_SE_FASTPATH_STATS(out_octets =
			   pc->s->ip_len + pc->s->ip_offset - prefix_ofs);
  
  /* Build headers */

  header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;

  /* Build outer header for tunnel mode and modify IP header for 
     transport mode.*/

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_unlikely(!se_trd->tunnel_mode && pc->s->ip_version_6 == 0))
    {
      /* IPv4 transport mode. */
      OCTEON_SE_DEBUG(9, "Modifying IPv4 header at %p\n", header);
      
      /* Modify original IPv4 header and change IP protocol and len. */
      OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, packet_out_len);
      OCTEON_SE_FASTPATH_IPH4_SET_PROTO(header, se_trd->nh); 
      OCTEON_SE_FASTPATH_IPH4_CHECKSUM(header, csum);

      csum = 
	octeon_se_fastpath_csum_update_byte(csum,
					    OCTEON_SE_FASTPATH_IPH4_OFS_PROTO,
					    pc->s->ipproto, se_trd->nh);
      csum = 
	octeon_se_fastpath_csum_update_short(csum, 
					     OCTEON_SE_FASTPATH_IPH4_OFS_LEN,
					     pc->s->ip_len, packet_out_len);

      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
    }
  else if (cvmx_unlikely(!se_trd->tunnel_mode && pc->s->ip_version_6 == 1))
    {
      /* IPv6 transport mode. */
      OCTEON_SE_DEBUG(9, "Modifying IPv6 header at %p\n", header);
      OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, packet_out_len - 
				      OCTEON_SE_FASTPATH_IP6_HDRLEN);
      OCTEON_SE_FASTPATH_IPH6_SET_NH(header, se_trd->nh);
    }
  else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    if (se_trd->ip_version_6 == 0)
      {
	OCTEON_SE_ASSERT(se_trd->tunnel_mode);
	
	/* IPv4 tunnel mode. */
	OCTEON_SE_DEBUG(9, "Building outer IPv4 header at %p\n", header);
	
	OCTEON_SE_ASSERT(packet_out.s.size > 
			 prefix_ofs + OCTEON_SE_FASTPATH_IP4_HDRLEN);
	
	OCTEON_SE_FASTPATH_IPH4_SET_VERSION(header, 4);
	OCTEON_SE_FASTPATH_IPH4_SET_HLEN(header, 5);

	tos = 0;
	OCTEON_SE_FASTPATH_IPH4_SET_TOS(header, tos);
	
	OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, packet_out_len);
	
	if (pc->s->df_bit_processing == OCTEON_SE_FASTPATH_DF_CLEAR
	    || (pc->s->df_bit_processing == OCTEON_SE_FASTPATH_DF_KEEP
		&& pc->s->ipv4_df == 0))
	  {
	    uint32_t id;
	    
	    OCTEON_SE_FASTPATH_GET_NEXT_IPV4_PACKET_ID(core, id);
	    OCTEON_SE_FASTPATH_IPH4_SET_ID(header, id);
	    OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header, 0);
	    pc->s->ipv4_df = 0;
	  }
	else
	  {
	    OCTEON_SE_FASTPATH_IPH4_SET_ID(header, 0);
	    OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header,
					   OCTEON_SE_FASTPATH_IPH4_FRAGOFF_DF);
	    pc->s->ipv4_df = 1;
	  }
	
	OCTEON_SE_FASTPATH_IPH4_SET_TTL(header,
				       OCTEON_SE_FASTPATH_IP4_TUNNEL_MODE_TTL);
	OCTEON_SE_FASTPATH_IPH4_SET_PROTO(header, se_trd->nh);
	OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, 0);
	OCTEON_SE_FASTPATH_IPH4_SET_SRC(header, se_trd->own_addr_low);
	OCTEON_SE_FASTPATH_IPH4_SET_DST(header, se_trd->gw_addr_low);
	
	csum = octeon_se_fastpath_ip_cksum(header,
					   OCTEON_SE_FASTPATH_IP4_HDRLEN);
	OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
	
	prefix_ofs += OCTEON_SE_FASTPATH_IP4_HDRLEN;
      }
    else if (se_trd->ip_version_6 == 1)
      {     
	OCTEON_SE_ASSERT(se_trd->tunnel_mode);
	
	/* IPv6 tunnel mode. */
	OCTEON_SE_DEBUG(9, "Building outer IPv6 header at %p\n", header);
	
	OCTEON_SE_FASTPATH_IPH6_SET_VERSION(header, 6);

	tos = 0;
	OCTEON_SE_FASTPATH_IPH6_SET_CLASS(header, tos);

	flow_label = 0;
	OCTEON_SE_FASTPATH_IPH6_SET_FLOW(header, flow_label);
	
	OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, packet_out_len - 
					OCTEON_SE_FASTPATH_IP6_HDRLEN);
	OCTEON_SE_FASTPATH_IPH6_SET_NH(header, se_trd->nh);
	OCTEON_SE_FASTPATH_IPH6_SET_HL(header,
				       OCTEON_SE_FASTPATH_IP6_TUNNEL_MODE_HL);
	OCTEON_SE_FASTPATH_IPH6_SET_SRC_LOW(header, se_trd->own_addr_low);
	OCTEON_SE_FASTPATH_IPH6_SET_SRC_HIGH(header, se_trd->own_addr_high);
	
	OCTEON_SE_FASTPATH_IPH6_SET_DST_LOW(header, se_trd->gw_addr_low);
	OCTEON_SE_FASTPATH_IPH6_SET_DST_HIGH(header, se_trd->gw_addr_high);
	prefix_ofs += OCTEON_SE_FASTPATH_IP6_HDRLEN;
      }
  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_NATT
  /* Should we add a NAT-T header as well? */
  if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_NATT))
    {
      header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;
      
      OCTEON_SE_DEBUG(9, "Building UDP NAT-T header at %p\n", header);
      
      OCTEON_SE_ASSERT(packet_out.s.size > 
		       prefix_ofs + OCTEON_SE_FASTPATH_UDP_HDRLEN);
      OCTEON_SE_ASSERT((se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH) == 0);
      OCTEON_SE_ASSERT(se_trd->nh == OCTEON_SE_FASTPATH_IPPROTO_UDP);
      
      OCTEON_SE_FASTPATH_UDPH_SET_SRCPORT(header, se_trd->natt_local_port); 
      OCTEON_SE_FASTPATH_UDPH_SET_DSTPORT(header, se_trd->natt_remote_port); 
      OCTEON_SE_FASTPATH_UDPH_SET_LEN(header, 
				      packet_out_len - 
				      (prefix_ofs - pc->s->ip_offset));
      OCTEON_SE_FASTPATH_UDPH_SET_CHECKSUM(header, 0);

      prefix_ofs += OCTEON_SE_FASTPATH_UDP_HDRLEN;
    }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_NATT */

  /* Build ESP/AH */
  esp_ah_ofs = prefix_ofs;
  header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH)
    {
      uint32_t low_seq;

      OCTEON_SE_DEBUG(9, "Building AH header at %p\n", header);

      OCTEON_SE_ASSERT(packet_out.s.size >
		       prefix_ofs + OCTEON_SE_FASTPATH_AH_HDRLEN +
		       combined->icv_len + icv_pad_len);

      /* Get and increment next sequence atomically. Note that se_trd
	 contains the last sequence number transmitted, thus sequence
	 is incremented by one here. */
      ipsec_seq = 
	(uint64_t) cvmx_atomic_fetch_and_add64((int64_t *)&se_trd->seq, 1);
      ipsec_seq++;

      OCTEON_SE_FASTPATH_AHH_SET_NH(header, esp_ah_nh);
      OCTEON_SE_FASTPATH_AHH_SET_LEN(header, 
				     (combined->icv_len + icv_pad_len + 12) / 4
				     - 2);
      OCTEON_SE_FASTPATH_AHH_SET_RESERVED(header, 0);
      OCTEON_SE_FASTPATH_AHH_SET_SPI(header, se_trd->spi_out);
      CVMX_DEXT(low_seq, ipsec_seq, 0, 32);
      OCTEON_SE_FASTPATH_AHH_SET_SEQ(header, low_seq);
      
      prefix_ofs += OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

      /* ICV computation also needs ICV field initialized to zero. */
      memcpy(mac_info->prefix.u8, header, OCTEON_SE_FASTPATH_AH_HDRLEN);
      memset(mac_info->prefix.u8 + OCTEON_SE_FASTPATH_AH_HDRLEN, 0,
	     combined->icv_len);

      mac_info->prefix_len = OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2      
      if (cvmx_unlikely((se_trd->ip_version_6 == 1) && 
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
        {
          prefix_ofs += 4;
          mac_info->prefix_len += 4;
	  
          /* Use IPsec seq as AH padding for making 64 bit aligned. */
          OCTEON_SE_PUT_32BIT_ALIGNED(mac_info->prefix.u8 + 
				      OCTEON_SE_FASTPATH_AH_HDRLEN +
				      combined->icv_len, 
                                      low_seq);
        }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
      if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
        {
          CVMX_DEXT(mac_info->suffix, ipsec_seq, 32, 32);
          mac_info->suffix_available = 1;
	}
      else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
	mac_info->suffix_available = 0;

      /* Assert that crypto offset is 8 byte aligned */
      OCTEON_SE_ASSERT(((uint64_t) (cvmx_phys_to_ptr(packet_out.s.addr) 
				    + prefix_ofs)) % 8 == 0);
      
      octeon_se_fastpath_packet_buffer_create(&dst, packet_out, 
                                              prefix_ofs,
					      packet_out_len
					      + pc->s->ip_offset,  
                                              packet_out_num_segs);

      if (se_trd->ip_version_6 == 1)
	octeon_se_fastpath_mac_add_ah_header6(packet_out,
	                        	      pc->s->ip_offset,
					      combined->update,
					      core->transform_context,
					      0);
      else
	octeon_se_fastpath_mac_add_ah_header4(packet_out,
	                                      pc->s->ip_offset,
					      combined->update,
					      core->transform_context,
					      0);

      OCTEON_SE_DEBUG(9, "MAC prefix, len %d\n", mac_info->prefix_len);
      OCTEON_SE_HEXDUMP(9, mac_info->prefix.u8, mac_info->prefix_len);

      /* Do the actual transform */
      (*combined->encrypt)(core->transform_context,
			   &dst,
			   &src,
			   mac_info,
			   NULL, icv);
      
      /* Copy ICV to packet. */
      if (cvmx_likely(combined->icv_len % 4 == 0))
	{
	  for (i = 0; i < combined->icv_len; i += 4)
	    {
	      OCTEON_SE_PUT_32BIT_ALIGNED(cvmx_phys_to_ptr(packet_out.s.addr)
					  + esp_ah_ofs 
					  + OCTEON_SE_FASTPATH_AH_HDRLEN + i,
					  *(uint32_t *)(((uint8_t *)icv) + i));
	    }
	}
      else
	{
	  memcpy(cvmx_phys_to_ptr(packet_out.s.addr)
		 + esp_ah_ofs + OCTEON_SE_FASTPATH_AH_HDRLEN,
		 icv, combined->icv_len);
	}

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2
      if (cvmx_unlikely((se_trd->ip_version_6 == 1) && 
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
	{
	  /* Use IPsec seq as AH padding for making 64 bit aligned. */
	  OCTEON_SE_PUT_32BIT(cvmx_phys_to_ptr(packet_out.s.addr)
			      + esp_ah_ofs 
			      + OCTEON_SE_FASTPATH_AH_HDRLEN
			      + combined->icv_len, 
			      low_seq);
	}
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */
    }
  else if (cvmx_likely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_ESP))
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
    {
      uint32_t low_seq;

      OCTEON_SE_DEBUG(9, "Building ESP header at %p\n", header);

      /* Assert that there is enough space for ESP */
      OCTEON_SE_ASSERT(packet_out.s.size >
		       prefix_ofs + OCTEON_SE_FASTPATH_ESP_HDRLEN);

      /* Get and increment next sequence atomically. Note that se_trd
	 contains the last sequence number transmitted, thus sequence
	 is incremented by one here. */
      ipsec_seq = 
	(uint64_t) cvmx_atomic_fetch_and_add64((int64_t *)&se_trd->seq, 1);
      ipsec_seq++;

      /* Build ESP header. */
      OCTEON_SE_FASTPATH_ESPH_SET_SPI(header, se_trd->spi_out);
      CVMX_DEXT(low_seq, ipsec_seq, 0, 32);
      OCTEON_SE_FASTPATH_ESPH_SET_SEQ(header, low_seq);
      prefix_ofs += OCTEON_SE_FASTPATH_ESP_HDRLEN;

      /* Fill in extra info for transform. */
      extra_info->pad_len = pad_len;
      extra_info->nh = esp_ah_nh;

      /* Fill in extra data for MAC. */
      OCTEON_SE_PUT_32BIT_ALIGNED(mac_info->prefix.u8, se_trd->spi_out);

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AES_GCM
      if (cvmx_likely(combined->is_auth_cipher))
        {
	  /* Extract cipher nonce. */
          OCTEON_SE_ASSERT(se_trd->cipher_nonce_size == 4);
          OCTEON_SE_GET_32BIT_ALIGNED(se_trd->keymat + 
				      OCTEON_MAX_KEYMAT_LEN /2 + 
				      se_trd->cipher_key_size, 
                                      extra_info->cipher_nonce);
	  
          /* Use IPsec seq# as counter. */ 
          extra_info->iv[0] = ipsec_seq;
	  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
          if (cvmx_unlikely(se_trd->transform & 
			    OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
            {
              OCTEON_SE_PUT_64BIT(&mac_info->prefix.u8[4], ipsec_seq);
              mac_info->prefix_len = 12;
            }
          else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
            {
              OCTEON_SE_PUT_32BIT_ALIGNED(&mac_info->prefix.u8[4], low_seq);
              mac_info->prefix_len = 8;
            }
        }
      else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AES_GCM */
        {
          for (i = 0; i < combined->cipher_iv_len / 8; i++)
            extra_info->iv[i] = cvmx_rng_get_random64();
	  
          /* Prepare extra mac information */
          OCTEON_SE_PUT_32BIT_ALIGNED(&mac_info->prefix.u8[4], low_seq);
          mac_info->prefix_len = 8;
	  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
          if (cvmx_unlikely(se_trd->transform & 
			    OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
            {
	      CVMX_DEXT(mac_info->suffix, ipsec_seq, 32, 32);
	      mac_info->suffix_available = 1;
            }
          else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
	    mac_info->suffix_available = 0;
        }

      /* Assert that crypto offset is 8 byte aligned */
      OCTEON_SE_ASSERT(((uint64_t) (cvmx_phys_to_ptr(packet_out.s.addr) 
				    + prefix_ofs)) % 8 == 0);
      
      octeon_se_fastpath_packet_buffer_create(&dst, packet_out,
                                              prefix_ofs,
					      packet_out_len
					      + pc->s->ip_offset
					      - prefix_ofs,
                                              packet_out_num_segs);
      
      OCTEON_SE_DEBUG(9, "Performing crypto transform\n");

      /* Do the actual transform. */
      (*combined->encrypt)(core->transform_context,
			   &dst,
			   &src,
			   mac_info,
			   extra_info, icv);
      
      /* The trailer should be appended at the end of encrypted data.
	 Write ptr is pointing to correct location which may be unaligned
	 if aes-gcm is used. */
      OCTEON_SE_ASSERT(dst.total_bytes == combined->icv_len);
      
      OCTEON_SE_DEBUG(9, "Inserting ICV, len %d:\n", (int) combined->icv_len);
      OCTEON_SE_HEXDUMP(9, icv, combined->icv_len);

      octeon_se_fastpath_buffer_copy_in(&dst, icv, combined->icv_len);
    }

  /* Update trd statistics only after successful encryption. */
  OCTEON_SE_FASTPATH_STATS({
    cvmx_atomic_add64((int64_t *) &se_trd->out_octets, out_octets);
    cvmx_atomic_add64((int64_t *) &se_trd->out_packets, 1);
  });
Example #16
/**
 * Initialize a USB port for use. This must be called before any
 * other access to the Octeon USB port is made. The port starts
 * off in the disabled state.
 *
 * @param usb    Pointer to an empty cvmx_usbd_state_t structure
 *               that will be populated by the initialize call.
 *               This structure is then passed to all other USB
 *               functions.
 * @param usb_port_number
 *               Which Octeon USB port to initialize.
 * @param flags  Flags to control hardware initialization. See
 *               cvmx_usbd_initialize_flags_t for the flag
 *               definitions. Some flags are mandatory.
 *
 * @return Zero or a negative on error.
 */
int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
                                      int usb_port_number,
                                      cvmx_usbd_initialize_flags_t flags)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
    cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;

    if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    memset(usb, 0, sizeof(*usb));
    usb->init_flags = flags;
    usb->index = usb_port_number;

    /* Try to determine clock type automatically */
    if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
    {
        if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI;  /* Only 12 MHZ crystals are supported */
        else
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
    }

    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* Check for auto ref clock frequency */
        if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
            switch (__cvmx_helper_board_usb_get_clock_type())
            {
                case USB_CLOCK_TYPE_REF_12:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_24:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_48:
                default:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
                    break;
            }
    }

    /* Power On Reset and PHY Initialization */

    /* 1. Wait for DCOK to assert (nothing to do) */
    /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
        USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hrst = 0;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hclk_rst = 0;
    usbn_clk_ctl.s.enable = 0;
    /* 2b. Select the USB reference clock/crystal parameters by writing
        appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* The USB port uses 12/24/48MHz 2.5V board clock
            source at USB_XO. USB_XI should be tied to GND.
            Most Octeon evaluation boards require this setting */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 0;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */

        switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
        {
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
                usbn_clk_ctl.s.p_c_sel = 0;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
                usbn_clk_ctl.s.p_c_sel = 1;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
                usbn_clk_ctl.s.p_c_sel = 2;
                break;
        }
    }
    else
    {
        /* The USB port uses a 12MHz crystal as clock source
            at USB_XO and USB_XI */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 1;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */

        usbn_clk_ctl.s.p_c_sel = 0;
    }
    /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
        setting USBN0/1_CLK_CTL[ENABLE] = 1.  Divide the core clock down such
        that USB is as close as possible to 125Mhz */
    {
        int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
        if (divisor < 4)  /* Lower than 4 doesn't seem to work properly */
            divisor = 4;
        usbn_clk_ctl.s.divide = divisor;
        usbn_clk_ctl.s.divide2 = 0;
    }
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
    usbn_clk_ctl.s.hclk_rst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
    cvmx_wait(64);
    /* 3. Program the power-on reset field in the USBN clock-control register:
        USBN_CLK_CTL[POR] = 0 */
    usbn_clk_ctl.s.por = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 4. Wait 1 ms for PHY clock to start */
    cvmx_wait_usec(1000);
    /* 5. Program the Reset input from automatic test equipment field in the
        USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
    usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
    usbn_usbp_ctl_status.s.ate_reset = 1;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 6. Wait 10 cycles */
    cvmx_wait(10);
    /* 7. Clear ATE_RESET field in the USBN clock-control register:
        USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
    usbn_usbp_ctl_status.s.ate_reset = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 8. Program the PHY reset field in the USBN clock-control register:
        USBN_CLK_CTL[PRST] = 1 */
    usbn_clk_ctl.s.prst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 9. Program the USBP control and status register to select host or
        device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
        device */
    usbn_usbp_ctl_status.s.hst_mode = 1;
    usbn_usbp_ctl_status.s.dm_pulld = 0;
    usbn_usbp_ctl_status.s.dp_pulld = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 10. Wait 1 µs */
    cvmx_wait_usec(1);
    /* 11. Program the hreset_n field in the USBN clock-control register:
        USBN_CLK_CTL[HRST] = 1 */
    usbn_clk_ctl.s.hrst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 12. Proceed to USB core initialization */
    usbn_clk_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    cvmx_wait_usec(1);

    /* Program the following fields in the global AHB configuration
        register (USBC_GAHBCFG)
        DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
        Burst length, USBC_GAHBCFG[HBSTLEN] = 0
        Nonperiodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[NPTXFEMPLVL]
        Periodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[PTXFEMPLVL]
        Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
    {
        cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
        usbcx_gahbcfg.u32 = 0;
        usbcx_gahbcfg.s.dmaen = 1;
        usbcx_gahbcfg.s.hbstlen = 0;
        usbcx_gahbcfg.s.nptxfemplvl = 1;
        usbcx_gahbcfg.s.ptxfemplvl = 1;
        usbcx_gahbcfg.s.glblintrmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
    }

    /* Program the following fields in USBC_GUSBCFG register.
        HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
        ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
        USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
        PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
    {
        cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
        usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
        usbcx_gusbcfg.s.toutcal = 0;
        usbcx_gusbcfg.s.ddrsel = 0;
        usbcx_gusbcfg.s.usbtrdtim = 0x5;
        usbcx_gusbcfg.s.phylpwrclksel = 0;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
    }

    /* Program the following fields in the USBC0/1_DCFG register:
        Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
        Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
        Periodic frame interval (if periodic endpoints are supported),
        USBC0/1_DCFG[PERFRINT] = 1 */
    {
        cvmx_usbcx_dcfg_t usbcx_dcfg;
        usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
        usbcx_dcfg.s.devspd = 0;
        usbcx_dcfg.s.nzstsouthshk = 0;
        usbcx_dcfg.s.perfrint = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
    }

    /* Program the USBC0/1_GINTMSK register */
    {
        cvmx_usbcx_gintmsk_t usbcx_gintmsk;
        usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
        usbcx_gintmsk.s.oepintmsk = 1;
        usbcx_gintmsk.s.inepintmsk = 1;
        usbcx_gintmsk.s.enumdonemsk = 1;
        usbcx_gintmsk.s.usbrstmsk = 1;
        usbcx_gintmsk.s.usbsuspmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
    }

    cvmx_usbd_disable(usb);
    return 0;
}
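For reference, a minimal caller of the initialization sequence above might look like the following sketch. The cvmx_usbd_initialize() and cvmx_usbd_poll() names and signatures are assumptions inferred from this file's internals, not taken from a verified cvmx-usbd.h; check the header of your SDK release before relying on them.

/* Hedged usage sketch; not part of the original example. */
#include "cvmx.h"
#include "cvmx-usbd.h"

static cvmx_usbd_state_t usb_state;

int usb_device_bringup(void)
{
    /* Port 0, no initialization flags (third parameter assumed) */
    if (cvmx_usbd_initialize(&usb_state, 0, 0) < 0)
        return -1;

    /* Service controller events from the application's main loop */
    while (1)
        cvmx_usbd_poll(&usb_state);

    return 0; /* not reached */
}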
Beispiel #17
0
/**
 * @INTERNAL
 * Perform USB device mode initialization after a reset completes.
 * This should be called after USBC0/1_GINTSTS[USBRESET] is asserted
 * and corresponds to section 22.6.1.1, "Initialization on USB Reset",
 * in the manual.
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 *
 * @return Zero on success; negative on error.
 */
static int __cvmx_usbd_device_reset_complete(cvmx_usbd_state_t *usb)
{
    cvmx_usbcx_ghwcfg2_t usbcx_ghwcfg2;
    cvmx_usbcx_ghwcfg3_t usbcx_ghwcfg3;
    cvmx_usbcx_doepmsk_t usbcx_doepmsk;
    cvmx_usbcx_diepmsk_t usbcx_diepmsk;
    cvmx_usbcx_daintmsk_t usbc_daintmsk;
    cvmx_usbcx_gnptxfsiz_t gnptxfsiz;
    int fifo_space;
    int i;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Processing reset\n", __FUNCTION__);

    usbcx_ghwcfg2.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG2(usb->index));
    usbcx_ghwcfg3.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));

    /* Set up the data FIFO RAM for each of the FIFOs */
    fifo_space = usbcx_ghwcfg3.s.dfifodepth;

    /* Start at the top of the FIFO RAM and assign space for each periodic FIFO */
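    /* Worked example with hypothetical sizes: if dfifodepth were 4096 words
       and there were two periodic FIFOs of 256 words each, the loop below
       would place FIFO2 at [3840,4096) and FIFO1 at [3584,3840); half of the
       remaining 3584 words (1792) then goes to the non-periodic TX FIFO at
       [1792,3584), and the RX FIFO receives the final 1792 words. */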
    for (i=usbcx_ghwcfg2.s.numdeveps; i>0; i--)
    {
        cvmx_usbcx_dptxfsizx_t siz;
        siz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index));
        fifo_space -= siz.s.dptxfsize;
        siz.s.dptxfstaddr = fifo_space;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index), siz.u32);
    }

    /* Assign half of the remaining space to the non-periodic TX FIFO */
    gnptxfsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
    gnptxfsiz.s.nptxfdep = fifo_space / 2;
    fifo_space -= gnptxfsiz.s.nptxfdep;
    gnptxfsiz.s.nptxfstaddr = fifo_space;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), gnptxfsiz.u32);

    /* Assign the remaining space to the RX FIFO */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GRXFSIZ(usb->index), fifo_space);

    /* Unmask the common endpoint interrupts */
    usbcx_doepmsk.u32 = 0;
    usbcx_doepmsk.s.setupmsk = 1;
    usbcx_doepmsk.s.epdisbldmsk = 1;
    usbcx_doepmsk.s.xfercomplmsk = 1;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPMSK(usb->index), usbcx_doepmsk.u32);
    usbcx_diepmsk.u32 = 0;
    usbcx_diepmsk.s.epdisbldmsk = 1;
    usbcx_diepmsk.s.xfercomplmsk = 1;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPMSK(usb->index), usbcx_diepmsk.u32);

    usbc_daintmsk.u32 = 0;
    usbc_daintmsk.s.inepmsk = -1;  /* Unmask interrupts for all IN endpoints */
    usbc_daintmsk.s.outepmsk = -1; /* Unmask interrupts for all OUT endpoints */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DAINTMSK(usb->index), usbc_daintmsk.u32);

    /* Set all endpoints to NAK */
    for (i=0; i<usbcx_ghwcfg2.s.numdeveps+1; i++)
    {
        cvmx_usbcx_doepctlx_t usbc_doepctl;
        usbc_doepctl.u32 = 0;
        usbc_doepctl.s.snak = 1;
        usbc_doepctl.s.usbactep = 1;
        usbc_doepctl.s.mps = (i==0) ? 0 : 64; /* For EP0 the MPS field is an encoding: 0 selects 64 bytes */
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(i, usb->index), usbc_doepctl.u32);
    }

    return 0;
}
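A reset handler like the one above is normally reached from the driver's interrupt poll path once the core reports a USB reset. Below is a minimal dispatch sketch, assuming the GINTSTS register layout used elsewhere in this file; the usbrst field name and its write-1-to-clear behavior are assumptions.

/* Hedged dispatch sketch; not part of the original example. */
static void usbd_reset_dispatch_sketch(cvmx_usbd_state_t *usb)
{
    cvmx_usbcx_gintsts_t gintsts;
    gintsts.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
    if (gintsts.s.usbrst)
    {
        cvmx_usbcx_gintsts_t clear;
        clear.u32 = 0;
        clear.s.usbrst = 1; /* assumed write-1-to-clear status bit */
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), clear.u32);
        __cvmx_usbd_device_reset_complete(usb);
    }
}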
Beispiel #18
0
/**
 * Main packet-processing loop: pull work-queue entries from the
 * POW and dispatch incoming packets.
 */
int inic_data_loop(void)
{
		cvm_common_wqe_t *swp = NULL;
		cvm_tcp_in_endpoints_t conn;
		cvm_tcp_tcphdr_t *th = NULL;
		cvm_ip_ip_t *ih = NULL;
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
		uint64_t cpu_clock_hz = sys_info_ptr->cpu_clock_hz;
		uint64_t tick_cycle = cvmx_get_cycle();
		uint64_t tick_step;
		uint32_t idle_processing_interval_ticks = (CVM_COMMON_IDLE_PROCESSING_INTERVAL)*(1000*1000)/(CVM_COMMON_TICK_LEN_US);
		uint32_t idle_processing_last_ticks = 0;
#ifdef INET6
		struct cvm_ip6_ip6_hdr *ip6 = NULL;
#ifdef CVM_ENET_TUNNEL
		struct cvm_ip6_ip6_hdr *i6h = NULL;
#endif
#endif


#ifdef CVM_CLI_APP
		uint64_t idle_cycle_start_value;
#endif

		/* for the simulator */
		if (cpu_clock_hz == 0)
		{
				cpu_clock_hz = 333000000;
		}

		tick_step = (CVM_COMMON_TICK_LEN_US * cpu_clock_hz) / 1000000;
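		/* Worked example with hypothetical values: if CVM_COMMON_TICK_LEN_US
		   were 500 and the core ran at 333 MHz, tick_step would be
		   (500 * 333000000) / 1000000 = 166500 cycles per tick. */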
		cvm_debug_print_interval = cpu_clock_hz;

#ifndef REAL_HW
		/* for the simulator, set the debug interval to be 3M cycles */
		cvm_debug_print_interval = 3000000;
#endif

#ifdef DUTY_CYCLE
		start_cycle = cvmx_get_cycle();
		process_count = 0;
#endif

		if (cvmx_coremask_first_core(coremask_data)) 
		{
				/* Initiate a timer transaction for arp entry timeouts */
				//if(cvm_enet_arp_timeout_init() != CVMX_TIM_STATUS_SUCCESS)
				//{
				//		printf("Failed init of cvm_ip_arp_timeout_init\n");
				//}
		}

#if defined(CVM_COMBINED_APP_STACK)
		/* Flush the packets sent by main_global and main_local */
		/*
		printf("before cvm_send_packet () \n ");
		if (out_swp)
		{
				cvm_send_packet ();
		}
		printf("after cvm_send_packet () \n ");
		*/
		uint64_t app_timeout = cvmx_get_cycle ();
#endif

		/* start the main loop */
		while (1)
		{


#ifdef DUTY_CYCLE
				end_cycle = cvmx_get_cycle();

				/* check the wrap around case */
				if (end_cycle < start_cycle) end_cycle += cpu_clock_hz;

				if ((end_cycle - start_cycle) > cvm_debug_print_interval)
				{
						inic_do_per_second_duty_cycle_processing();
				}
#endif /* DUTY_CYCLE */

				cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, 1);

				/* update the ticks variable */
				while (cvmx_get_cycle() - tick_cycle > tick_step)
				{
						tick_cycle += tick_step;
						cvm_tcp_ticks++;
						if (!(cvm_tcp_ticks & 0x1f)) CVM_COMMON_HISTORY_SET_CYCLE();
				}


				/* do common idle processing */
				if ( (cvm_tcp_ticks - idle_processing_last_ticks) > idle_processing_interval_ticks)
				{
						if (cvmx_coremask_first_core(coremask_data)) 
						{
								cvm_common_do_idle_processing();
						}

						idle_processing_last_ticks = cvm_tcp_ticks;
				}


#ifdef CVM_CLI_APP
				idle_cycle_start_value = cvmx_get_cycle();
#endif

				/* get work entry */
				swp = (cvm_common_wqe_t *)cvmx_pow_work_response_async(CVMX_SCR_WORK);
				if (swp == NULL)
				{
						idle_counter++;

						if(core_id == highest_core_id)
						{
								cvm_enet_check_link_status();
						}

#ifdef CVM_CLI_APP
						cvmx_fau_atomic_add64(core_idle_cycles[core_id], (cvmx_get_cycle()-idle_cycle_start_value) );
#endif
						continue;
				}

				CVM_COMMON_EXTRA_STATS_ADD64 (CVM_FAU_REG_WQE_RCVD, 1);

#ifdef WORK_QUEUE_ENTRY_SIZE_128 // {
				CVMX_PREFETCH0(swp);
#else
				/* Prefetch work-queue entry */
				CVMX_PREFETCH0(swp);
				CVMX_PREFETCH128(swp);
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

				out_swp = 0;
				out_swp_tail = 0;


#ifdef DUTY_CYCLE
				/* we are about to start processing the packet - remember the cycle count */
				process_start_cycle = cvmx_get_cycle();
#endif


				/* Short cut the common case */
				if (cvmx_likely(swp->hw_wqe.unused == 0))
				{
						goto packet_from_the_wire;
				}
				printf("Get work with unused is %X\n", swp->hw_wqe.unused);

				{
						{

packet_from_the_wire:

#if CVM_PKO_DONTFREE
								swp->hw_wqe.packet_ptr.s.i = 0;
#endif

#ifdef SANITY_CHECKS
								/* we have a work queue entry - do input sanity checks */
								ret = cvm_common_input_sanity_and_buffer_count_update(swp);
#endif

								if (cvmx_unlikely(swp->hw_wqe.word2.s.rcv_error))
								{
										goto discard_swp; /* Receive error */
								}

#ifndef WORK_QUEUE_ENTRY_SIZE_128 // {
								{
										/* Touch the WQE so the compiler keeps the load that
										   guarantees the prefetch has completed */
										volatile uint64_t dp = *(volatile uint64_t*)&swp->next;
										(void)dp;
								}
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

								{
										/* Initialize SW portion of the work-queue entry */
										uint64_t *dptr = (uint64_t*)(&swp->next);
										dptr[0] = 0;
										dptr[1] = 0;
										dptr[2] = 0;
										dptr[3] = 0;
								}

								if(cvmx_unlikely(swp->hw_wqe.word2.s.not_IP))
								{
										goto output;
								}

								/* Shortcut classification to avoid multiple lookups */
								if(
#ifndef INET6
												swp->hw_wqe.word2.s.is_v6 || 
#endif
												swp->hw_wqe.word2.s.is_bcast 
#ifndef INET6
												|| swp->hw_wqe.word2.s.is_mcast
#endif
								  )
								{
										goto discard_swp; /* Not handled by this fast path */
								}


								/* Packet is unicast IPv4, without L2 errors */
								/* (All IP exceptions are dropped.  This currently includes
								 *  IPv4 options and IPv6 extension headers.)
								 */
								if(cvmx_unlikely(swp->hw_wqe.word2.s.IP_exc))
								{
										goto discard_swp;
								}

								/* Packet is Ipv4 (and no IP exceptions) */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.is_frag || !swp->hw_wqe.word2.s.tcp_or_udp))
								{
										goto output;
								}

#ifdef ANVL_RFC_793_COMPLIANCE
								/* RFC 793 says that:
								   - We should send a RST out when we get a packet with FIN set 
								   without the ACK bit set in the flags field. 
								   - We should send a RST out when we get a packet with no flag set.
								   Hence, let TCP stack handle these conditions.
								 */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error &&
														((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG8_ERR) &&
														((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG9_ERR)))
#else
										if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error))
#endif
										{
												cvm_tcp_handle_error(swp);
												goto discard_swp;
										}

								/* Packet is not fragmented, TCP/UDP, no IP exceptions/L4 errors */
								/* We can try an L4 lookup now, but we need all the information */
								ih = ((cvm_ip_ip_t *)&(swp->hw_wqe.packet_data[CVM_COMMON_PD_ALIGN]));

								if (!swp->hw_wqe.word2.s.is_v6)
								{
										/* For IPv4, the L4 header starts at the IP header length
										   (in 32-bit words) plus the CVM_COMMON_PD_ALIGN offset of
										   the packet data in the WQE */
										swp->l4_offset = ((uint16_t)(ih->ip_hl) << 2) + CVM_COMMON_PD_ALIGN;
										swp->l4_prot = ih->ip_p;
								}
#ifdef INET6
								else
								{
										ip6 = (struct cvm_ip6_ip6_hdr *) &swp->hw_wqe.packet_data[CVM_COMMON_IP6_PD_ALIGN];

										CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5, 
														"%s: %d Packet trace Src: %s Dest: %s len: %d\n", 
														__FUNCTION__, __LINE__, 
														cvm_ip6_ip6_sprintf (&ip6->ip6_src), 
														cvm_ip6_ip6_sprintf (&ip6->ip6_dst), 
														swp->hw_wqe.len);
										/* For IPv6, the L4 header follows the fixed-length IPv6
										   header; the ports are not known until the TCP header is
										   parsed below */
										swp->l4_offset = CVM_IP6_IP6_HDRLEN;
										swp->l4_prot = ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt;

								}
#endif

								th = ((cvm_tcp_tcphdr_t *)&(swp->hw_wqe.packet_data[swp->l4_offset]));

								/* check if it is a TCP packet */
								if (swp->l4_prot == CVM_IP_IPPROTO_TCP)
								{
										process_handle(swp);
#ifdef INET6
										if (!swp->hw_wqe.word2.s.is_v6)
#endif
										{
												CVM_TCP_TCP_DUMP ((void*)ih);

												/* IPv4: extract the connection 4-tuple */
												conn.ie_laddr = ih->ip_dst.s_addr;
												conn.ie_faddr = ih->ip_src.s_addr;
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

										}
#ifdef INET6
										else
										{
												/* IPv6: extract the connection 4-tuple */
												memcpy (&conn.ie6_laddr, &ip6->ip6_dst, sizeof (struct cvm_ip6_in6_addr));
												memcpy (&conn.ie6_faddr, &ip6->ip6_src, sizeof (struct cvm_ip6_in6_addr));
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

												/* do a TCP lookup */
												swp->tcb = cvm_tcp6_lookup (swp);

												CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5, "%s: %d TCPv6 lookup Src: %s/%d Dest: %s/%d ret_tcb: 0x%llx\n", 
																__FUNCTION__, __LINE__, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_faddr), conn.ie_fport, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_laddr), conn.ie_lport, 
																CAST64(swp->tcb));
										}
#endif // INET6
								}


								goto output;
						} /* packet from wire */
				} /* end of work-type dispatch */


output:
				CVMX_SYNCWS;

				/* Send packet out */
				if (out_swp)
				{
						cvm_send_packet();
				}

				if(swp != NULL)
				{
						S3_send_packet((cvmx_wqe_t *)swp);
						swp = NULL;
				}
#ifdef DUTY_CYCLE
				process_end_cycle = cvmx_get_cycle();
				process_count += (process_end_cycle - process_start_cycle);
#endif
		}

		return (0);


discard_swp:
		/* Free the chained buffers */
		cvm_common_packet_free(swp);

		/* Free the work queue entry */
		cvm_common_free_fpa_buffer(swp, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		swp = NULL;
		goto output;

} /* inic_data_loop */
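
The loop above hides work-queue latency by issuing an asynchronous POW request early and collecting the response only when it is needed. Stripped of all protocol handling, the skeleton of that pattern looks like the sketch below; it uses the documented cvmx POW API, with CVMX_SCR_WORK being the scratchpad slot already used above and process_packet() a hypothetical placeholder.

/* Hedged skeleton of the async work-request pattern; not from the original. */
#include "cvmx.h"
#include "cvmx-pow.h"
#include "cvmx-wqe.h"

void work_loop_skeleton(void)
{
    /* Prime the pipeline with one outstanding request */
    cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, CVMX_POW_WAIT);

    while (1)
    {
        cvmx_wqe_t *work = cvmx_pow_work_response_async(CVMX_SCR_WORK);

        /* Immediately request the next entry so the fetch overlaps processing */
        cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, CVMX_POW_WAIT);

        if (work == NULL)
            continue; /* request timed out: do idle-path work here */

        /* process_packet(work); -- application-specific (hypothetical) */
    }
}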