Example no. 1
int cvmx_bootmem_init(void *mem_desc_ptr)
{
	/* Here we set the global pointer to the bootmem descriptor
	 * block.  This pointer will be used directly, so we will set
	 * it up to be directly usable by the application.  It is set
	 * up as follows for the various runtime/ABI combinations:
	 *
	 * Linux 64 bit: Set XKPHYS bit
	 * Linux 32 bit: use mmap to create mapping, use virtual address
	 * CVMX 64 bit:  use physical address directly
	 * CVMX 32 bit:  use physical address directly
	 *
	 * Note that the CVMX environment assumes the use of 1-1 TLB
	 * mappings so that the physical addresses can be used
	 * directly
	 */
	if (!cvmx_bootmem_desc) {
#if   defined(CVMX_ABI_64)
		/* Set XKPHYS bit */
		cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
		cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
	}

	return 0;
}
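For context, here is a minimal usage sketch of the init-then-allocate flow, assuming the Cavium SDK headers are available; the block size, alignment, and error handling are illustrative, not part of the original example:

#include "cvmx.h"
#include "cvmx-bootmem.h"

/* Hypothetical caller: the bootloader hands over the descriptor's
 * physical address, which cvmx_bootmem_init() converts as described
 * in the comment above. */
int app_bootmem_setup(void *mem_desc_ptr)
{
	void *buf;

	if (cvmx_bootmem_init(mem_desc_ptr) != 0)
		return -1;

	/* Allocate 4 KB, 4 KB aligned; min/max of 0 is assumed here to
	 * mean "no range restriction". */
	buf = cvmx_bootmem_alloc_range(4096, 4096, 0, 0);
	if (buf == NULL)
		return -1;

	return 0;
}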
Example no. 2
/*
 * The boot loader command line may specify kernel environment variables or
 * applicable boot flags of boot(8).
 */
static void
octeon_init_kenv(register_t ptr)
{
	int i;
	char *n;
	char *v;
	octeon_boot_descriptor_t *app_desc_ptr;

	app_desc_ptr = (octeon_boot_descriptor_t *)(intptr_t)ptr;
	memset(octeon_kenv, 0, sizeof(octeon_kenv));
	init_static_kenv(octeon_kenv, sizeof(octeon_kenv));

	for (i = 0; i < app_desc_ptr->argc; i++) {
		v = cvmx_phys_to_ptr(app_desc_ptr->argv[i]);
		if (v == NULL)
			continue;
		if (*v == '-') {
			boothowto_parse(v);
			continue;
		}
		n = strsep(&v, "=");
		if (v == NULL)
			kern_setenv(n, "1");
		else
			kern_setenv(n, v);
	}
}
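The loop above treats arguments starting with '-' as boot(8) flags and everything else as name=value environment pairs, with a bare name defaulting to "1". A standalone sketch of the same convention, with printf standing in for kern_setenv()/boothowto_parse() and hypothetical argument values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char a0[] = "-s", a1[] = "bootverbose",
	     a2[] = "vfs.root.mountfrom=ufs:/dev/da0s1a";
	char *argv[] = { a0, a1, a2 };
	char *n, *v;
	int i;

	for (i = 0; i < 3; i++) {
		v = argv[i];
		if (*v == '-') {
			printf("boot flag: %s\n", v);	/* boothowto_parse(v) */
			continue;
		}
		n = strsep(&v, "=");
		if (v == NULL)
			printf("setenv %s=1\n", n);	/* kern_setenv(n, "1") */
		else
			printf("setenv %s=%s\n", n, v);	/* kern_setenv(n, v) */
	}
	return 0;
}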
Example no. 3
/* Build an RPC response in place in the received packet buffer and
 * resubmit the work entry to the POW, targeting the Linux-side group. */
void oct_send_response(cvmx_wqe_t *work, uint16_t opcode, void *data, uint32_t size)
{
	void *resp = NULL;
	rpc_ether_hdr_t *hdr;
	rpc_msg_t *rpcmsg;

	resp = (void *)cvmx_phys_to_ptr(work->packet_ptr.s.addr);

	hdr = (rpc_ether_hdr_t *)resp;

	hdr->type = ETH_P;

	rpcmsg = (rpc_msg_t *)((uint8_t *)resp + sizeof(rpc_ether_hdr_t));
	rpcmsg->opcode = opcode;
	rpcmsg->info_len = size;
	memcpy((void *)rpcmsg->info_buf, data, size);

	work->packet_ptr.s.size = sizeof(rpc_ether_hdr_t) + sizeof(rpc_msg_t) + rpcmsg->info_len;

	cvmx_wqe_set_len(work, work->packet_ptr.s.size);
	cvmx_wqe_set_port(work, 0);
	cvmx_wqe_set_grp(work, TO_LINUX_GROUP);
	
	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type, cvmx_wqe_get_qos(work), TO_LINUX_GROUP);
}
Example no. 4
void encryption(uint8_t *enc_map, cvm_common_wqe_t *swp, uint32_t pos)
{
	uint8_t *ptr = (uint8_t *)cvmx_phys_to_ptr(swp->hw_wqe.packet_ptr.s.addr);
	uint32_t i;

	/* Substitute each payload byte in place through the translation table. */
	for (i = pos; i < swp->hw_wqe.len; i++)
		ptr[i] = enc_map[ptr[i]];
}
Example no. 5
void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name)
{
    uint64_t addr;

    addr = octeon_phy_mem_named_block_alloc(cvmx_bootmem_desc, size, min_addr, max_addr, align, name);
    if (addr)
        return cvmx_phys_to_ptr(addr);
    else
        return NULL;

}
Example no. 6
void *hfa_bootmem_alloc (uint64_t size, uint64_t alignment)
{
    int64_t address;

    address = cvmx_bootmem_phy_alloc(size, 0, 0, alignment, 0);

    if (address > 0)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
Example no. 7
/*
 *  Allocate an mbuf to describe the packet.
 *  If the work entry has an error, return NULL;
 *  otherwise free the wqe and return the mbuf.
 */
void *
oct_rx_process_work(cvmx_wqe_t *wq)
{
	void *pkt_virt;
	mbuf_t *m;

	if (wq->word2.s.rcv_error || cvmx_wqe_get_bufs(wq) > 1){
		/*
		 *  Work has an error, so drop it;
		 *  jumbo packets are not supported yet.
		 */
		printf("recv error\n");
		oct_packet_free(wq, wqe_pool);
		STAT_RECV_ERR;
		return NULL;
	}

	pkt_virt = (void *) cvmx_phys_to_ptr(wq->packet_ptr.s.addr);
	if(NULL == pkt_virt)
	{
		STAT_RECV_ADDR_ERR;
		return NULL;
	}
	
#ifdef SEC_RXTX_DEBUG
    printf("Received %u byte packet.\n", cvmx_wqe_get_len(wq));
    printf("Processing packet\n");
    cvmx_helper_dump_packet(wq);
#endif

	m = (mbuf_t *)MBUF_ALLOC();

	memset((void *)m, 0, sizeof(mbuf_t));

	m->magic_flag = MBUF_MAGIC_NUM;
	PKTBUF_SET_HW(m);
	
	m->packet_ptr.u64 = wq->packet_ptr.u64;

	m->input_port = cvmx_wqe_get_port(wq);
	
	m->pkt_totallen = cvmx_wqe_get_len(wq);
	m->pkt_ptr = pkt_virt;

	cvmx_fpa_free(wq, wqe_pool, 0);
	
	STAT_RECV_PC_ADD;
	STAT_RECV_PB_ADD(m->pkt_totallen);

	STAT_RECV_OK;
	return (void *)m;

}
Example no. 8
void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
                               uint64_t min_addr, uint64_t max_addr)
{
    int64_t address;
    address =
        cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);

    if (address > 0)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
Example no. 9
void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr)
{
    uint64_t address;
    octeon_lock(CAST64(&(cvmx_bootmem_desc->lock)));
    address = octeon_phy_mem_block_alloc(cvmx_bootmem_desc, size, min_addr, max_addr, alignment);
    octeon_unlock(CAST64(&(cvmx_bootmem_desc->lock)));

    if (address)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
Example no. 10
int cvmx_bootmem_init(void *mem_desc_ptr)
{
    if (!cvmx_bootmem_desc) {
#if   defined(CVMX_ABI_64)
        /* Set XKPHYS bit */
        cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
        cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
    }

    return 0;
}
Example no. 11
void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
                                     uint64_t max_addr, uint64_t align,
                                     char *name)
{
    int64_t addr;

    addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
            align, name, 0);
    if (addr >= 0)
        return cvmx_phys_to_ptr(addr);
    else
        return NULL;
}
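A minimal usage sketch for the named-range variant above (the block name, size, and alignment are illustrative):

#include <string.h>

/* Reserve a 1 MB, 128-byte aligned block anywhere in DRAM under a
 * well-known name; other applications can later retrieve it by name
 * (see the named-block finder in Example no. 12). */
void my_region_setup(void)
{
	void *region = cvmx_bootmem_alloc_named_range(1 << 20, 0, 0, 128,
						      "my-shared-region");
	if (region != NULL)
		memset(region, 0, 1 << 20);
}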
Example no. 12
struct cvmx_bootmem_named_block_desc *
	cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
{
	unsigned int i;
	struct cvmx_bootmem_named_block_desc *named_block_array_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
	/*
	 * Lock the structure to make sure that it is not being
	 * changed while we are examining it.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();

	/* Use XKPHYS for 64 bit linux */
	named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
	    cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);

#ifdef DEBUG
	cvmx_dprintf
	    ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
	     named_block_array_ptr);
#endif
	if (cvmx_bootmem_desc->major_version == 3) {
		for (i = 0;
		     i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
			if ((name && named_block_array_ptr[i].size
			     && !strncmp(name, named_block_array_ptr[i].name,
					 cvmx_bootmem_desc->named_block_name_len
					 - 1))
			    || (!name && !named_block_array_ptr[i].size)) {
				if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
					cvmx_bootmem_unlock();

				return &(named_block_array_ptr[i]);
			}
		}
	} else {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
	}
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();

	return NULL;
}
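A hedged lookup sketch using the finder above; the block name is illustrative, and the base_addr/size fields are used the same way as in Example no. 17 below:

struct cvmx_bootmem_named_block_desc *blk;

blk = cvmx_bootmem_phy_named_block_find("my-shared-region", 0);
if (blk != NULL)
	cvmx_dprintf("found block at 0x%llx, %llu bytes\n",
		     (unsigned long long)blk->base_addr,
		     (unsigned long long)blk->size);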
Example no. 13
/**
 * Free a work queue entry received in an intercept callback.
 *
 * @param work_queue_entry
 *               Work queue entry to free
 * @return Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	cvmx_buf_ptr_t segment_ptr = work->packet_ptr;

	while (segments--) {
		cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)
		    cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (__predict_false(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), segment_ptr.s.pool, DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE/128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
Example no. 14
/**
 * cvm_oct_free_work- Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
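A hedged sketch of how such a free routine might be driven from an intercept-style receive path; the callback shape is an illustrative assumption, not part of the examples above:

/* Hypothetical intercept callback: inspect the packet, then release the
 * WQE and its buffer chain through cvm_oct_free_work(). */
static int my_intercept_cb(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	/* ... examine cvmx_wqe_get_len(work) and the packet data here ... */

	return cvm_oct_free_work(work);	/* zero on success, negative on failure */
}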
Example no. 15
uint16_t oct_rx_command_get(cvmx_wqe_t *work)
{
	uint8_t *data;
	rpc_msg_t *rpcmsg;

	if (cvmx_wqe_get_bufs(work)) {
		data = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
		if (data == NULL)
			return COMMAND_INVALID;
	} else {
		return COMMAND_INVALID;
	}

	rpcmsg = (rpc_msg_t *)data;

	return rpcmsg->opcode;
}
Example no. 16
static cvmx_bootinfo_t *
octeon_process_app_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr)
{
	cvmx_bootinfo_t *octeon_bootinfo;

	/* XXX Why is 0x00000000ffffffffULL a bad value?  */
	if (app_desc_ptr->cvmx_desc_vaddr == 0 ||
	    app_desc_ptr->cvmx_desc_vaddr == 0xfffffffful) {
		cvmx_safe_printf("Bad octeon_bootinfo %#jx\n",
		    (uintmax_t)app_desc_ptr->cvmx_desc_vaddr);
		return (NULL);
	}

	octeon_bootinfo = cvmx_phys_to_ptr(app_desc_ptr->cvmx_desc_vaddr);
	if (octeon_bootinfo->major_version != 1) {
		cvmx_safe_printf("Incompatible CVMX descriptor from bootloader: %d.%d %p\n",
		    (int) octeon_bootinfo->major_version,
		    (int) octeon_bootinfo->minor_version, octeon_bootinfo);
		return (NULL);
	}

	cvmx_sysinfo_minimal_initialize(octeon_bootinfo->phy_mem_desc_addr,
					octeon_bootinfo->board_type,
					octeon_bootinfo->board_rev_major,
					octeon_bootinfo->board_rev_minor,
					octeon_bootinfo->eclock_hz);
	memcpy(cvmx_sysinfo_get()->mac_addr_base,
	       octeon_bootinfo->mac_addr_base, 6);
	cvmx_sysinfo_get()->mac_addr_count = octeon_bootinfo->mac_addr_count;
	cvmx_sysinfo_get()->compact_flash_common_base_addr = 
		octeon_bootinfo->compact_flash_common_base_addr;
	cvmx_sysinfo_get()->compact_flash_attribute_base_addr = 
		octeon_bootinfo->compact_flash_attribute_base_addr;
	cvmx_sysinfo_get()->core_mask = octeon_bootinfo->core_mask;
	cvmx_sysinfo_get()->led_display_base_addr =
		octeon_bootinfo->led_display_base_addr;
	memcpy(cvmx_sysinfo_get()->board_serial_number,
	       octeon_bootinfo->board_serial_number,
	       sizeof cvmx_sysinfo_get()->board_serial_number);
	return (octeon_bootinfo);
}
Example no. 17
/* Locate the named bootmem block holding the per-core soft TX/RX state
 * and point the oct_stx[] entries into it. */
int oct_rxtx_get(void)
{
    int i;
    void *ptr;
    const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(OCT_TX_DESC_NAME);
    if (block_desc)
    {
        ptr = cvmx_phys_to_ptr(block_desc->base_addr);
    }
    else
    {
        return SEC_NO;
    }

    for( i = 0; i < CPU_HW_RUNNING_MAX; i++ )
    {
        oct_stx[i] = (oct_softx_stat_t *)((uint8_t *)ptr + sizeof(oct_softx_stat_t) * i);
    }

    return SEC_OK;
}
Example no. 18
/* Dispatch a control-plane command carried in a received work entry. */
void oct_rx_process_command(cvmx_wqe_t *wq)
{
	uint16_t opcode = oct_rx_command_get(wq);
	void *data;
	if(opcode == COMMAND_INVALID)
	{
		oct_packet_free(wq, wqe_pool);
		return;
	}

	data = cvmx_phys_to_ptr(wq->packet_ptr.s.addr);
	
	switch(opcode)
	{
		case COMMAND_SHOW_BUILD_TIME:
		{
			dp_show_build_time(wq, data);
			break;
		}
		case COMMAND_SHOW_PKT_STAT:
		{
			dp_show_pkt_stat(wq, data);
			break;
		}
		case COMMAND_SHOW_MEM_POOL:
		{
			dp_show_mem_pool(wq, data);
			break;
		}
		case COMMAND_ACL_RULE_COMMIT:
		{
			dp_acl_rule_commit(wq, data);
			break;
		}
		default:
		{
			printf("unsupported command\n");
			break;
		}
	}
}
Example no. 19
/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @addr:   Address of base of memory region to read into L2 cache
 * @len:    Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
	char *ptr;

	/*
	 * Adjust addr and length so we get all cache lines even for
	 * small ranges spanning two cache lines.
	 */
	len += addr & CVMX_CACHE_LINE_MASK;
	addr &= ~CVMX_CACHE_LINE_MASK;
	ptr = cvmx_phys_to_ptr(addr);
	/*
	 * Invalidate L1 cache to make sure all loads result in data
	 * being in L2.
	 */
	CVMX_DCACHE_INVALIDATE;
	while (len > 0) {
		ACCESS_ONCE(*ptr);
		len -= CVMX_CACHE_LINE_SIZE;
		ptr += CVMX_CACHE_LINE_SIZE;
	}
}
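The adjustment above can be checked standalone; a quick sketch assuming a 128-byte cache line (the OCTEON line size) and an illustrative address:

#include <stdio.h>
#include <stdint.h>

#define LINE_SIZE 128
#define LINE_MASK (LINE_SIZE - 1)

int main(void)
{
	/* A 16-byte range starting at 0x1078 straddles two 128-byte lines. */
	uint64_t addr = 0x1078, len = 16;

	len += addr & LINE_MASK;	/* 16 + 0x78 = 136 bytes to walk */
	addr &= ~(uint64_t)LINE_MASK;	/* round down to 0x1000 */

	printf("addr=0x%llx len=%llu lines touched=%llu\n",
	       (unsigned long long)addr, (unsigned long long)len,
	       (unsigned long long)((len + LINE_MASK) / LINE_SIZE));
	return 0;
}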
Example no. 20
/* IN PROGRESS */
void receive_packet()
{
  cvmx_wqe_t *work = NULL;
  uint8_t *ptr;
  int i;

  printf("Waiting for packet...\n");

  while (!work) {
    /* In standalone CVMX, we have nothing to do if there isn't work,
     * so use the WAIT flag to reduce power usage. */
    work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
  }

  ptr = (uint8_t *) cvmx_phys_to_ptr(work->packet_ptr.s.addr);
  ptr += PAYLOAD_OFFSET;

  // print out the payload bytes of the received packet
  printf("Payload bytes recv: ");
  for (i = 0; i < PAYLOAD_SIZE; i++) {
    printf("%x", *(ptr++));
  }
}
Example no. 21
static void setup_system_info(void)
{
    cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
    memset(system_info, 0, sizeof(cvmx_sysinfo_t));

    system_info->core_mask = 0;
    system_info->init_core = cvmx_get_core_num();

    FILE *infile = fopen("/proc/octeon_info", "r");
    if (infile == NULL)
    {
        perror("Error opening /proc/octeon_info");
        exit(-1);
    }

    while (!feof(infile))
    {
        char buffer[80];
        if (fgets(buffer, sizeof(buffer), infile))
        {
            const char *field = strtok(buffer, " ");
            const char *valueS = strtok(NULL, " ");
            if (field == NULL)
                continue;
            if (valueS == NULL)
                continue;
            long long value;
            sscanf(valueS, "%lli", &value);

            if (strcmp(field, "dram_size:") == 0)
                system_info->system_dram_size = value;
            else if (strcmp(field, "phy_mem_desc_addr:") == 0)
                system_info->phy_mem_desc_ptr = cvmx_phys_to_ptr(value);
            else if (strcmp(field, "eclock_hz:") == 0)
                system_info->cpu_clock_hz = value;
            else if (strcmp(field, "dclock_hz:") == 0)
                system_info->dram_data_rate_hz = value * 2;
            else if (strcmp(field, "spi_clock_hz:") == 0)
                system_info->spi_clock_hz = value;
            else if (strcmp(field, "board_type:") == 0)
                system_info->board_type = value;
            else if (strcmp(field, "board_rev_major:") == 0)
                system_info->board_rev_major = value;
            else if (strcmp(field, "board_rev_minor:") == 0)
                system_info->board_rev_minor = value;
            else if (strcmp(field, "chip_type:") == 0)
                system_info->chip_type = value;
            else if (strcmp(field, "chip_rev_major:") == 0)
                system_info->chip_rev_major = value;
            else if (strcmp(field, "chip_rev_minor:") == 0)
                system_info->chip_rev_minor = value;
            else if (strcmp(field, "board_serial_number:") == 0)
                strncpy(system_info->board_serial_number, valueS, sizeof(system_info->board_serial_number));
            else if (strcmp(field, "mac_addr_base:") == 0)
            {
                int i;
                int m[6];
                sscanf(valueS, "%02x:%02x:%02x:%02x:%02x:%02x", m+0, m+1, m+2, m+3, m+4, m+5);
                for (i=0; i<6; i++)
                    system_info->mac_addr_base[i] = m[i];
            }
            else if (strcmp(field, "mac_addr_count:") == 0)
                system_info->mac_addr_count = value;
            else if (strcmp(field, "32bit_shared_mem_base:") == 0)
                linux_mem32_min = value;
            else if (strcmp(field, "32bit_shared_mem_size:") == 0)
                linux_mem32_max = linux_mem32_min + value - 1;
            else if (strcmp(field, "processor_id:") == 0)
                cvmx_app_init_processor_id = value;
        }
    }
    fclose(infile);


#if 0
    cvmx_dprintf("system_dram_size:       %llu\n", (unsigned long long)system_info->system_dram_size);
    cvmx_dprintf("phy_mem_desc_ptr:       %p\n", system_info->phy_mem_desc_ptr);
    cvmx_dprintf("init_core:              %u\n", system_info->init_core);
    cvmx_dprintf("cpu_clock_hz:           %u\n", system_info->cpu_clock_hz);
    cvmx_dprintf("dram_data_rate_hz:      %u\n", system_info->dram_data_rate_hz);
    cvmx_dprintf("spi_clock_hz:           %u\n", system_info->spi_clock_hz);
    cvmx_dprintf("board_type:             %u\n", system_info->board_type);
    cvmx_dprintf("board_rev_major:        %u\n", system_info->board_rev_major);
    cvmx_dprintf("board_rev_minor:        %u\n", system_info->board_rev_minor);
    cvmx_dprintf("chip_type:              %u\n", system_info->chip_type);
    cvmx_dprintf("chip_rev_major:         %u\n", system_info->chip_rev_major);
    cvmx_dprintf("chip_rev_minor:         %u\n", system_info->chip_rev_minor);
    cvmx_dprintf("mac_addr_base:          %02x:%02x:%02x:%02x:%02x:%02x\n",
               (int)system_info->mac_addr_base[0],
               (int)system_info->mac_addr_base[1],
               (int)system_info->mac_addr_base[2],
               (int)system_info->mac_addr_base[3],
               (int)system_info->mac_addr_base[4],
               (int)system_info->mac_addr_base[5]);
    cvmx_dprintf("mac_addr_count:         %u\n", system_info->mac_addr_count);
    cvmx_dprintf("board_serial_number:    %s\n", system_info->board_serial_number);
#endif
}
Example no. 22
uint32_t
octeon_se_fastpath_fragc_init(SeFastpathCoreContext core,
			      SeFastpath fastpath,
			      SeFastpathFragmentContext fragc,
			      SeFastpathPacketContext pc,
			      size_t mtu,
			      uint8_t df_on_first_fragment)
{
  uint8_t * header;
  size_t packet_len = pc->s->ip_len;

  /* Initialize common fields in the fragment context. */
  fragc->pc = pc;
  fragc->mtu = mtu;
  fragc->offset = 0;

  /* Get a pointer to the packet to be fragmented. */
  header = 
    (uint8_t *)cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset;

  if (pc->s->ip_version_6)
    {
      uint16_t frag_hlen;
      uint16_t frag_data_len;

      fragc->total_len = packet_len - OCTEON_SE_FASTPATH_IP6_HDRLEN;

      /* Compute fragments' header and data lengths. */
      frag_hlen = 
	OCTEON_SE_FASTPATH_IP6_HDRLEN + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN;
      frag_data_len = ((size_t) (mtu - frag_hlen)) & (size_t) ~7;
      OCTEON_SE_ASSERT((frag_data_len > 0) && 
		       (frag_data_len <= (65535 - frag_hlen)));

      /* Store that information into the fragmentation context. */
      fragc->frag_hlen = frag_hlen;
      fragc->frag_data_len = frag_data_len;

      OCTEON_SE_FASTPATH_GET_NEXT_IPV6_FRAG_ID(core, fragc->u.ipv6.id);
      memcpy(fragc->u.ipv6.frag_hdr, header, OCTEON_SE_FASTPATH_IP6_HDRLEN);
      octeon_se_fastpath_packet_buffer_create(fragc->original_pkt,
                                              pc->wqe->packet_ptr,
                                              pc->s->ip_offset + 
					      OCTEON_SE_FASTPATH_IP6_HDRLEN,
                                              fragc->total_len,
                                              pc->wqe->word2.s.bufs);
    }
  else
    {
      /* Check if the packet has DF bit set. */
      if (cvmx_unlikely(pc->s->ipv4_df))
        {
	  OCTEON_SE_DEBUG(7, "Cannot fragment packet. DF bit is set\n");
	  return 1;
        }

      fragc->total_len = packet_len - OCTEON_SE_FASTPATH_IP4_HDRLEN;
      fragc->frag_hlen = OCTEON_SE_FASTPATH_IP4_HDRLEN;

      /* Compute amount of data to go in fragments. */
      fragc->frag_data_len = ((size_t)(mtu - OCTEON_SE_FASTPATH_IP4_HDRLEN)) &
                                      (size_t) ~7;
      
      OCTEON_SE_ASSERT(fragc->frag_data_len > 0 && 
		       fragc->frag_data_len < 65535);

      fragc->u.ipv4.df_on_first_fragment = df_on_first_fragment;
      
      /* Store computed values into the fragmentation context. */
      memcpy(fragc->u.ipv4.frag_hdr, header, OCTEON_SE_FASTPATH_IP4_HDRLEN);
      octeon_se_fastpath_packet_buffer_create(fragc->original_pkt,
                                              pc->wqe->packet_ptr,
                                              pc->s->ip_offset + 
					      OCTEON_SE_FASTPATH_IP4_HDRLEN,
                                              fragc->total_len,
                                              pc->wqe->word2.s.bufs);
    }
  
  return 0;
}
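The per-fragment payload size above is rounded down to a multiple of 8 because IP fragment offsets are expressed in 8-byte units. A standalone check with illustrative MTU values:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t mtu = 1500, hlen = 20;	/* IPv4 header without options */
	size_t frag_data_len = (mtu - hlen) & ~(size_t)7;

	/* 1480 is already a multiple of 8, so nothing is trimmed here;
	 * an odd MTU of 1006 would give (1006 - 20) & ~7 = 984. */
	printf("frag_data_len=%zu\n", frag_data_len);
	return 0;
}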
Example no. 23
SeFastpathPacketContext
octeon_se_fastpath_fragc_next(SeFastpathCoreContext core,
                              SeFastpath fastpath,
                              SeFastpathFragmentContext fragc)
{
  SeFastpathPacketContext frag;
  SeFastpathPacketBufferStruct fragment_buffer[1];
  uint8_t * header;
  cvmx_buf_ptr_t packet_out;
  uint16_t hlen, data_len, len, offset_orig;
  uint16_t fragoff_orig, fragoff, checksum;
  uint8_t is_last_frag;

  /* If an error caused pc to be freed, return NULL to indicate we are done. */
  if (fragc->pc == NULL || fragc->offset >= fragc->total_len)
    return NULL;
  
  hlen = fragc->frag_hlen;
  data_len = fragc->frag_data_len;

  /* Determine the length of the data section of the fragment. */
  if (fragc->offset + data_len < fragc->total_len)
    len = data_len;
  else
    len = fragc->total_len - fragc->offset;
  
  if (fragc->offset + len == fragc->total_len)
    is_last_frag = TRUE;
  else
    is_last_frag = FALSE;

  OCTEON_SE_DEBUG(7, "Sending fragment offset=%d, len=%d\n",
		  fragc->offset, len);

  /* Allocate packet context and state for the fragment. */
  frag = &core->fragment.s;
  memset(frag, 0, sizeof(SeFastpathPacketContextStruct));
  frag->s = &core->fragment_state.s;
  memset(frag->s, 0, sizeof(SeFastpathPacketStateStruct));

  /* Create a new Work Queue entry and then copy extra things in pc. */
  if (cvmx_unlikely(octeon_se_fastpath_fragc_helper_alloc(fragc,
                                                          fragc->pc,
					                  frag,
                                                          len)))
    {
      OCTEON_SE_DEBUG(3, "Unable to create fragment\n");
      return NULL;
    }

  /* For local reference. */ 
  packet_out.u64 = frag->wqe->packet_ptr.u64;

  header = 
    ((uint8_t *)cvmx_phys_to_ptr(packet_out.s.addr)) + frag->s->ip_offset;

  if (frag->s->ip_version_6)
    {
      uint8_t nh;
      OCTEON_SE_DEBUG(9, "Building IPv6 fragment\n");

      /* Assert that headers fit into the first segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > 
		       (frag->s->ip_offset +
			OCTEON_SE_FASTPATH_IP6_HDRLEN + 
			OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN));

      memcpy(header, fragc->u.ipv6.frag_hdr, OCTEON_SE_FASTPATH_IP6_HDRLEN);
      
      OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, len + 
				      OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN);
      OCTEON_SE_FASTPATH_IPH6_NH(header, nh);
      OCTEON_SE_FASTPATH_IPH6_SET_NH(header, 
                                     OCTEON_SE_FASTPATH_IPPROTO_IPV6FRAG);

      /* Create the fragment header and copy it to its place. */
      header += OCTEON_SE_FASTPATH_IP6_HDRLEN;

      header[0] = nh;

      header[SSH_IP6_EXT_FRAGMENT_OFS_RESERVED1] = 0;
      OCTEON_SE_PUT_16BIT(header + 
			  OCTEON_SE_FASTPATH_IP6_EXT_FRAGMENT_OFS_OFFSET,
			  (fragc->offset | (is_last_frag ? 0 : 1)));
      OCTEON_SE_PUT_32BIT(header + 
			  OCTEON_SE_FASTPATH_IP6_EXT_FRAGMENT_OFS_ID,
			  fragc->u.ipv6.id);

      /* Finally, copy the payload. */
      octeon_se_fastpath_packet_buffer_create(fragment_buffer,
                                              packet_out,
                                              frag->s->ip_offset +
					      OCTEON_SE_FASTPATH_IP6_HDRLEN + 
				       OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN,
                                              len,
                                              frag->wqe->word2.s.bufs);
      octeon_se_fastpath_buffer_copy(fragment_buffer,
                                     fragc->original_pkt,
				     len);
    }
  else
    {
      /* Copy packet header to the fragment buffer. */
      OCTEON_SE_DEBUG(9, "Build IPv4 fragment\n");

      /* Assert that the header fits into the first segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > (frag->s->ip_offset +
					    OCTEON_SE_FASTPATH_IP4_HDRLEN));

      /* Copy in the IPv4 header first */
      memcpy(header, fragc->u.ipv4.frag_hdr,OCTEON_SE_FASTPATH_IP4_HDRLEN);

      /* Copy data from the original packet to the fragment data part. */
      octeon_se_fastpath_packet_buffer_create(fragment_buffer,
                                              packet_out,
					      frag->s->ip_offset +
					      OCTEON_SE_FASTPATH_IP4_HDRLEN, 
					      len, 
					      frag->wqe->word2.s.bufs); 
      octeon_se_fastpath_buffer_copy(fragment_buffer,
                                     fragc->original_pkt,
				     len);

      /* Compute new values for fragment offset and flag bits. */
      OCTEON_SE_FASTPATH_IPH4_FRAG(header, fragoff_orig);

      offset_orig = (fragoff_orig & OCTEON_SE_FASTPATH_IP4_FRAG_MASK) << 3;
      fragoff = fragoff_orig & OCTEON_SE_FASTPATH_IPH4_FRAGOFF_RF;
      if (fragc->offset + data_len < fragc->total_len ||
          (fragoff_orig & OCTEON_SE_FASTPATH_IPH4_FRAGOFF_MF))
        fragoff |= OCTEON_SE_FASTPATH_IPH4_FRAGOFF_MF;

      /* If df_on_first_fragment is set and this is the first fragment,
         set DF bit */
      if (fragc->offset == 0 && fragc->u.ipv4.df_on_first_fragment)
        fragoff |= OCTEON_SE_FASTPATH_IPH4_FRAGOFF_DF;

      OCTEON_SE_ASSERT((fragc->offset & 7) == 0); 
      OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header,
                           (fragoff | ((fragc->offset + offset_orig) >> 3)));
      OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, hlen + len);
      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, 0);
      
      checksum = octeon_se_fastpath_ip_cksum(header, hlen);
      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, checksum);
    }

  /* Update next fragment offset. */
  fragc->offset += len;

  /* Return the fragment. */
  return frag;
}
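A hedged sketch of the intended call pattern for the two routines above. The fragc[1] declaration assumes a SeFastpathFragmentContextStruct type following the Struct/pointer naming convention visible elsewhere in this code, and my_send_fragment() is a hypothetical send routine:

/* Fragment pc against the given MTU and emit each fragment in turn. */
SeFastpathFragmentContextStruct fragc[1];
SeFastpathPacketContext frag;

if (octeon_se_fastpath_fragc_init(core, fastpath, fragc, pc, mtu, 0) == 0)
  {
    while ((frag = octeon_se_fastpath_fragc_next(core, fastpath, fragc))
	   != NULL)
      my_send_fragment(frag);
  }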
Example no. 24
/* Parse a packet as HTTP and classify it against the connection state
 * machine (status); returns an http_data with flags set, or NULL if
 * buffer allocation fails. */
http_data * http_parse(cvm_common_wqe_t * swp, State status)
{
		http_data * http = (http_data *) cvmx_phys_to_ptr(cvm_common_alloc_fpa_buffer_sync(CVMX_FPA_PACKET_POOL));
		if(http == NULL)
				return http;
		memset(http, 0, sizeof(http_data));
		char * ptr = (char *)cvmx_phys_to_ptr(swp->hw_wqe.packet_ptr.s.addr);
		int res = -1;
		int pos = 54;	/* Ethernet (14) + IPv4 (20) + TCP (20) headers */

		//Client->Server
		if(swp->hw_wqe.ipprt < portbase + portnum)
		{
				if(StrFind(ptr+pos, 3, "GET") != -1 || StrFind(ptr+pos, 3, "PUT") != -1 || StrFind(ptr+pos, 4, "HTTP") != -1 || StrFind(ptr+pos, 4, "HEAD") != -1 || StrFind(ptr+pos, 6, "DELETE") != -1)
				{
					http->is_http = true;
				}
				return http;
				if(status == S0)
				{
						//16 is the length of "GET / HTTP/1.1\r\n"
						res = StrFind(ptr+pos, 16, "GET / HTTP/1.1\r\n");
						if(res == -1)
								return http;
						//printf("GET / HTTP/1.1	%d\n", res);
						pos += 16;

						//cut a line to find Auth
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n");
						//printf("/r/n  %d\n", res);
						if(res == -1)
								return http;
						pos += res+2;
						//Find Auth
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n");
						//printf("/r/n  %d\n", res);
						if(res == -1)
								return http;
						int i = 0;
						for(i=res-1;i>=0;i--)
						{
								if(ptr[pos+i] == ':')
								{
										break;
								}					
						}
						//19 is the length of "Authorization: AWS "
						//printf("%X,		%X,		%d\n", http->username, ptr, i);
						//printf("%d\n", swp->hw_wqe.len);
						res = StrFind(ptr+pos, 19, "Authorization: AWS ");
						if(i < 0 || res == -1)
						{
								//memset(http, 0, sizeof(http_data));
								return http;
						}
						else
						{
								memcpy(http->username, ptr+pos+19, i-19);									
								http->login = true;
								return http;
						}
				}
				else if(status == S2)
				{
						res = StrFind(ptr+pos, 3, "PUT");
						if(res == -1)
								return http;
						pos += 3;
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "HTTP/1.1\r\n");
						if(res == -1)
								return http;			
						pos += res + 10;
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n\r\n");
						http->put_content = true;
						if(res == -1)
								return http;
						pos += 4 + res;			
						if(swp->hw_wqe.len - pos > 0)
						{
								http->there_is_data = true;
								printf("swp->hw_wqe.len: %d  pos: %d\n",swp->hw_wqe.len ,pos);
								http->pos = pos;
						}
						return http;
				}
				/*
				else if(status != S3 && status != S4)
				{
						printf("Warning: Client->Server, State is %d\n", status);
				} 
				*/
		}
		//Server->Client
		else
		{
				res = StrFind(ptr+pos, 8, "HTTP/1.1");
				if(res != -1)
					http->is_http = true;
				else 
					return http;
				if(status == S1)
				{			
						//17 is the length of "HTTP/1.1 200 OK\r\n"		
						res = StrFind(ptr+pos, 17, "HTTP/1.1 200 OK\r\n");							
						if(res == -1)
								return http;
						http->login_done = true;
						return http;							
				}
				else if(status == S2)
				{
						//17 is the length of "HTTP/1.1 200 OK\r\n"
						res = StrFind(ptr+pos, 17, "HTTP/1.1 200 OK\r\n");
						if(res == -1)
								return http;
						pos += 17;

						//cut a line to find Auth
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n");
						if(res == -1)
								return http;
						pos += res+2;
						//Find Content-Length
						res = StrFind(ptr+pos, 16, "Content-Length: ");
						if(res == -1)
								return http;
						pos += 16;
						if(ptr[pos] == '0')
								return http;
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n");
						if(res == -1)
								return http;
						pos += res+2;
						res = StrFind(ptr+pos, 4, "Etag");
						if(res == -1)
								return http;			
						res = StrFind(ptr+pos, swp->hw_wqe.len-pos, "\r\n\r\n");
						http->get_content = true;
						if(res == -1)
								return http;
						pos += 4 + res;			
						if(swp->hw_wqe.len - pos > 0)
						{
								http->there_is_data = true;
								http->pos = pos;
						}
						return http;			
				}
				else if(status == S3)
				{
						//17 is the length of "HTTP/1.1 200 OK\r\n"
						res = StrFind(ptr+pos, 17, "HTTP/1.1 200 OK\r\n");
						if(res == -1)
								return http;
						http->put_done = true;
						return http;	
				}
				else if(status == S4)
				{
						//8 is the length of "HTTP/1.1"
						res = StrFind(ptr+pos, 8, "HTTP/1.1");
						if(res == -1)
								return http;
						http->get_done = true;
						return http;	
				}
				/*
				else if(status != S0)
				{
						printf("Warning: Server->Client, State is %d\n", status);
				}
				*/
		}
		return http;
}
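StrFind() is not shown in the example, but its call sites suggest it returns the offset of a substring within the first len bytes of a buffer, or -1 when absent. A standalone equivalent under that assumed contract:

#include <string.h>

/* Assumed semantics of StrFind(buf, len, needle):
 * offset of needle within buf[0..len), or -1 when not found. */
static int str_find(const char *buf, int len, const char *needle)
{
	int nlen = (int)strlen(needle);
	int i;

	for (i = 0; i + nlen <= len; i++)
		if (memcmp(buf + i, needle, nlen) == 0)
			return i;
	return -1;
}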
Example no. 25
int cvmx_bootmem_init(void *mem_desc_ptr)
{
    /* Here we set the global pointer to the bootmem descriptor block.  This pointer will
    ** be used directly, so we will set it up to be directly usable by the application.
    ** It is set up as follows for the various runtime/ABI combinations:
    ** Linux 64 bit: Set XKPHYS bit
    ** Linux 32 bit: use mmap to create mapping, use virtual address
    ** CVMX 64 bit:  use physical address directly
    ** CVMX 32 bit:  use physical address directly
    ** Note that the CVMX environment assumes the use of 1-1 TLB mappings so that the physical addresses
    ** can be used directly
    */
    if (!cvmx_bootmem_desc)
    {
#if defined(__linux__) && defined(CVMX_ABI_N32)
        /* For 32 bit, we need to use mmap to create a mapping for the bootmem descriptor */
        int dm_fd = open("/dev/mem", O_RDWR);
        if (dm_fd < 0)
        {
            cvmx_dprintf("ERROR opening /dev/mem for boot descriptor mapping\n");
            return(-1);
        }

        void *base_ptr = mmap(NULL, 
                              sizeof(cvmx_bootmem_desc_t) + sysconf(_SC_PAGESIZE), 
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED, 
                              dm_fd, 
                              ((off_t)mem_desc_ptr) & ~(sysconf(_SC_PAGESIZE) - 1));

        if (MAP_FAILED == base_ptr)
        {
            cvmx_dprintf("Error mapping bootmem descriptor!\n");
            close(dm_fd);
            return(-1);
        }

        /* Adjust pointer to point to bootmem_descriptor, rather than start of page it is in */
        cvmx_bootmem_desc =  base_ptr + (((off_t)mem_desc_ptr) & (sysconf(_SC_PAGESIZE) - 1));

        /* Also setup mapping for named memory block desc. while we are at it.  Here we must keep another
        ** pointer around, as the value in the bootmem descriptor is shared with other applications. */
        base_ptr = mmap(NULL, 
                              sizeof(cvmx_bootmem_named_block_desc_t) * cvmx_bootmem_desc->named_block_num_blocks + sysconf(_SC_PAGESIZE), 
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED, 
                              dm_fd, 
                              ((off_t)cvmx_bootmem_desc->named_block_array_addr) & ~(sysconf(_SC_PAGESIZE) - 1));


        close(dm_fd);

        if (MAP_FAILED == base_ptr)
        {
            cvmx_dprintf("Error mapping named block descriptor!\n");
            return(-1);
        }

        /* Adjust pointer to point to named block array, rather than start of page it is in */
        linux32_named_block_array_ptr =  base_ptr + (((off_t)cvmx_bootmem_desc->named_block_array_addr) & (sysconf(_SC_PAGESIZE) - 1));


#elif defined(__linux__) && defined(CVMX_ABI_64)
        /* Set XKPHYS bit */
        cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));

#else
        /* Not linux, just copy pointer */
        cvmx_bootmem_desc = mem_desc_ptr;
#endif
    }


    return(0);
}
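The 32-bit path above uses the standard mmap() page-alignment trick: the file offset must be page-aligned, so the low bits of the physical address are masked off for the mmap() call and added back to the returned pointer. A standalone sketch of just that arithmetic (the descriptor address is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long page = 4096;		/* stand-in for sysconf(_SC_PAGESIZE) */
	uint64_t phys = 0x41000c40;	/* hypothetical descriptor address */

	uint64_t map_off = phys & ~(uint64_t)(page - 1);  /* offset for mmap() */
	uint64_t in_page = phys & (uint64_t)(page - 1);   /* add to mmap() result */

	printf("mmap offset=0x%llx, pointer adjustment=+0x%llx\n",
	       (unsigned long long)map_off, (unsigned long long)in_page);
	return 0;
}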
Example no. 26
int do_bootloader_update_nor(uint32_t image_addr, int length,
			     uint32_t burn_addr, int failsafe)
{
#if defined(CONFIG_SYS_NO_FLASH)
	printf("ERROR: Bootloader not compiled with NOR flash support\n");
	return 1;
#else
	uint32_t failsafe_size, failsafe_top_remapped;
	uint32_t burn_addr_remapped, image_size, normal_top_remapped;
	flash_info_t *info;
	char tmp[16] __attribute__ ((unused));	/* to hold 32 bit numbers in hex */
	int sector = 0;
	bootloader_header_t *header;
	int rc;

	header = cvmx_phys_to_ptr(image_addr);

	DBGUPD("%s(0x%x, 0x%x, 0x%x, %s)\n", __func__, image_addr, length,
	       burn_addr, failsafe ? "failsafe" : "normal");
	DBGUPD("LOOKUP_STEP                0x%x\n", LOOKUP_STEP);
	DBGUPD("CFG_FLASH_BASE             0x%x\n", CONFIG_SYS_FLASH_BASE);

	/* File with rev 1.1 headers are not relocatable, so _must_ be burned
	 * at the address that they are linked at.
	 */
	if (header->maj_rev == 1 && header->min_rev == 1) {
		if (burn_addr && burn_addr != header->address) {
			printf("ERROR: specified address (0x%x) does not match "
			       "required burn address (0x%llx\n)\n",
			       burn_addr, header->address);
			return 1;
		}
		burn_addr = header->address;
	}

	/* If we have at least one bank of non-zero size, we have some NOR */
	if (!flash_info[0].size) {
		puts("ERROR: No NOR Flash detected on board, can't burn NOR "
		     "bootloader image\n");
		return 1;
	}

	/* check the burn address alignment */
	if ((burn_addr & (LOOKUP_STEP - 1)) != 0) {
		printf("Cannot program normal image at 0x%x: address must be\n"
		       " 0x%x bytes aligned for normal boot lookup\n",
		       burn_addr, LOOKUP_STEP);
		return 1;
	}

	/* for failsafe, the checks are easy */
	if ((failsafe) && (burn_addr != FAILSAFE_BASE)) {
		printf("ERROR: Failsafe image must be burned to address 0x%x\n",
		       FAILSAFE_BASE);
		return 1;
	}

	if (burn_addr && (burn_addr < FAILSAFE_BASE)) {
		printf("ERROR: burn address 0x%x out of boot range\n",
		       burn_addr);
		return 1;
	}

	if (!failsafe) {
#ifndef CONFIG_OCTEON_NO_FAILSAFE
		/* find out where failsafe ends */
		failsafe_size = get_image_size((bootloader_header_t *)
					       CONFIG_SYS_FLASH_BASE);
		if (failsafe_size == 0) {
			/* failsafe does not have header - assume fixed size
			 * old image
			 */
			puts("Failsafe has no valid header, assuming old image. "
			     "Using default failsafe size\n");
			failsafe_size =
			    CONFIG_SYS_NORMAL_BOOTLOADER_BASE - FAILSAFE_BASE;

			/* must default to CONFIG_SYS_NORMAL_BOOTLOADER_BASE */
			if (!burn_addr)
				burn_addr = CONFIG_SYS_NORMAL_BOOTLOADER_BASE;
			else if (CONFIG_SYS_NORMAL_BOOTLOADER_BASE != burn_addr) {
				printf("WARNING: old failsafe image will not be able to start\n"
				       "image at any address but 0x%x\n",
				       CONFIG_SYS_NORMAL_BOOTLOADER_BASE);
#ifdef ERR_ON_OLD_BASE
				return 1;
#endif
			}
		}		/* old failsafe */
#else
		failsafe_size = 0;
#endif		/* CONFIG_OCTEON_NO_FAILSAFE */

		DBGUPD("failsafe size is 0x%x\n", failsafe_size);
		DBGUPD("%s: burn address: 0x%x\n", __func__, burn_addr);
		/* Locate the next flash sector */
		failsafe_top_remapped = CONFIG_SYS_FLASH_BASE + failsafe_size;
		DBGUPD("failsafe_top_remapped 0x%x\n", failsafe_top_remapped);
		info = &flash_info[0];	/* no need to look into any other banks */
		/* scan flash bank sectors */
		for (sector = 0; sector < info->sector_count; ++sector) {
			DBGUPD("%d: 0x%lx\n", sector, info->start[sector]);
			if (failsafe_top_remapped <= info->start[sector])
				break;
		}

		if (sector == info->sector_count) {
			puts("Failsafe takes all the flash?? Cannot burn normal image\n");
			return 1;
		}

		/* Move failsafe top up to the sector boundary */
		failsafe_top_remapped = info->start[sector];

		DBGUPD("Found next sector after failsafe is at remapped addr 0x%x\n",
		       failsafe_top_remapped);
		failsafe_size = failsafe_top_remapped - CONFIG_SYS_FLASH_BASE;
		DBGUPD("Aligned up failsafe size is 0x%x\n", failsafe_size);

		/* default to the first sector after the failsafe */
		if (!burn_addr) {
			burn_addr = FAILSAFE_BASE + failsafe_size;
			DBGUPD("Setting burn address to 0x%x, failsafe size: 0x%x\n",
			       burn_addr, failsafe_size);
		/* check for overlap */
		} else if (FAILSAFE_BASE + failsafe_size > burn_addr) {
			puts("ERROR: cannot burn: image overlaps with failsafe\n");
			printf("burn address is 0x%x, in-flash failsafe top is 0x%x\n",
			       burn_addr, FAILSAFE_BASE + failsafe_size);
			return 1;
		}
		/* done with failsafe checks */
	}

	if (length)
		image_size = length;
	else
		image_size = get_image_size((bootloader_header_t *)image_addr);
	if (!image_size) {
		/* This is a weird case; should never happen with a good image. */
		printf("ERROR: image has size field set to 0??\n");
		return 1;
	}

	/* finally check the burn address' CKSSEG limit */
	if ((burn_addr + image_size) >= (uint64_t) CKSSEG) {
		puts("ERROR: cannot burn: image exceeds KSEG1 area\n");
		printf("burnadr is 0x%x, top is 0x%x\n", burn_addr,
		       burn_addr + image_size);
		return 1;
	}
	DBGUPD("burn_addr: 0x%x, image_size: 0x%x\n", burn_addr, image_size);
	/* Look up the last sector to use by the new image */
	burn_addr_remapped = burn_addr - FAILSAFE_BASE + CONFIG_SYS_FLASH_BASE;
	DBGUPD("burn_addr_remapped 0x%x\n", burn_addr_remapped);
	normal_top_remapped = burn_addr_remapped + image_size;
	/* continue flash scan - now for normal image top */
	if (failsafe)
		sector = 0;	/* is failsafe, we start from first sector here */
	for (; sector < info->sector_count; ++sector) {
		DBGUPD("%d: 0x%lx\n", sector, info->start[sector]);
		if (normal_top_remapped <= info->start[sector])
			break;
	}
	if (sector == info->sector_count) {
		puts("ERROR: not enough room in flash bank for the image??\n");
		return 1;
	}
	/* align up for environment variable set up */
	normal_top_remapped = info->start[sector];

	DBGUPD("normal_top_remapped 0x%x\n", normal_top_remapped);
	/* if there is no header (length != 0) - check burn address and
	 * give warning
	 */
	if (length && CONFIG_SYS_NORMAL_BOOTLOADER_BASE != burn_addr) {
#ifdef ERR_ON_OLD_BASE
		puts("ERROR: burning headerless image at an address other than the default.\n"
		     "Image look up will not work.\n");
		printf("Default burn address: 0x%x requested burn address: 0x%x\n",
		       CONFIG_SYS_NORMAL_BOOTLOADER_BASE, burn_addr);
		return 1;
#else
		puts("WARNING: burning headerless image at an address other than the default.\n"
		     "Image look up will not work.\n");
		printf("Default burn address: 0x%x requested burn address: 0x%x\n",
		       CONFIG_SYS_NORMAL_BOOTLOADER_BASE, burn_addr);
#endif
	}

	printf("Image at 0x%x is ready for burning\n", image_addr);
	printf("           Header version: %d.%d\n", header->maj_rev,
	       header->min_rev);
	printf("           Header size %d, data size %d\n", header->hlen,
	       header->dlen);
	printf("           Header crc 0x%x, data crc 0x%x\n", header->hcrc,
	       header->dcrc);
	printf("           Image link address is 0x%llx\n", header->address);
	printf("           Image burn address on flash is 0x%x\n", burn_addr);
	printf("           Image size on flash 0x%x\n",
	       normal_top_remapped - burn_addr_remapped);

	DBGUPD("burn_addr_remapped 0x%x normal_top_remapped 0x%x\n",
	       burn_addr_remapped, normal_top_remapped);
	if (flash_sect_protect(0, burn_addr_remapped, normal_top_remapped - 1)) {
		puts("Flash unprotect failed\n");
		return 1;
	}
	if (flash_sect_erase(burn_addr_remapped, normal_top_remapped - 1)) {
		puts("Flash erase failed\n");
		return 1;
	}

	puts("Copy to Flash... ");
	/* Note: Here we copy more than we should - whatever is after the image
	 * in memory gets copied to flash.
	 */
	rc = flash_write((char *)image_addr, burn_addr_remapped,
			 normal_top_remapped - burn_addr_remapped);
	if (rc != 0) {
		flash_perror(rc);
		return 1;
	}
	puts("done\n");

#ifndef CONFIG_ENV_IS_IN_NAND
	/* Erase the environment so that older bootloader will use its default
	 * environment.  This will ensure that the default
	 * 'bootloader_flash_update' macro is there.  HOWEVER, this is only
	 * useful if a legacy sized failsafe u-boot image is present.
	 * If a new larger failsafe is present, then that macro will be incorrect
	 * and will erase part of the failsafe.
	 * The 1.9.0 u-boot needs to have its link address and
	 * normal_bootloader_size/base modified to work with this...
	 */
	if (header->maj_rev == 1 && header->min_rev == 1) {
		puts("Erasing environment due to u-boot downgrade.\n");
		flash_sect_protect(0, CONFIG_ENV_ADDR,
				   CONFIG_ENV_ADDR + CONFIG_ENV_SIZE - 1);
		if (flash_sect_erase
		    (CONFIG_ENV_ADDR, CONFIG_ENV_ADDR + CONFIG_ENV_SIZE - 1)) {
			puts("Environment erase failed\n");
			return 1;
		}

	}
#endif
	return 0;
#endif
}
Example no. 27
/** Execute outbound transforms */
SeFastpathRet
octeon_se_fastpath_transform_out(SeFastpathCoreContext core,
				 SeFastpath fastpath,
				 SeFastpathPacketContext pc)
{
  cvmx_buf_ptr_t packet_out;
  uint64_t packet_out_num_segs;
  size_t packet_out_len;
  SeFastpathTransformData se_trd;
  SeFastpathCombinedTransform combined;
  SeFastpathPacketBufferStruct src, dst;
  SeFastpathEspExtraInfoStruct extra_info[1];
  SeFastpathMacExtraInfoStruct mac_info[1];
  SeFastpathRet ret;
  uint8_t *header;
  uint32_t trd_i, tos, flow_label;
  uint64_t ipsec_seq;
  uint16_t csum, prefix_ofs;
  uint16_t esp_ah_ofs, prefix_len = 0, trailer_len = 0, pad_len = 0;
  uint8_t esp_ah_nh;
  uint64_t icv[OCTEON_SE_FASTPATH_MAX_HASH_WORDS] = { 0 };
  size_t i;
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  size_t icv_pad_len = 0;
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
  uint32_t run_time;
  size_t alignment = 0;
#ifdef OCTEON_SE_FASTPATH_STATISTICS
  size_t out_octets;
#endif /* OCTEON_SE_FASTPATH_STATISTICS */
  
  OCTEON_SE_DEBUG(9, "Execute transform out\n");

  packet_out.u64 = 0;

  OCTEON_SE_ASSERT(pc->transform_index != OCTEON_SE_FASTPATH_INVALID_INDEX);
  trd_i = pc->transform_index & 0x00ffffff;
  OCTEON_SE_ASSERT(trd_i < OCTEON_SE_FASTPATH_TRD_TABLE_SIZE);

  se_trd = OCTEON_SE_FASTPATH_TRD(fastpath, trd_i);
  OCTEON_SE_FASTPATH_TRD_READ_LOCK(fastpath, trd_i, se_trd);

  OCTEON_SE_FASTPATH_PREFETCH_TRD(se_trd);
  
  /* If transform is complex, pass packet to slowpath. */
  if (cvmx_unlikely(se_trd->is_special))
    {
      OCTEON_SE_DEBUG(9, "Special transform %08x, passing to slowpath\n",
		      se_trd->transform);
      goto slowpath;
    }

  combined = octeon_se_fastpath_get_combined_transform(se_trd->transform,
                                                   se_trd->mac_key_size);
  if (cvmx_unlikely(combined == NULL))
    {
      OCTEON_SE_DEBUG(9, "Unsupported transform %08x, passing to slowpath\n",
		      se_trd->transform);
      goto slowpath;
    }
  
  /* Update trd output timestamp. */
  run_time = cvmx_fau_fetch_and_add32(OCTEON_SE_FASTPATH_FAU_RUNTIME, 0);
  cvmx_atomic_set32((int32_t *) &se_trd->last_out_packet_time,
		    (int32_t) run_time);

  (*combined->init)(core->transform_context,
                    se_trd->keymat + OCTEON_MAX_KEYMAT_LEN /2,
		    se_trd->cipher_key_size,
		    se_trd->keymat + OCTEON_MAX_KEYMAT_LEN /2 
		    + OCTEON_MAX_ESP_KEY_BITS /8,
		    se_trd->mac_key_size);
  
  prefix_ofs = pc->s->ip_offset;

  /* Check ttl. */
  if (cvmx_unlikely(pc->s->ttl == 0))
    {
      OCTEON_SE_DEBUG(3, "Zero TTL, dropping\n");
      goto corrupt;
    }

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_unlikely(!se_trd->tunnel_mode))
    {
      /* In transport mode insert the ESP/AH header between IP 
	 and transport headers. */
      prefix_ofs += pc->s->tr_offset;
      esp_ah_nh = pc->s->ipproto;
      prefix_len = 0;
    }
  else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    {      
      /* In tunnel mode insert IP and ESP/AH headers before IP header. */
      if (se_trd->ip_version_6)
	prefix_len = OCTEON_SE_FASTPATH_IP6_HDRLEN;
      else
	prefix_len = OCTEON_SE_FASTPATH_IP4_HDRLEN;
      
      if (pc->s->ip_version_6)
	esp_ah_nh = OCTEON_SE_FASTPATH_IPPROTO_IPV6;
      else
	esp_ah_nh = OCTEON_SE_FASTPATH_IPPROTO_IPIP;
    }
  
  /* Calculate IPsec overhead. */
  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_NATT
  /* Reserve space for UDP NAT-T. */
  if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_NATT)
    prefix_len += OCTEON_SE_FASTPATH_UDP_HDRLEN;
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_NATT */

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH))
    {
      prefix_len += OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2




      if (cvmx_unlikely((se_trd->ip_version_6 == 1) && 
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
        {
          icv_pad_len = 4;
          prefix_len += 4; /* Align AH header to 64 bit boundary */
        }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */

      trailer_len = 0;
      pad_len = 0;
    }
  else if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_ESP)
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
    {
      prefix_len += (OCTEON_SE_FASTPATH_ESP_HDRLEN + combined->cipher_iv_len);
      trailer_len = 2 + combined->icv_len;
      
      pad_len = (pc->s->ip_len + pc->s->ip_offset - prefix_ofs
		 + 2) % combined->pad_boundary;
      if (pad_len != 0)
	pad_len = combined->pad_boundary - pad_len;
    }
    
  /* The actual length of the packet */
  packet_out_len = pc->s->ip_len + prefix_len + pad_len + trailer_len;
  OCTEON_SE_DEBUG(9, "Resultant packet len is %d\n", (int) packet_out_len);

  /* Check result packet length. */
  if (cvmx_unlikely(se_trd->pmtu_received && pc->mtu > se_trd->pmtu_received))
    pc->mtu = se_trd->pmtu_received;
  
  ret = octeon_se_fastpath_transform_check_pmtu(pc, packet_out_len);
  if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_DROP))
    goto drop;
  else if (cvmx_unlikely(ret == OCTEON_SE_FASTPATH_RET_SLOWPATH))
    goto slowpath;

  /* In tunnel mode decrement ttl of inner header. */
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_likely(se_trd->tunnel_mode))
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    {      
      header = cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset;

      if (pc->s->ip_version_6)
	{
	  /* Assert that header is in the first packet segment */
	  OCTEON_SE_ASSERT(pc->wqe->packet_ptr.s.size 
			   >= OCTEON_SE_FASTPATH_IP6_HDRLEN);	  
	  OCTEON_SE_FASTPATH_IPH6_SET_HL(header, pc->s->ttl - 1);
	}
      else
	{
	  /* Assert that header is in the first packet segment */
	  OCTEON_SE_ASSERT(pc->wqe->packet_ptr.s.size
			   >= OCTEON_SE_FASTPATH_IP4_HDRLEN);	  
	  OCTEON_SE_FASTPATH_IPH4_SET_TTL(header, pc->s->ttl - 1);
	  OCTEON_SE_FASTPATH_IPH4_CHECKSUM(header, csum);
	  csum = octeon_se_fastpath_csum_update_byte(csum, SSH_IPH4_OFS_TTL, 
						     pc->s->ttl,
						     pc->s->ttl - 1);
	  OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
	}
    }

  /* Save df bit processing state */
  pc->s->df_bit_processing = se_trd->df_bit_processing;

  /* Allocate packet buffer chain for result packet.
     Request that crypto result offset is 8 byte aligned. */
  alignment =
    OCTEON_SE_ALIGN_64(prefix_ofs + prefix_len) - (prefix_ofs + prefix_len);
  
  packet_out.u64 = 
    octeon_se_fastpath_alloc_packet_chain(packet_out_len + pc->s->ip_offset,
					  alignment,
					  &packet_out_num_segs);
  
  if (cvmx_unlikely(packet_out.u64 == 0))
    {
      OCTEON_SE_DEBUG(3, "Result packet allocation failed\n");
      goto drop;
    }

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  /* In case of transport mode copy the l3 header.*/
  if (cvmx_unlikely(prefix_ofs > pc->s->ip_offset))
    {
      OCTEON_SE_DEBUG(9, "Copying headers to %p\n",
		      cvmx_phys_to_ptr(packet_out.s.addr) + pc->s->ip_offset);
    
      /* Assert that l3 headers are in the first packet segment. */
      OCTEON_SE_ASSERT(packet_out.s.size > prefix_ofs);
      memcpy(cvmx_phys_to_ptr(packet_out.s.addr) + pc->s->ip_offset, 
	     cvmx_phys_to_ptr(pc->wqe->packet_ptr.s.addr) + pc->s->ip_offset, 
	     prefix_ofs - pc->s->ip_offset);
    }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */

  /* Prepare Source buffer */
  octeon_se_fastpath_packet_buffer_create(&src, pc->wqe->packet_ptr, 
					  prefix_ofs,
					  pc->s->ip_len + pc->s->ip_offset 
					  - prefix_ofs,
					  pc->wqe->word2.s.bufs);

  /* Count the number of bytes input to crypto processing. */
  OCTEON_SE_FASTPATH_STATS(out_octets =
			   pc->s->ip_len + pc->s->ip_offset - prefix_ofs);
  
  /* Build headers */

  header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;

  /* Build outer header for tunnel mode and modify IP header for 
     transport mode.*/

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE
  if (cvmx_unlikely(!se_trd->tunnel_mode && pc->s->ip_version_6 == 0))
    {
      /* IPv4 transport mode. */
      OCTEON_SE_DEBUG(9, "Modifying IPv4 header at %p\n", header);
      
      /* Modify original IPv4 header and change IP protocol and len. */
      OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, packet_out_len);
      OCTEON_SE_FASTPATH_IPH4_SET_PROTO(header, se_trd->nh); 
      OCTEON_SE_FASTPATH_IPH4_CHECKSUM(header, csum);

      csum = 
	octeon_se_fastpath_csum_update_byte(csum,
					    OCTEON_SE_FASTPATH_IPH4_OFS_PROTO,
					    pc->s->ipproto, se_trd->nh);
      csum = 
	octeon_se_fastpath_csum_update_short(csum, 
					     OCTEON_SE_FASTPATH_IPH4_OFS_LEN,
					     pc->s->ip_len, packet_out_len);

      OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
    }
  else if (cvmx_unlikely(!se_trd->tunnel_mode && pc->s->ip_version_6 == 1))
    {
      /* IPv6 transport mode. */
      OCTEON_SE_DEBUG(9, "Modifying IPv6 header at %p\n", header);
      OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, packet_out_len - 
				      OCTEON_SE_FASTPATH_IP6_HDRLEN);
      OCTEON_SE_FASTPATH_IPH6_SET_NH(header, se_trd->nh);
    }
  else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_TRANSPORT_MODE */
    if (se_trd->ip_version_6 == 0)
      {
	OCTEON_SE_ASSERT(se_trd->tunnel_mode);
	
	/* IPv4 tunnel mode. */
	OCTEON_SE_DEBUG(9, "Building outer IPv4 header at %p\n", header);
	
	OCTEON_SE_ASSERT(packet_out.s.size > 
			 prefix_ofs + OCTEON_SE_FASTPATH_IP4_HDRLEN);
	
	OCTEON_SE_FASTPATH_IPH4_SET_VERSION(header, 4);
	OCTEON_SE_FASTPATH_IPH4_SET_HLEN(header, 5);
	



	tos = 0;
	OCTEON_SE_FASTPATH_IPH4_SET_TOS(header, tos);
	
	OCTEON_SE_FASTPATH_IPH4_SET_LEN(header, packet_out_len);
	
	if (pc->s->df_bit_processing == OCTEON_SE_FASTPATH_DF_CLEAR
	    || (pc->s->df_bit_processing == OCTEON_SE_FASTPATH_DF_KEEP
		&& pc->s->ipv4_df == 0))
	  {
	    uint32_t id;
	    
	    OCTEON_SE_FASTPATH_GET_NEXT_IPV4_PACKET_ID(core, id);
	    OCTEON_SE_FASTPATH_IPH4_SET_ID(header, id);
	    OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header, 0);
	    pc->s->ipv4_df = 0;
	  }
	else
	  {
	    OCTEON_SE_FASTPATH_IPH4_SET_ID(header, 0);
	    OCTEON_SE_FASTPATH_IPH4_SET_FRAG(header,
					   OCTEON_SE_FASTPATH_IPH4_FRAGOFF_DF);
	    pc->s->ipv4_df = 1;
	  }
	
	OCTEON_SE_FASTPATH_IPH4_SET_TTL(header,
				       OCTEON_SE_FASTPATH_IP4_TUNNEL_MODE_TTL);
	OCTEON_SE_FASTPATH_IPH4_SET_PROTO(header, se_trd->nh);
	OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, 0);
	OCTEON_SE_FASTPATH_IPH4_SET_SRC(header, se_trd->own_addr_low);
	OCTEON_SE_FASTPATH_IPH4_SET_DST(header, se_trd->gw_addr_low);
	
	csum = octeon_se_fastpath_ip_cksum(header,
					   OCTEON_SE_FASTPATH_IP4_HDRLEN);
	OCTEON_SE_FASTPATH_IPH4_SET_CHECKSUM(header, csum);
	
	prefix_ofs += OCTEON_SE_FASTPATH_IP4_HDRLEN;
      }
    else if (se_trd->ip_version_6 == 1)
      {     
	OCTEON_SE_ASSERT(se_trd->tunnel_mode);
	
	/* IPv6 tunnel mode. */
	OCTEON_SE_DEBUG(9, "Building outer IPv6 header at %p\n", header);
	
	OCTEON_SE_FASTPATH_IPH6_SET_VERSION(header, 6);
	



	tos = 0;
	OCTEON_SE_FASTPATH_IPH6_SET_CLASS(header, tos);
	



	flow_label = 0;
	OCTEON_SE_FASTPATH_IPH6_SET_FLOW(header, flow_label);
	
	OCTEON_SE_FASTPATH_IPH6_SET_LEN(header, packet_out_len - 
					OCTEON_SE_FASTPATH_IP6_HDRLEN);
	OCTEON_SE_FASTPATH_IPH6_SET_NH(header, se_trd->nh);
	OCTEON_SE_FASTPATH_IPH6_SET_HL(header,
				       OCTEON_SE_FASTPATH_IP6_TUNNEL_MODE_HL);
	OCTEON_SE_FASTPATH_IPH6_SET_SRC_LOW(header, se_trd->own_addr_low);
	OCTEON_SE_FASTPATH_IPH6_SET_SRC_HIGH(header, se_trd->own_addr_high);
	
	OCTEON_SE_FASTPATH_IPH6_SET_DST_LOW(header, se_trd->gw_addr_low);
	OCTEON_SE_FASTPATH_IPH6_SET_DST_HIGH(header, se_trd->gw_addr_high);
	prefix_ofs += OCTEON_SE_FASTPATH_IP6_HDRLEN;
      }
  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_NATT
  /* Should we add a NAT-T header as well? */
  if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_NATT))
    {
      header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;
      
      OCTEON_SE_DEBUG(9, "Building UDP NAT-T header at %p\n", header);
      
      OCTEON_SE_ASSERT(packet_out.s.size > 
		       prefix_ofs + OCTEON_SE_FASTPATH_UDP_HDRLEN);
      OCTEON_SE_ASSERT((se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH) == 0);
      OCTEON_SE_ASSERT(se_trd->nh == OCTEON_SE_FASTPATH_IPPROTO_UDP);
      
      OCTEON_SE_FASTPATH_UDPH_SET_SRCPORT(header, se_trd->natt_local_port); 
      OCTEON_SE_FASTPATH_UDPH_SET_DSTPORT(header, se_trd->natt_remote_port); 
      OCTEON_SE_FASTPATH_UDPH_SET_LEN(header, 
				      packet_out_len - 
				      (prefix_ofs - pc->s->ip_offset));
      OCTEON_SE_FASTPATH_UDPH_SET_CHECKSUM(header, 0);

      prefix_ofs += OCTEON_SE_FASTPATH_UDP_HDRLEN;
    }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_NATT */

  /* Build ESP/AH */
  esp_ah_ofs = prefix_ofs;
  header = ((uint8_t *) cvmx_phys_to_ptr(packet_out.s.addr)) + prefix_ofs;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AH
  if (se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_AH)
    {
      uint32_t low_seq;

      OCTEON_SE_DEBUG(9, "Building AH header at %p\n", header);

      OCTEON_SE_ASSERT(packet_out.s.size >
		       prefix_ofs + OCTEON_SE_FASTPATH_AH_HDRLEN +
		       combined->icv_len + icv_pad_len);

      /* Get and increment next sequence atomically. Note that se_trd
	 contains the last sequence number transmitted, thus sequence
	 is incremented by one here. */
      ipsec_seq = 
	(uint64_t) cvmx_atomic_fetch_and_add64((int64_t *)&se_trd->seq, 1);
      ipsec_seq++;

      OCTEON_SE_FASTPATH_AHH_SET_NH(header, esp_ah_nh);
      OCTEON_SE_FASTPATH_AHH_SET_LEN(header, 
				     (combined->icv_len + icv_pad_len + 12) / 4
				     - 2);
      OCTEON_SE_FASTPATH_AHH_SET_RESERVED(header, 0);
      OCTEON_SE_FASTPATH_AHH_SET_SPI(header, se_trd->spi_out);
      CVMX_DEXT(low_seq, ipsec_seq, 0, 32);
      OCTEON_SE_FASTPATH_AHH_SET_SEQ(header, low_seq);
      
      prefix_ofs += OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

      /* ICV computation also needs ICV field initialized to zero. */
      memcpy(mac_info->prefix.u8, header, OCTEON_SE_FASTPATH_AH_HDRLEN);
      memset(mac_info->prefix.u8 + OCTEON_SE_FASTPATH_AH_HDRLEN, 0,
	     combined->icv_len);

      mac_info->prefix_len = OCTEON_SE_FASTPATH_AH_HDRLEN + combined->icv_len;

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2      
      if (cvmx_unlikely((se_trd->ip_version_6 == 1) && 
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
        {
          prefix_ofs += 4;
          mac_info->prefix_len += 4;
	  
          /* Use IPsec seq as AH padding for making 64 bit aligned. */
          OCTEON_SE_PUT_32BIT_ALIGNED(mac_info->prefix.u8 + 
				      OCTEON_SE_FASTPATH_AH_HDRLEN +
				      combined->icv_len, 
                                      low_seq);
        }
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
      if (cvmx_unlikely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
        {
          CVMX_DEXT(mac_info->suffix, ipsec_seq, 32, 32);
          mac_info->suffix_available = 1;
	}
      else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
	mac_info->suffix_available = 0;

      /* Assert that crypto offset is 8 byte aligned */
      OCTEON_SE_ASSERT(((uint64_t) (cvmx_phys_to_ptr(packet_out.s.addr) 
				    + prefix_ofs)) % 8 == 0);
      
      octeon_se_fastpath_packet_buffer_create(&dst, packet_out, 
                                              prefix_ofs,
					      packet_out_len
					      + pc->s->ip_offset,  
                                              packet_out_num_segs);

      if (se_trd->ip_version_6 == 1)
	octeon_se_fastpath_mac_add_ah_header6(packet_out,
	                        	      pc->s->ip_offset,
					      combined->update,
					      core->transform_context,
					      0);
      else
	octeon_se_fastpath_mac_add_ah_header4(packet_out,
	                                      pc->s->ip_offset,
					      combined->update,
					      core->transform_context,
					      0);

      OCTEON_SE_DEBUG(9, "MAC prefix, len %d\n", mac_info->prefix_len);
      OCTEON_SE_HEXDUMP(9, mac_info->prefix.u8, mac_info->prefix_len);

      /* Do the actual transform */
      (*combined->encrypt)(core->transform_context,
			   &dst,
			   &src,
			   mac_info,
			   NULL, icv);
      
      /* Copy ICV to packet. */
      if (cvmx_likely(combined->icv_len % 4 == 0))
	{
	  for (i = 0; i < combined->icv_len; i += 4)
	    {
	      OCTEON_SE_PUT_32BIT_ALIGNED(cvmx_phys_to_ptr(packet_out.s.addr)
					  + esp_ah_ofs 
					  + OCTEON_SE_FASTPATH_AH_HDRLEN + i,
					  *(uint32_t *)(((uint8_t *)icv) + i));
	    }
	}
      else
	{
	  memcpy(cvmx_phys_to_ptr(packet_out.s.addr)
		 + esp_ah_ofs + OCTEON_SE_FASTPATH_AH_HDRLEN,
		 icv, combined->icv_len);
	}

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_SHA2
      if (cvmx_unlikely((se_trd->ip_version_6 == 1) && 
			(se_trd->transform & OCTEON_SE_FASTPATH_MAC_HMAC_SHA2))
	  )
	{
	  /* Use IPsec seq as AH padding for making 64 bit aligned. */
	  OCTEON_SE_PUT_32BIT(cvmx_phys_to_ptr(packet_out.s.addr)
			      + esp_ah_ofs 
			      + OCTEON_SE_FASTPATH_AH_HDRLEN
			      + combined->icv_len, 
			      low_seq);
	}
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_SHA2 */
    }
  else if (cvmx_likely(se_trd->transform & OCTEON_SE_FASTPATH_IPSEC_ESP))
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AH */
    {
      uint32_t low_seq;

      OCTEON_SE_DEBUG(9, "Building ESP header at %p\n", header);

      /* Assert that there is enough space for ESP */
      OCTEON_SE_ASSERT(packet_out.s.size >
		       prefix_ofs + OCTEON_SE_FASTPATH_ESP_HDRLEN);

      /* Get and increment next sequence atomically. Note that se_trd
	 contains the last sequence number transmitted, thus sequence
	 is incremented by one here. */
      ipsec_seq = 
	(uint64_t) cvmx_atomic_fetch_and_add64((int64_t *)&se_trd->seq, 1);
      ipsec_seq++;

      /* Build ESP header. */
      OCTEON_SE_FASTPATH_ESPH_SET_SPI(header, se_trd->spi_out);
      CVMX_DEXT(low_seq, ipsec_seq, 0, 32);
      OCTEON_SE_FASTPATH_ESPH_SET_SEQ(header, low_seq);
      prefix_ofs += OCTEON_SE_FASTPATH_ESP_HDRLEN;

      /* Fill in extra info for transform. */
      extra_info->pad_len = pad_len;
      extra_info->nh = esp_ah_nh;

      /* Fill in extra data for the MAC. */
      OCTEON_SE_PUT_32BIT_ALIGNED(mac_info->prefix.u8, se_trd->spi_out);

#ifdef OCTEON_SE_FASTPATH_TRANSFORM_AES_GCM
      if (cvmx_likely(combined->is_auth_cipher))
        {
	  /* Extract cipher nonce. */
          OCTEON_SE_ASSERT(se_trd->cipher_nonce_size == 4);
          OCTEON_SE_GET_32BIT_ALIGNED(se_trd->keymat +
                                      OCTEON_MAX_KEYMAT_LEN / 2 +
                                      se_trd->cipher_key_size,
                                      extra_info->cipher_nonce);
	  
          /* Use IPsec seq# as counter. */ 
          extra_info->iv[0] = ipsec_seq;
	  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
          if (cvmx_unlikely(se_trd->transform & 
			    OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
            {
              OCTEON_SE_PUT_64BIT(&mac_info->prefix.u8[4], ipsec_seq);
              mac_info->prefix_len = 12;
            }
          else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
            {
              OCTEON_SE_PUT_32BIT_ALIGNED(&mac_info->prefix.u8[4], low_seq);
              mac_info->prefix_len = 8;
            }
        }
      else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_AES_GCM */
        {
          for (i = 0; i < combined->cipher_iv_len / 8; i++)
            extra_info->iv[i] = cvmx_rng_get_random64();
	  
          /* Prepare extra mac information */
          OCTEON_SE_PUT_32BIT_ALIGNED(&mac_info->prefix.u8[4], low_seq);
          mac_info->prefix_len = 8;
	  
#ifdef OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ
          if (cvmx_unlikely(se_trd->transform & 
			    OCTEON_SE_FASTPATH_IPSEC_LONGSEQ))
            {
	      CVMX_DEXT(mac_info->suffix, ipsec_seq, 32, 32);
	      mac_info->suffix_available = 1;
            }
          else
#endif /* OCTEON_SE_FASTPATH_TRANSFORM_LONGSEQ */
	    mac_info->suffix_available = 0;
        }

      /* Assert that crypto offset is 8 byte aligned */
      OCTEON_SE_ASSERT(((uint64_t) (cvmx_phys_to_ptr(packet_out.s.addr) 
				    + prefix_ofs)) % 8 == 0);
      
      octeon_se_fastpath_packet_buffer_create(&dst, packet_out,
                                              prefix_ofs,
					      packet_out_len
					      + pc->s->ip_offset
					      - prefix_ofs,
                                              packet_out_num_segs);
      
      OCTEON_SE_DEBUG(9, "Performing crypto transform\n");

      /* Do the actual transform. */
      (*combined->encrypt)(core->transform_context,
			   &dst,
			   &src,
			   mac_info,
			   extra_info, icv);
      
      /* The trailer is appended at the end of the encrypted data. The
	 write pointer already points at the correct location, which may
	 be unaligned when AES-GCM is used. */
      OCTEON_SE_ASSERT(dst.total_bytes == combined->icv_len);
      
      OCTEON_SE_DEBUG(9, "Inserting ICV, len %d:\n", (int) combined->icv_len);
      OCTEON_SE_HEXDUMP(9, icv, combined->icv_len);

      octeon_se_fastpath_buffer_copy_in(&dst, icv, combined->icv_len);
    }

  /* Update trd statistics only after successful encryption. */
  OCTEON_SE_FASTPATH_STATS({
    cvmx_atomic_add64((int64_t *) &se_trd->out_octets, out_octets);
    cvmx_atomic_add64((int64_t *) &se_trd->out_packets, 1);
  });
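
A note on the sequence-number idiom used twice above: se_trd->seq holds the last sequence number transmitted, so each core atomically post-increments it and sends the incremented value. Below is a minimal stand-alone sketch of that pattern; the helper names trd_seq_next() and seq_low32() are ours, not from the fastpath sources.

#include <stdint.h>
#include "cvmx.h"
#include "cvmx-atomic.h"

/* Hypothetical helper: atomically allocate the next outbound IPsec
 * sequence number from a shared counter holding the last value sent.
 * Safe to call concurrently from multiple cores. */
static inline uint64_t trd_seq_next(int64_t *last_seq)
{
    /* cvmx_atomic_fetch_and_add64() returns the value *before* the
     * add, i.e. the last sequence number transmitted, hence the +1. */
    return (uint64_t)cvmx_atomic_fetch_and_add64(last_seq, 1) + 1;
}

/* The low 32 bits go on the wire in the ESP/AH header; the high 32
 * bits only feed the MAC when extended sequence numbers are in use. */
static inline uint32_t seq_low32(uint64_t ipsec_seq)
{
    uint32_t low_seq;
    CVMX_DEXT(low_seq, ipsec_seq, 0, 32);   /* extract bits 0..31 */
    return low_seq;
}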
Example no. 28
0
/**
 * Initialize the QLM layer
 */
void cvmx_qlm_init(void)
{
    int qlm;
    int qlm_jtag_length;
    char *qlm_jtag_name = "cvmx_qlm_jtag";
    int qlm_jtag_size = CVMX_QLM_JTAG_UINT32 * 8 * 4;
    static uint64_t qlm_base = 0;
    const cvmx_bootmem_named_block_desc_t *desc;
    
#ifndef CVMX_BUILD_FOR_LINUX_HOST
    /* Skip actual JTAG accesses on simulator */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        return;
#endif

    qlm_jtag_length = cvmx_qlm_jtag_get_length();

    if (4 * qlm_jtag_length > (int)sizeof(__cvmx_qlm_jtag_xor_ref[0]) * 8)
    {
        cvmx_dprintf("ERROR: cvmx_qlm_init: JTAG chain larger than XOR ref size\n");
        return;
    }

    /* No need to initialize the initial JTAG state if cvmx_qlm_jtag
       named block is already created. */
    if ((desc = cvmx_bootmem_find_named_block(qlm_jtag_name)) != NULL)
    {
#ifdef CVMX_BUILD_FOR_LINUX_HOST
        char buffer[qlm_jtag_size];

        octeon_remote_read_mem(buffer, desc->base_addr, qlm_jtag_size);
        memcpy(__cvmx_qlm_jtag_xor_ref, buffer, qlm_jtag_size);
#else
        __cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(desc->base_addr);
#endif
        /* Initialize the internal JTAG */
        cvmx_helper_qlm_jtag_init();
        return;
    }

    /* Create named block to store the initial JTAG state. */
    qlm_base = cvmx_bootmem_phy_named_block_alloc(qlm_jtag_size, 0, 0, 128, qlm_jtag_name, CVMX_BOOTMEM_FLAG_END_ALLOC);

    if (qlm_base == -1ull)
    {
        cvmx_dprintf("ERROR: cvmx_qlm_init: Error in creating %s named block\n", qlm_jtag_name);
        return;
    }

#ifndef CVMX_BUILD_FOR_LINUX_HOST
    __cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(qlm_base);
#endif
    memset(__cvmx_qlm_jtag_xor_ref, 0, qlm_jtag_size);

    /* Initialize the internal JTAG */
    cvmx_helper_qlm_jtag_init();

    /* Read the XOR defaults for the JTAG chain */
    for (qlm=0; qlm<cvmx_qlm_get_num(); qlm++)
    {
        int i;
        /* Capture the reset defaults */
        cvmx_helper_qlm_jtag_capture(qlm);
        /* Save the reset defaults. This will shift out too much data, but
           the extra zeros don't hurt anything */
        for (i=0; i<CVMX_QLM_JTAG_UINT32; i++)
            __cvmx_qlm_jtag_xor_ref[qlm][i] = cvmx_helper_qlm_jtag_shift(qlm, 32, 0);
    }

#ifdef CVMX_BUILD_FOR_LINUX_HOST
    /* Update the initial state for oct-remote utils. */
    {
        char buffer[qlm_jtag_size];

        memcpy(buffer, &__cvmx_qlm_jtag_xor_ref, qlm_jtag_size);
        octeon_remote_write_mem(qlm_base, buffer, qlm_jtag_size);
    }
#endif

    /* Apply speed tweak as a workaround for errata G-16094. */
    __cvmx_qlm_speed_tweak();
    __cvmx_qlm_pcie_idle_dac_tweak();
}
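
The find-or-create dance around the cvmx_qlm_jtag named block above is a common OCTEON idiom for state that must survive across applications. Here is a condensed sketch of the same pattern, assuming a simple-executive build (no CVMX_BUILD_FOR_LINUX_HOST remote path); attach_named_block() is a hypothetical name, not an SDK function.

#include <string.h>
#include "cvmx.h"
#include "cvmx-bootmem.h"

/* Hypothetical helper: attach to a persistent bootmem named block,
 * creating and zeroing it on first use. Returns a virtual pointer to
 * the block, or NULL on allocation failure. */
static void *attach_named_block(char *name, uint64_t size)
{
    const cvmx_bootmem_named_block_desc_t *desc;
    int64_t base;

    /* If an earlier application already created the block, reuse its
     * contents instead of reinitializing. */
    desc = cvmx_bootmem_find_named_block(name);
    if (desc != NULL)
        return cvmx_phys_to_ptr(desc->base_addr);

    /* Otherwise allocate it anywhere in physical memory with 128-byte
     * alignment (flags = 0 here; the QLM code above passes
     * CVMX_BOOTMEM_FLAG_END_ALLOC to allocate from the top of memory). */
    base = cvmx_bootmem_phy_named_block_alloc(size, 0, 0, 128, name, 0);
    if (base == -1)
        return NULL;

    memset(cvmx_phys_to_ptr(base), 0, size);
    return cvmx_phys_to_ptr(base);
}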
Example no. 29
0
static void __cvmx_ipd_free_ptr_v2(void)
{
    int no_wptr = 0;
    int i;
    cvmx_ipd_port_ptr_fifo_ctl_t ipd_port_ptr_fifo_ctl;
    cvmx_ipd_ptr_count_t ipd_ptr_count;
    ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

    /* Handle Work Queue Entry in cn68xx */
    if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
        cvmx_ipd_ctl_status_t ipd_ctl_status;
        ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
        if (ipd_ctl_status.s.no_wptr)
            no_wptr = 1;
    }

    /* Free the prefetched WQE */
    if (ipd_ptr_count.s.wqev_cnt) {
        cvmx_ipd_next_wqe_ptr_t ipd_next_wqe_ptr;
        ipd_next_wqe_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_WQE_PTR);
        if (no_wptr)
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        else
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_wqe_ptr.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
    }
    /* Free all WQE in the fifo */
    if (ipd_ptr_count.s.wqe_pcnt) {
        cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
        cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
        ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
        for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
            ipd_free_ptr_fifo_ctl.s.cena = 0;
            ipd_free_ptr_fifo_ctl.s.raddr = ipd_free_ptr_fifo_ctl.s.max_cnts + (ipd_free_ptr_fifo_ctl.s.wraddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
            cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
            ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
            ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
            if (no_wptr)
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            else
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
        }
        ipd_free_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
    }

    /* Free the prefetched packet */
    if (ipd_ptr_count.s.pktv_cnt) {
        cvmx_ipd_next_pkt_ptr_t ipd_next_pkt_ptr;
        ipd_next_pkt_ptr.u64 = cvmx_read_csr(CVMX_IPD_NEXT_PKT_PTR);
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_next_pkt_ptr.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
    }

    /* Free the per port prefetched packets */
    ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);

    for (i = 0; i < ipd_port_ptr_fifo_ctl.s.max_pkt; i++) {
        ipd_port_ptr_fifo_ctl.s.cena = 0;
        ipd_port_ptr_fifo_ctl.s.raddr = i % ipd_port_ptr_fifo_ctl.s.max_pkt;
        cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);
        ipd_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PORT_PTR_FIFO_CTL);
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
    }
    ipd_port_ptr_fifo_ctl.s.cena = 1;
    cvmx_write_csr(CVMX_IPD_PORT_PTR_FIFO_CTL, ipd_port_ptr_fifo_ctl.u64);

    /* Free all packets in the holding fifo */
    if (ipd_ptr_count.s.pfif_cnt) {
        cvmx_ipd_hold_ptr_fifo_ctl_t ipd_hold_ptr_fifo_ctl;

        ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);

        for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
            ipd_hold_ptr_fifo_ctl.s.cena = 0;
            ipd_hold_ptr_fifo_ctl.s.raddr = (ipd_hold_ptr_fifo_ctl.s.praddr + i) % ipd_hold_ptr_fifo_ctl.s.max_pkt;
            cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
            ipd_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL);
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        }
        ipd_hold_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_HOLD_PTR_FIFO_CTL, ipd_hold_ptr_fifo_ctl.u64);
    }

    /* Free all packets in the fifo */
    if (ipd_ptr_count.s.pkt_pcnt) {
        cvmx_ipd_free_ptr_fifo_ctl_t ipd_free_ptr_fifo_ctl;
        cvmx_ipd_free_ptr_value_t ipd_free_ptr_value;
        ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);

        for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
            ipd_free_ptr_fifo_ctl.s.cena = 0;
            ipd_free_ptr_fifo_ctl.s.raddr = (ipd_free_ptr_fifo_ctl.s.praddr+i) % ipd_free_ptr_fifo_ctl.s.max_cnts;
            cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
            ipd_free_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_FIFO_CTL);
            ipd_free_ptr_value.u64 = cvmx_read_csr(CVMX_IPD_FREE_PTR_VALUE);
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_free_ptr_value.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        }
        ipd_free_ptr_fifo_ctl.s.cena = 1;
        cvmx_write_csr(CVMX_IPD_FREE_PTR_FIFO_CTL, ipd_free_ptr_fifo_ctl.u64);
    }
}
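
Every free in the routine above follows the same recipe: the IPD CSRs store a physical address right-shifted by 7 (i.e. in 128-byte units), so the driver shifts it back, converts it to a virtual pointer, and returns it to the proper FPA pool. A one-helper sketch of that step follows; free_ipd_ptr() is our name, not an SDK function.

#include <stdint.h>
#include "cvmx.h"
#include "cvmx-fpa.h"

/* Hypothetical helper: return one IPD-held buffer to its FPA pool.
 * 'ptr_bits' is the raw 'ptr' field read from an IPD CSR, which holds
 * a physical address in 128-byte units, hence the << 7. */
static void free_ipd_ptr(uint64_t ptr_bits, uint64_t pool)
{
    /* Last argument is the number of cache lines to invalidate on
     * free; 0 returns the buffer as-is. */
    cvmx_fpa_free(cvmx_phys_to_ptr(ptr_bits << 7), pool, 0);
}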
Example no. 30
0
static void __cvmx_ipd_free_ptr_v1(void)
{
    /* Only CN38XXp{1,2} cannot read pointer out of the IPD */
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
	int no_wptr = 0;
	cvmx_ipd_ptr_count_t ipd_ptr_count;
	ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

	/* Handle Work Queue Entry in cn56xx and cn52xx */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
	    cvmx_ipd_ctl_status_t ipd_ctl_status;
	    ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	    if (ipd_ctl_status.s.no_wptr)
		no_wptr = 1;
	}

	/* Free the prefetched WQE */
	if (ipd_ptr_count.s.wqev_cnt) {
	    cvmx_ipd_wqe_ptr_valid_t ipd_wqe_ptr_valid;
	    ipd_wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
	    if (no_wptr)
	        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    else
	        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
	}

	/* Free all WQE in the fifo */
	if (ipd_ptr_count.s.wqe_pcnt) {
	    int i;
	    cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
	    ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
	    for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
		ipd_pwp_ptr_fifo_ctl.s.cena = 0;
		ipd_pwp_ptr_fifo_ctl.s.raddr = ipd_pwp_ptr_fifo_ctl.s.max_cnts + (ipd_pwp_ptr_fifo_ctl.s.wraddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
		ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		if (no_wptr)
		    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
		else
		    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
	    }
	    ipd_pwp_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
	}

	/* Free the prefetched packet */
	if (ipd_ptr_count.s.pktv_cnt) {
	    cvmx_ipd_pkt_ptr_valid_t ipd_pkt_ptr_valid;
	    ipd_pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
	    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pkt_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	}

	/* Free the per port prefetched packets */
	if (1) {
	    int i;
	    cvmx_ipd_prc_port_ptr_fifo_ctl_t ipd_prc_port_ptr_fifo_ctl;
	    ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; i++) {
		ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
		ipd_prc_port_ptr_fifo_ctl.s.raddr = i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
		ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
	}

	/* Free all packets in the holding fifo */
	if (ipd_ptr_count.s.pfif_cnt) {
	    int i;
	    cvmx_ipd_prc_hold_ptr_fifo_ctl_t ipd_prc_hold_ptr_fifo_ctl;

	    ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
		ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
		ipd_prc_hold_ptr_fifo_ctl.s.raddr = (ipd_prc_hold_ptr_fifo_ctl.s.praddr + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
		ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
	}

	/* Free all packets in the fifo */
	if (ipd_ptr_count.s.pkt_pcnt) {
	    int i;
	    cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
	    ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

	    for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
		ipd_pwp_ptr_fifo_ctl.s.cena = 0;
		ipd_pwp_ptr_fifo_ctl.s.raddr = (ipd_pwp_ptr_fifo_ctl.s.praddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
		ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
	    }
	    ipd_pwp_ptr_fifo_ctl.s.cena = 1;
	    cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
	}
    }
}
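
The v1/v2 pair exists because the CN68XX generation renamed and rearranged the IPD drain CSRs. A plausible dispatcher between them is sketched below; the OCTEON_FEATURE_PKND test is our assumption about how to tell the generations apart, not code taken from the SDK.

/* Sketch: pick the drain routine matching the chip's IPD CSR layout.
 * The feature test is an assumption: PKND (per-port packet kind)
 * parts such as the CN68XX use the newer register names handled by
 * the v2 routine; everything older takes the v1 path. */
static void ipd_free_all_pointers(void)
{
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
        __cvmx_ipd_free_ptr_v2();
    else
        __cvmx_ipd_free_ptr_v1();
}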