Example No. 1
/*
 * Find the largest index block in the MRC cache. Return NULL if none is
 * found.
 */
static struct mrc_data_container *find_current_mrc_cache_local
	(struct mrc_data_container *mrc_cache, u32 region_size)
{
	u32 region_end;
	u32 entry_id = 0;
	struct mrc_data_container *mrc_next = mrc_cache;

	region_end = (u32) mrc_cache + region_size;

	/* Search for the last filled entry in the region */
	while (is_mrc_cache(mrc_next)) {
		entry_id++;
		mrc_cache = mrc_next;
		mrc_next = next_mrc_block(mrc_next);
		if ((u32)mrc_next >= region_end) {
			/* Stay in the MRC data region */
			break;
		}
	}

	if (entry_id == 0) {
		printk(BIOS_ERR, "%s: No valid fast boot cache found.\n", __func__);
		return NULL;
	}

	/* Verify checksum */
	if (mrc_cache->mrc_checksum !=
	    compute_ip_checksum(mrc_cache->mrc_data,
				mrc_cache->mrc_data_size)) {
		printk(BIOS_ERR, "%s: fast boot cache checksum mismatch\n", __func__);
		return NULL;
	}

	printk(BIOS_DEBUG, "%s: picked entry %u from cache block\n", __func__,
	       entry_id - 1);

	return mrc_cache;
}
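
Every example in this listing calls compute_ip_checksum() without showing it. Below is a minimal sketch under the usual RFC 1071 ones'-complement semantics (the signatures and return widths differ across the projects above, so treat this as an illustration, not any project's actual helper). The property the callers rely on is that a region containing its own correct checksum sums to zero.

#include <stddef.h>
#include <stdint.h>

/* Sketch of an RFC 1071 ones'-complement checksum; a hypothetical
 * stand-in for the compute_ip_checksum() used throughout these examples. */
static uint16_t ip_checksum_sketch(const void *addr, size_t len)
{
	const uint8_t *p = addr;
	uint32_t sum = 0;

	while (len > 1) {	/* sum 16-bit words */
		sum += (uint32_t)p[0] | ((uint32_t)p[1] << 8);
		p += 2;
		len -= 2;
	}
	if (len)		/* odd trailing byte */
		sum += p[0];
	while (sum >> 16)	/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;	/* a region embedding its own checksum yields 0 */
}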
Example No. 2
/*---------------------------------------------------------------------
 * Method: ip_header_create(struct frame_t *outgoing)
 * Scope: Local
 *
 * Fills in the IPv4 header pointed to by outgoing->ip_header, based on
 * the fields already set on the outgoing frame, and computes the header
 * checksum.
 *--------------------------------------------------------------------*/
void ip_header_create(struct frame_t *outgoing)
{
    assert(outgoing);
    
    struct ip *send_header = outgoing->ip_header;
    
    assert(send_header);

    
    send_header->ip_v = 4;
    send_header->ip_hl = 5;
    send_header->ip_tos = 0;
    send_header->ip_id = 0;
    send_header->ip_off = 0;
    send_header->ip_ttl = INIT_TTL;
    send_header->ip_p = IPPROTO_ICMP;
    send_header->ip_len = htons(outgoing->ip_len);
    send_header->ip_src.s_addr = outgoing->from_ip;
    send_header->ip_dst.s_addr = outgoing->to_ip;
    send_header->ip_sum = 0;
    compute_ip_checksum(outgoing); 
    return;
}
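
A hypothetical caller sketch for the function above; the frame_t fields used (ip_header, ip_len, from_ip, to_ip) are the ones ip_header_create() actually reads, while reply_buf, payload_len, iface_ip and incoming_src_ip are placeholders.

/* Hypothetical usage: point the frame at a buffer past the Ethernet
 * header and let ip_header_create() fill in the IPv4 header + checksum. */
struct frame_t reply;
memset(&reply, 0, sizeof(reply));
reply.ip_header = (struct ip *)(reply_buf + ETHER_HDR_SIZE);
reply.ip_len    = sizeof(struct ip) + payload_len; /* host order; htons()'d inside */
reply.from_ip   = iface_ip;         /* assumed already in network byte order */
reply.to_ip     = incoming_src_ip;
ip_header_create(&reply);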
Example No. 3
struct mrc_data_container *mrccache_find_current(struct mrc_region *entry)
{
	struct mrc_data_container *cache, *next;
	ulong base_addr, end_addr;
	uint id;

	base_addr = entry->base + entry->offset;
	end_addr = base_addr + entry->length;
	cache = NULL;

	/* Search for the last filled entry in the region */
	for (id = 0, next = (struct mrc_data_container *)base_addr;
	     is_mrc_cache(next);
	     id++) {
		cache = next;
		next = next_mrc_block(next);
		if ((ulong)next >= end_addr)
			break;
	}

	if (id-- == 0) {
		debug("%s: No valid MRC cache found.\n", __func__);
		return NULL;
	}

	/* Verify checksum */
	if (cache->checksum != compute_ip_checksum(cache->data,
						   cache->data_size)) {
		printf("%s: MRC cache checksum mismatch\n", __func__);
		return NULL;
	}

	debug("%s: picked entry %u from cache block\n", __func__, id);

	return cache;
}
Example No. 4
/* When ramstage is relocatable, ELF loading ensures an ELF image cannot
 * be loaded over the ramstage code. */
void jmp_to_elf_entry(void *entry, unsigned long unused1, unsigned long unused2)
{
	elf_boot_notes.hdr.b_checksum =
		compute_ip_checksum(&elf_boot_notes, sizeof(elf_boot_notes));

	/* Jump to kernel */
	__asm__ __volatile__(
		"	cld	\n\t"
		/* Now jump to the loaded image */
		"	call	*%0\n\t"

		/* The loaded image returned? */
		"	cli	\n\t"
		"	cld	\n\t"

		::
		"r" (entry),
#if CONFIG_MULTIBOOT
		"b"(mbi), "a" (MB_MAGIC2)
#else
		"b"(&elf_boot_notes), "a" (0x0E1FB007)
#endif
		);
}
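
The magic 0x0E1FB007 loaded into %eax is hex-speak for "ELF boot": the entered image is expected to check it alongside the boot-notes pointer passed in %ebx. Spelled out as a sketch (the macro name is hypothetical, not coreboot's):

#define ELF_BOOT_MAGIC	0x0E1FB007	/* reads as "E1F B007", received in %eax */
/* The image is entered roughly as entry() with %eax = ELF_BOOT_MAGIC and
 * %ebx = &elf_boot_notes (or %eax = MB_MAGIC2, %ebx = mbi under CONFIG_MULTIBOOT). */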
Example No. 5
ssize_t dst_entry_tcp::fast_send(const struct iovec* p_iov, const ssize_t sz_iov, bool b_blocked /*= true*/, bool is_rexmit /*= false*/, bool dont_inline /*= false*/)
{
	tx_packet_template_t* p_pkt;
	mem_buf_desc_t *p_mem_buf_desc;
	size_t total_packet_len = 0;
	// The header is aligned for fast copy but we need to maintain this diff in order to get the real header pointer easily
	size_t hdr_alignment_diff = m_header.m_aligned_l2_l3_len - m_header.m_total_hdr_len;

	tcp_iovec* p_tcp_iov = NULL;
	bool no_copy = true;
	if (likely(sz_iov == 1 && !is_rexmit)) {
		p_tcp_iov = (tcp_iovec*)p_iov;
		if (unlikely(!m_p_ring->is_active_member(p_tcp_iov->p_desc->p_desc_owner, m_id))) {
			no_copy = false;
			dst_tcp_logdbg("p_desc=%p wrong desc_owner=%p, this ring=%p. did migration occur?", p_tcp_iov->p_desc, p_tcp_iov->p_desc->p_desc_owner, m_p_ring);
			// TODO: can we handle this during migration (by walking all the buffers lwip holds) instead of on every send?
		}
	} else {
		no_copy = false;
	}

	if (unlikely(is_rexmit))
		m_p_ring->inc_ring_stats(m_id);

	if (likely(no_copy)) {
		p_pkt = (tx_packet_template_t*)((uint8_t*)p_tcp_iov[0].iovec.iov_base - m_header.m_aligned_l2_l3_len);
		total_packet_len = p_tcp_iov[0].iovec.iov_len + m_header.m_total_hdr_len;
		m_header.copy_l2_ip_hdr(p_pkt);
		// We've copied to aligned address, and now we must update p_pkt to point to real
		// L2 header
		//p_pkt = (tx_packet_template_t*)((uint8_t*)p_pkt + hdr_alignment_diff);
		p_pkt->hdr.m_ip_hdr.tot_len = htons(p_tcp_iov[0].iovec.iov_len + m_header.m_ip_header_len);

		m_sge[0].addr = (uintptr_t)((uint8_t*)p_pkt + hdr_alignment_diff);
		m_sge[0].length = total_packet_len;

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_tcp_iov[0].p_desc->p_buffer || (uint8_t*)p_pkt < p_tcp_iov[0].p_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_tcp_iov[0].p_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_tcp_iov[0].p_desc->p_buffer, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.type,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.len, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.tot_len,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}

		if (!dont_inline && (total_packet_len < m_max_inline)) { // inline send
			m_p_send_wqe = &m_inline_send_wqe;

		} else {
			m_p_send_wqe = &m_not_inline_send_wqe;
		}

		m_p_send_wqe->wr_id = (uintptr_t)p_tcp_iov[0].p_desc;

#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		m_p_ring->send_lwip_buffer(m_id, m_p_send_wqe, b_blocked);
	}
	else { // We don't support inline in this case, since we believe this is a very rare case
		p_mem_buf_desc = get_buffer(b_blocked);
		if (p_mem_buf_desc == NULL) {
			return -1;
		}

		m_header.copy_l2_ip_hdr((tx_packet_template_t*)p_mem_buf_desc->p_buffer);

		// This is not the real packet length yet; we subtract the alignment diff at the end of the copy
		total_packet_len = m_header.m_aligned_l2_l3_len;

		for (int i = 0; i < sz_iov; ++i) {
			memcpy(p_mem_buf_desc->p_buffer + total_packet_len, p_iov[i].iov_base, p_iov[i].iov_len);
			total_packet_len += p_iov[i].iov_len;
		}

		m_sge[0].addr = (uintptr_t)(p_mem_buf_desc->p_buffer + hdr_alignment_diff);
		m_sge[0].length = total_packet_len - hdr_alignment_diff;
		// LKey will be updated in ring->send() // m_sge[0].lkey = p_mem_buf_desc->lkey; 

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_mem_buf_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_mem_buf_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_mem_buf_desc->p_buffer, p_mem_buf_desc->lwip_pbuf.pbuf.type,
					p_mem_buf_desc->lwip_pbuf.pbuf.len, p_mem_buf_desc->lwip_pbuf.pbuf.tot_len,
					p_mem_buf_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}

		p_pkt = (tx_packet_template_t*)((uint8_t*)p_mem_buf_desc->p_buffer);
		p_pkt->hdr.m_ip_hdr.tot_len = htons(m_sge[0].length - m_header.m_transport_header_len);
#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		m_p_send_wqe = &m_not_inline_send_wqe;
		m_p_send_wqe->wr_id = (uintptr_t)p_mem_buf_desc;
		m_p_ring->send_ring_buffer(m_id, m_p_send_wqe, b_blocked);
	}

#ifndef __COVERITY__
	struct tcphdr* p_tcp_h = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
	NOT_IN_USE(p_tcp_h); /* to suppress warning in case VMA_OPTIMIZE_LOG */
	dst_tcp_logfunc("Tx TCP segment info: src_port=%d, dst_port=%d, flags='%s%s%s%s%s%s' seq=%u, ack=%u, win=%u, payload_sz=%u",
			ntohs(p_tcp_h->source), ntohs(p_tcp_h->dest),
			p_tcp_h->urg?"U":"", p_tcp_h->ack?"A":"", p_tcp_h->psh?"P":"",
			p_tcp_h->rst?"R":"", p_tcp_h->syn?"S":"", p_tcp_h->fin?"F":"",
			ntohl(p_tcp_h->seq), ntohl(p_tcp_h->ack_seq), ntohs(p_tcp_h->window),
			total_packet_len - p_tcp_h->doff*4 - 34 /* 34: presumably Ethernet (14) + IPv4 (20) */);
#endif

	if (unlikely(m_p_tx_mem_buf_desc_list == NULL)) {
		m_p_tx_mem_buf_desc_list = m_p_ring->mem_buf_tx_get(m_id, b_blocked, m_n_sysvar_tx_bufs_batch_tcp);
	}

	return 0;
}
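
One detail worth noting in the VMA_NO_HW_CSUM paths above: iphdr.ihl counts 32-bit words, so ihl * 2 is the header length in 16-bit words — this compute_ip_checksum() variant evidently takes a word count rather than a byte count. A sketch of that convention (an assumption; libvma's real helper is not shown here):

/* Assumed word-count variant matching the ihl * 2 call sites above:
 * 'words' is the number of 16-bit units to sum. Summing the raw
 * network-order shorts lets the result be stored without byte swapping. */
static unsigned short checksum_words_sketch(const unsigned short *buf, unsigned int words)
{
	unsigned long sum = 0;

	while (words--)
		sum += *buf++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}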
Example No. 6
void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size)
{
	extern unsigned char _ram_seg, _eram_seg;
	unsigned long lb_start, lb_size;
	unsigned long adjust, adjusted_boot_notes;

	elf_boot_notes.hdr.b_checksum =
		compute_ip_checksum(&elf_boot_notes, sizeof(elf_boot_notes));

	lb_start = (unsigned long)&_ram_seg;
	lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
	adjust = buffer + size - lb_start;

	adjusted_boot_notes = (unsigned long)&elf_boot_notes;
	adjusted_boot_notes += adjust;

	printk(BIOS_SPEW, "entry    = 0x%08lx\n", (unsigned long)entry);
	printk(BIOS_SPEW, "lb_start = 0x%08lx\n", lb_start);
	printk(BIOS_SPEW, "lb_size  = 0x%08lx\n", lb_size);
	printk(BIOS_SPEW, "adjust   = 0x%08lx\n", adjust);
	printk(BIOS_SPEW, "buffer   = 0x%08lx\n", buffer);
	printk(BIOS_SPEW, "     elf_boot_notes = 0x%08lx\n", (unsigned long)&elf_boot_notes);
	printk(BIOS_SPEW, "adjusted_boot_notes = 0x%08lx\n", adjusted_boot_notes);

	/* Jump to kernel */
	__asm__ __volatile__(
		"	cld	\n\t"
		/* Save the callee save registers... */
		"	pushl	%%esi\n\t"
		"	pushl	%%edi\n\t"
		"	pushl	%%ebx\n\t"
		/* Save the parameters I was passed */
		"	pushl	$0\n\t" /* 20 adjust */
		"	pushl	%0\n\t" /* 16 lb_start */
		"	pushl	%1\n\t" /* 12 buffer */
		"	pushl	%2\n\t" /*  8 lb_size */
		"	pushl	%3\n\t" /*  4 entry */
		"	pushl	%4\n\t" /*  0 elf_boot_notes */
		/* Compute the adjustment */
		"	xorl	%%eax, %%eax\n\t"
		"	subl	16(%%esp), %%eax\n\t"
		"	addl	12(%%esp), %%eax\n\t"
		"	addl	 8(%%esp), %%eax\n\t"
		"	movl	%%eax, 20(%%esp)\n\t"
		/* Place a copy of coreboot in its new location */
		/* Move ``longs''; the coreboot size is 4-byte aligned */
		"	movl	12(%%esp), %%edi\n\t"
		"	addl	 8(%%esp), %%edi\n\t"
		"	movl	16(%%esp), %%esi\n\t"
		"	movl	 8(%%esp), %%ecx\n\t"
		"	shrl	$2, %%ecx\n\t"
		"	rep	movsl\n\t"

		/* Adjust the stack pointer to point into the new coreboot image */
		"	addl	20(%%esp), %%esp\n\t"
		/* Adjust the instruction pointer to point into the new coreboot image */
		"	movl	$1f, %%eax\n\t"
		"	addl	20(%%esp), %%eax\n\t"
		"	jmp	*%%eax\n\t"
		"1:	\n\t"

		/* Copy the coreboot bounce buffer over coreboot */
		/* Move ``longs''; the coreboot size is 4-byte aligned */
		"	movl	16(%%esp), %%edi\n\t"
		"	movl	12(%%esp), %%esi\n\t"
		"	movl	 8(%%esp), %%ecx\n\t"
		"	shrl	$2, %%ecx\n\t"
		"	rep	movsl\n\t"

		/* Now jump to the loaded image */
		"	movl	%5, %%eax\n\t"
		"	movl	 0(%%esp), %%ebx\n\t"
		"	call	*4(%%esp)\n\t"

		/* The loaded image returned? */
		"	cli	\n\t"
		"	cld	\n\t"

		/* Copy the saved copy of coreboot where coreboot runs */
		/* Move ``longs''; the coreboot size is 4-byte aligned */
		"	movl	16(%%esp), %%edi\n\t"
		"	movl	12(%%esp), %%esi\n\t"
		"	addl	 8(%%esp), %%esi\n\t"
		"	movl	 8(%%esp), %%ecx\n\t"
		"	shrl	$2, %%ecx\n\t"
		"	rep	movsl\n\t"

		/* Adjust the stack pointer to point into the old coreboot image */
		"	subl	20(%%esp), %%esp\n\t"

		/* Adjust the instruction pointer to point into the old coreboot image */
		"	movl	$1f, %%eax\n\t"
		"	subl	20(%%esp), %%eax\n\t"
		"	jmp	*%%eax\n\t"
		"1:	\n\t"

		/* Drop the parameters I was passed */
		"	addl	$24, %%esp\n\t"

		/* Restore the callee save registers */
		"	popl	%%ebx\n\t"
		"	popl	%%edi\n\t"
		"	popl	%%esi\n\t"

		::
		"ri" (lb_start), "ri" (buffer), "ri" (lb_size),
		"ri" (entry),
#if CONFIG_MULTIBOOT
		"ri"(mbi), "ri" (MB_MAGIC2)
#else
		"ri"(adjusted_boot_notes), "ri" (0x0E1FB007)
#endif
		);
}
Example No. 7
ssize_t dst_entry_tcp::fast_send(const iovec* p_iov, const ssize_t sz_iov, bool is_dummy, bool b_blocked /*= true*/, bool is_rexmit /*= false*/)
{
	int ret = 0;
	tx_packet_template_t* p_pkt;
	mem_buf_desc_t *p_mem_buf_desc;
	size_t total_packet_len = 0;
	// The header is aligned for fast copy but we need to maintain this diff in order to get the real header pointer easily
	size_t hdr_alignment_diff = m_header.m_aligned_l2_l3_len - m_header.m_total_hdr_len;

	tcp_iovec* p_tcp_iov = NULL;
	bool no_copy = true;
	if (likely(sz_iov == 1 && !is_rexmit)) {
		p_tcp_iov = (tcp_iovec*)p_iov;
		if (unlikely(!m_p_ring->is_active_member(p_tcp_iov->p_desc->p_desc_owner, m_id))) {
			no_copy = false;
			dst_tcp_logdbg("p_desc=%p wrong desc_owner=%p, this ring=%p. did migration occur?", p_tcp_iov->p_desc, p_tcp_iov->p_desc->p_desc_owner, m_p_ring);
			// TODO: can we handle this during migration (by walking all the buffers lwip holds) instead of on every send?
		}
	} else {
		no_copy = false;
	}

	if (likely(no_copy)) {
		p_pkt = (tx_packet_template_t*)((uint8_t*)p_tcp_iov[0].iovec.iov_base - m_header.m_aligned_l2_l3_len);
		total_packet_len = p_tcp_iov[0].iovec.iov_len + m_header.m_total_hdr_len;
		m_header.copy_l2_ip_hdr(p_pkt);
		// We've copied to aligned address, and now we must update p_pkt to point to real
		// L2 header
		//p_pkt = (tx_packet_template_t*)((uint8_t*)p_pkt + hdr_alignment_diff);
		p_pkt->hdr.m_ip_hdr.tot_len = htons(p_tcp_iov[0].iovec.iov_len + m_header.m_ip_header_len);

		m_sge[0].addr = (uintptr_t)((uint8_t*)p_pkt + hdr_alignment_diff);
		m_sge[0].length = total_packet_len;

		if (total_packet_len < m_max_inline) { // inline send
			m_p_send_wqe = &m_inline_send_wqe;
		} else {
			m_p_send_wqe = &m_not_inline_send_wqe;
		}

		m_p_send_wqe->wr_id = (uintptr_t)p_tcp_iov[0].p_desc;

#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		send_lwip_buffer(m_id, m_p_send_wqe, b_blocked, is_dummy);

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_tcp_iov[0].p_desc->p_buffer || (uint8_t*)p_pkt < p_tcp_iov[0].p_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_tcp_iov[0].p_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_tcp_iov[0].p_desc->p_buffer, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.type,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.len, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.tot_len,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}
	}
	else { // We don't support inline in this case, since we believe this is a very rare case
		p_mem_buf_desc = get_buffer(b_blocked);
		if (p_mem_buf_desc == NULL) {
			ret = -1;
			goto out;
		}

		m_header.copy_l2_ip_hdr((tx_packet_template_t*)p_mem_buf_desc->p_buffer);

		// This is not the real packet length yet; we subtract the alignment diff at the end of the copy
		total_packet_len = m_header.m_aligned_l2_l3_len;

		for (int i = 0; i < sz_iov; ++i) {
			memcpy(p_mem_buf_desc->p_buffer + total_packet_len, p_iov[i].iov_base, p_iov[i].iov_len);
			total_packet_len += p_iov[i].iov_len;
		}

		m_sge[0].addr = (uintptr_t)(p_mem_buf_desc->p_buffer + hdr_alignment_diff);
		m_sge[0].length = total_packet_len - hdr_alignment_diff;
		// LKey will be updated in ring->send() // m_sge[0].lkey = p_mem_buf_desc->lkey; 

		p_pkt = (tx_packet_template_t*)((uint8_t*)p_mem_buf_desc->p_buffer);
		p_pkt->hdr.m_ip_hdr.tot_len = htons(m_sge[0].length - m_header.m_transport_header_len);
#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		m_p_send_wqe = &m_not_inline_send_wqe;
		m_p_send_wqe->wr_id = (uintptr_t)p_mem_buf_desc;
		vma_wr_tx_packet_attr attr = (vma_wr_tx_packet_attr)((VMA_TX_PACKET_BLOCK*b_blocked) | 
								     (VMA_TX_PACKET_DUMMY*is_dummy)  |
								      VMA_TX_PACKET_L3_CSUM          |
								      VMA_TX_PACKET_L4_CSUM);
		send_ring_buffer(m_id, m_p_send_wqe, attr);

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_mem_buf_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_mem_buf_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_mem_buf_desc->p_buffer, p_mem_buf_desc->lwip_pbuf.pbuf.type,
					p_mem_buf_desc->lwip_pbuf.pbuf.len, p_mem_buf_desc->lwip_pbuf.pbuf.tot_len,
					p_mem_buf_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}
	}

	if (unlikely(m_p_tx_mem_buf_desc_list == NULL)) {
		m_p_tx_mem_buf_desc_list = m_p_ring->mem_buf_tx_get(m_id, b_blocked, m_n_sysvar_tx_bufs_batch_tcp);
	}

out:
	if (unlikely(is_rexmit)) {
		m_p_ring->inc_tx_retransmissions(m_id);
	}

	return ret;
}
Example No. 8
static int sb_eth_send(struct udevice *dev, void *packet, int length)
{
	struct eth_sandbox_priv *priv = dev_get_priv(dev);
	struct ethernet_hdr *eth = packet;

	debug("eth_sandbox: Send packet %d\n", length);

	if (dev->seq >= 0 && dev->seq < ARRAY_SIZE(disabled) &&
	    disabled[dev->seq])
		return 0;

	if (ntohs(eth->et_protlen) == PROT_ARP) {
		struct arp_hdr *arp = packet + ETHER_HDR_SIZE;

		if (ntohs(arp->ar_op) == ARPOP_REQUEST) {
			struct ethernet_hdr *eth_recv;
			struct arp_hdr *arp_recv;

			/* store this as the assumed IP of the fake host */
			priv->fake_host_ipaddr = net_read_ip(&arp->ar_tpa);
			/* Formulate a fake response */
			eth_recv = (void *)priv->recv_packet_buffer;
			memcpy(eth_recv->et_dest, eth->et_src, ARP_HLEN);
			memcpy(eth_recv->et_src, priv->fake_host_hwaddr,
			       ARP_HLEN);
			eth_recv->et_protlen = htons(PROT_ARP);

			arp_recv = (void *)priv->recv_packet_buffer +
				ETHER_HDR_SIZE;
			arp_recv->ar_hrd = htons(ARP_ETHER);
			arp_recv->ar_pro = htons(PROT_IP);
			arp_recv->ar_hln = ARP_HLEN;
			arp_recv->ar_pln = ARP_PLEN;
			arp_recv->ar_op = htons(ARPOP_REPLY);
			memcpy(&arp_recv->ar_sha, priv->fake_host_hwaddr,
			       ARP_HLEN);
			net_write_ip(&arp_recv->ar_spa, priv->fake_host_ipaddr);
			memcpy(&arp_recv->ar_tha, &arp->ar_sha, ARP_HLEN);
			net_copy_ip(&arp_recv->ar_tpa, &arp->ar_spa);

			priv->recv_packet_length = ETHER_HDR_SIZE +
				ARP_HDR_SIZE;
		}
	} else if (ntohs(eth->et_protlen) == PROT_IP) {
		struct ip_udp_hdr *ip = packet + ETHER_HDR_SIZE;

		if (ip->ip_p == IPPROTO_ICMP) {
			struct icmp_hdr *icmp = (struct icmp_hdr *)&ip->udp_src;

			if (icmp->type == ICMP_ECHO_REQUEST) {
				struct ethernet_hdr *eth_recv;
				struct ip_udp_hdr *ipr;
				struct icmp_hdr *icmpr;

				/* reply to the ping */
				memcpy(priv->recv_packet_buffer, packet,
				       length);
				eth_recv = (void *)priv->recv_packet_buffer;
				ipr = (void *)priv->recv_packet_buffer +
					ETHER_HDR_SIZE;
				icmpr = (struct icmp_hdr *)&ipr->udp_src;
				memcpy(eth_recv->et_dest, eth->et_src,
				       ARP_HLEN);
				memcpy(eth_recv->et_src, priv->fake_host_hwaddr,
				       ARP_HLEN);
				ipr->ip_sum = 0;
				ipr->ip_off = 0;
				net_copy_ip((void *)&ipr->ip_dst, &ip->ip_src);
				net_write_ip((void *)&ipr->ip_src,
					     priv->fake_host_ipaddr);
				ipr->ip_sum = compute_ip_checksum(ipr,
					IP_HDR_SIZE);

				icmpr->type = ICMP_ECHO_REPLY;
				icmpr->checksum = 0;
				icmpr->checksum = compute_ip_checksum(icmpr,
					ICMP_HDR_SIZE);

				priv->recv_packet_length = length;
			}
		}
	}

	return 0;
}
Example No. 9
void sr_handlepacket(struct sr_instance* sr, 
                     uint8_t* packet/* lent */,
                     unsigned int len,
                     char* interface/* lent */)
{
    /* REQUIRES */
    assert(sr);
    assert(packet);
    assert(interface);
    
    printf("*** -> Received packet of length %u\n", len);
    if( len < ETHER_HDR_LEN ) {
        printf("Bogus packet length\n");
        return;
    }
    
    
    struct sr_if *iface;
    
    struct frame_t *incoming = create_frame_t(sr, packet, len, interface);
    struct frame_t *outgoing = NULL;
    
    /* First, deal with ARP cache timeouts */
    arp_cache_flush(&sr_arp_cache);
    arp_queue_flush(sr, &sr_arp_queue);
    
    /* Do we only need to cache ARP replies, or src MAC/IP on regular IP packets too, etc? */
    /* Also, do we need to worry about fragmentation? */
    
    /* Then actually handle the packet */
    /* Start by determining protocol */
    if (incoming->ip_header){
        
        /* sanity checks */
        if ( incoming->ip_header->ip_v != 4 ){
            printf("IP packet not IPv4\n");
            return;
        }
        
        compute_ip_checksum(incoming);
        /* Check the checksum */
        if( incoming->ip_header->ip_sum != 0 ) { 
            fprintf(stderr, "IP checksum incorrect, packet was dropped\n");
            return;
        }
        //set checksum back to what it was
        compute_ip_checksum(incoming);
        

        /* Are we the destination? */
        if (iface = if_dst_check(sr, incoming->to_ip)){  //we could change this to just take incoming and then get to_ip

            /* Is this an ICMP packet? */
            if (incoming->icmp_header){
                printf("received ICMP datagram\n");
                compute_icmp_checksum(incoming);
                if(incoming->icmp_header->icmp_type == ECHO_REQUEST && incoming->icmp_header->icmp_sum == 0){
                    outgoing = generate_icmp_echo(incoming); 
                    printf("received ICMP echo request\n");
                }
                else
                    printf("Dropped packet--we don't deal with that code, or invalid checksum\n");
            }
            else{
                outgoing = generate_icmp_error(incoming, DEST_UNREACH, PORT_UNREACH);
                printf("A packet for me! Flattering, but wrong.\n");
            }
        }
        else {
            /* Has it timed out? */
            if (incoming->ip_header->ip_ttl <= 0){
                int err = 0;
                //make sure it's not an ICMP error packet already
                if (incoming->ip_header->ip_p == IPPROTO_ICMP){
                    incoming->icmp_header = ((void *) incoming->ip_header + 
                                                             incoming->ip_hl);
                    uint8_t code = incoming->icmp_header->icmp_type;
                    //don't send ICMP error messages about ICMP error messages
                    if (code == DEST_UNREACH || code == TIME_EXCEEDED || code == 12 || code == 31)
                        //12 and 31 indicate bad IP header and datagram conversion error, respectively
                        err = 1;
                }
                if (!err){
                    outgoing = generate_icmp_error(incoming, TIME_EXCEEDED, TIME_INTRANSIT);
                    printf("Slowpoke. TTL exceeded.\n");
                }
            }
            else {

                /* update and forward packet; if necessary, add it to queue */
                struct arpc_entry *incache;
                uint32_t arpc_ip;
                outgoing = update_ip_hdr(sr, incoming, &arpc_ip);

                incache = arp_cache_lookup(sr_arp_cache.first, arpc_ip);

                if (!incache){
                    struct arpq_entry *entry = arp_queue_lookup(sr_arp_queue.first, outgoing->to_ip);
                    struct arpq_entry *temp_entry = NULL;
                    if ( entry ){ //if we've already sent an ARP request about this IP
                        if( time(NULL) - 1 > entry->arpq_last_req ) {
                            if(entry->arpq_num_reqs >= ARP_MAX_REQ)
                            {
                                printf("Too many ARP requests\n");
                                arpq_packets_icmpsend(sr, &entry->arpq_packets);
                                destroy_arpq_entry(entry);
                                return;
                            }
                            else if (entry->arpq_packets.first)
                            {
                                struct frame_t *arp_req;
                                struct queued_packet *old_packet = entry->arpq_packets.first;
                                entry->arpq_last_req = time(NULL);
                                entry->arpq_num_reqs++;
                            }
                        }
                        assert( (entry->arpq_packets).first );
                        if (!arpq_add_packet(entry, outgoing, len, incoming->from_MAC, incoming->iface))
                            printf("ARP queue packet add failed\n");
                        else printf("added packet to queue\n");
                    }
                    /* else, there are no outstanding ARP requests for this particular IP */
                    else {
                        printf("outgoing ip is %d\n", ntohl(outgoing->to_ip));
                        temp_entry = arpq_add_entry(&sr_arp_queue, outgoing->iface, outgoing, outgoing->to_ip, outgoing->ip_len, incoming->from_MAC, incoming->iface);
                    }
                    free(outgoing->frame);
                    
                    /* make ARP request */
                    outgoing = arp_create(sr, outgoing, outgoing->iface, ARP_REQUEST); //send datagram will now point to an ARP packet, not to the IP datagram 
                    if (temp_entry){
                        temp_entry->arpq_next_hop = outgoing->to_ip;
                    }
                    printf("sending ARP request\n");
                }
                else{
                    printf("got the MAC, can actually send this packet\n");
                    memcpy(outgoing->to_MAC, incache->arpc_mac, ETHER_ADDR_LEN);
                    encapsulate(outgoing);
                }
            }
        }
    }
    else if ( incoming->arp_header )
    {
        printf("received ARP packet\n");
        
        struct sr_arphdr *arp_header = incoming->arp_header;

        uint8_t in_cache = 0;
        
        struct arpc_entry *arpc_ent = arp_cache_lookup(sr_arp_cache.first, arp_header->ar_sip);
        printf("checking the cache\n");
        if( arpc_ent )
        {
            arp_cache_update(arpc_ent, arp_header->ar_sha);
            printf("updated cache\n");
            in_cache = 1;
        }
        
        struct sr_if *target_if = if_dst_check(sr, arp_header->ar_tip);
        printf("checking the target\n");
        if( target_if )
        {
            printf("It's for us\n");
            if( !in_cache ) {
                if( arp_cache_add(&sr_arp_cache, arp_header->ar_sha, arp_header->ar_sip) ) {
                    printf("added to cache\n");
                    printf("ip is %d\n", ntohl(arp_header->ar_sip));
                    struct arpq_entry *new_ent;
                    if( new_ent = arpq_next_hop_lookup(sr_arp_queue.first, arp_header->ar_sip) )
                        arpq_entry_clear(sr, &sr_arp_queue, new_ent, arp_header->ar_sha);
                }
                else perror("ARP request not added to cache");
            }
            if( ntohs(arp_header->ar_op) == ARP_REQUEST ){
                outgoing = arp_create(sr, incoming, incoming->iface, ARP_REPLY);
                printf("created ARP reply\n");
                
                assert(outgoing);
            }
        }
    }
    else perror("Unknown protocol");
        
    //send datagram, if appropriate
    if (outgoing != NULL){ 
        sr_send_packet(sr, (uint8_t *)outgoing->frame, outgoing->len, outgoing->iface->name);
        printf("sent packet of length %d on iface %s\n", outgoing->len, outgoing->iface->name);
    }
    
    if (outgoing != NULL) destroy_frame_t(outgoing);
    
}/* end sr_handlepacket */
Example No. 10
/****************************************************************************
 * lbtable_scan
 *
 * Scan the chunk of memory specified by 'start' and 'end' for a coreboot
 * table.  The first 4 bytes of the table are marked by the signature
 * { 'L', 'B', 'I', 'O' }.  'start' and 'end' indicate the addresses of the
 * first and last bytes of the chunk of memory to be scanned.  For instance,
 * values of 0x10000000 and 0x1000ffff for 'start' and 'end' specify a 64k
 * chunk of memory starting at address 0x10000000.  'start' and 'end' are
 * physical addresses.
 *
 * If a coreboot table is found, return a pointer to it.  Otherwise return
 * NULL.  On return, *bad_header_count and *bad_table_count are set as
 * follows:
 *
 *     *bad_header_count:
 *         Indicates the number of times in which a valid signature was found
 *         but the header checksum was invalid.
 *
 *     *bad_table_count:
 *         Indicates the number of times in which a header with a valid
 *         checksum was found but the table checksum was invalid.
 ****************************************************************************/
static const struct lb_header *lbtable_scan(unsigned long start,
					    unsigned long end,
					    int *bad_header_count,
					    int *bad_table_count)
{
	static const char signature[4] = { 'L', 'B', 'I', 'O' };
	const struct lb_header *table;
	const struct lb_forward *forward;
	unsigned long p;
	uint32_t sig;

	assert(end >= start);
	memcpy(&sig, signature, sizeof(sig));
	table = NULL;
	*bad_header_count = 0;
	*bad_table_count = 0;

	/* Look for signature.  Table is aligned on 16-byte boundary.  Therefore
	 * only check every fourth 32-bit memory word.  As the loop is coded below,
	 * this function will behave in a reasonable manner for ALL possible values
	 * for 'start' and 'end': even weird boundary cases like 0x00000000 and
	 * 0xffffffff on a 32-bit architecture.
	 */
	map_pages(start, end - start);
	for (p = start;
	     (p <= end) &&
	     (end - p >= (sizeof(uint32_t) - 1)); p += 4) {
		if (*(uint32_t*)phystov(p) != sig)
			continue;

		/* We found a valid signature. */
		table = (const struct lb_header *)phystov(p);

		/* validate header checksum */
		if (compute_ip_checksum((void *)table, sizeof(*table))) {
			(*bad_header_count)++;
			continue;
		}

		map_pages(p, table->table_bytes + sizeof(*table));
		/* validate table checksum */
		if (table->table_checksum !=
		    compute_ip_checksum(((char *)table) + sizeof(*table),
					table->table_bytes)) {
			(*bad_table_count)++;
			continue;
		}

		/* checksums are ok: we found it! */
		/* But it may just be a forwarding table, so look if there's a forwarder */
		lbtable = table;
		forward = (struct lb_forward *)find_lbrec(LB_TAG_FORWARD);
		lbtable = NULL;

		if (forward) {
			uint64_t new_phys = forward->forward;
			table = lbtable_scan(new_phys, new_phys + getpagesize(),
					 bad_header_count, bad_table_count);
		}
		return table;
	}

	return NULL;
}
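
The header validation above leans on the self-cancelling property of the ones'-complement checksum: lb_header embeds its own checksum field, so summing the whole header yields zero exactly when the stored value is correct — hence a bare nonzero result from compute_ip_checksum() flags a bad header. Restated as a tiny predicate (a sketch, not coreboot API):

/* A valid lb_header sums to zero because it contains its own checksum. */
static int lb_header_valid_sketch(const struct lb_header *table)
{
	return compute_ip_checksum((void *)table, sizeof(*table)) == 0;
}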
Example No. 11
/*
 * HELPER function called from arp_thread
 */
void process_arp_queue(struct sr_instance* sr) {
	router_state* rs = get_router_state(sr);
	node* n = get_router_state(sr)->arp_queue;
	node* next = NULL;
	time_t now;
	double diff;

	while (n) {
		next = n->next;

		arp_queue_entry* aqe = (arp_queue_entry*)n->data;

		/* has it been over a second since the last arp request was sent? */
		time(&now);
		diff = difftime(now, aqe->last_req_time);
		if (diff > 1) {
			/* have we sent less than 5 arp requests? */
			if (aqe->requests < 5) {
				/* send another */
				time(&(aqe->last_req_time));
				++(aqe->requests);
				send_arp_request(sr, aqe->next_hop.s_addr, aqe->out_iface_name);
			} else {
				/* we have exceeded the max arp requests, return packets to sender */
				node* cur_packet_node = aqe->head;
				node* next_packet_node = NULL;

				while (cur_packet_node) {
					/* send icmp for the packet, free it, and its encasing entry */
					arp_queue_packet_entry* aqpe = (arp_queue_packet_entry*)cur_packet_node->data;

					/* only send an icmp error if the packet is not icmp, or if it is,
					 * it's an echo request or reply
					 */
					if ((get_ip_hdr(aqpe->packet, aqpe->len)->ip_p != IP_PROTO_ICMP) ||
							(get_icmp_hdr(aqpe->packet, aqpe->len)->icmp_type == ICMP_TYPE_ECHO_REPLY) ||
							(get_icmp_hdr(aqpe->packet, aqpe->len)->icmp_type == ICMP_TYPE_ECHO_REQUEST)) {

					 	/* also ensure we don't send an icmp error back to one of our interfaces */
						if (!iface_match_ip(rs, get_ip_hdr(aqpe->packet, aqpe->len)->ip_src.s_addr)) {
							/* Total hack here to increment the TTL since we already decremented it earlier in the pipeline
							 * and the ICMP error should return the original packet.
							 * TODO: Don't decrement the TTL until the packet is ready to be put on the wire
							 * and we have the next hop ARP address, although checking should be done
							 * where it is currently being decremented to minimize effort on a doomed packet */
							ip_hdr *ip = get_ip_hdr(aqpe->packet, aqpe->len);
							if (ip->ip_ttl < 255) {
								ip->ip_ttl++;

								/* recalculate checksum */
								bzero(&ip->ip_sum, sizeof(uint16_t));
								uint16_t checksum = htons(compute_ip_checksum(ip));
								ip->ip_sum = checksum;
							}

							send_icmp_packet(sr, aqpe->packet, aqpe->len, ICMP_TYPE_DESTINATION_UNREACHABLE, ICMP_CODE_HOST_UNREACHABLE);
						}
					}

					free(aqpe->packet);
					next_packet_node = cur_packet_node->next;
					//free(cur_packet_node);   /* IS THIS CORRECT TO FREE IT ? */
					node_remove(&(aqe->head), cur_packet_node);
					cur_packet_node = next_packet_node;
				}

				/* free the arp queue entry for this destination ip, and patch the list */
				node_remove(&(get_router_state(sr)->arp_queue), n);
			}
		}

		n = next;
	}
}
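
Note yet another compute_ip_checksum() signature in this example: it takes only the IP header pointer and returns the sum in host byte order, hence the htons() before storing. A sketch under those assumptions (the ip_hl field name and the word-at-a-time loop are assumptions, not this project's code):

/* Assumed single-argument variant matching htons(compute_ip_checksum(ip))
 * above: header length comes from ip_hl, result is in host byte order. */
static uint16_t checksum_hdr_sketch(const ip_hdr *ip)
{
	const uint16_t *p = (const uint16_t *)ip;
	uint32_t sum = 0;
	unsigned int words = ip->ip_hl * 2;	/* 32-bit words -> 16-bit words */

	while (words--)
		sum += ntohs(*p++);		/* accumulate in host order */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* caller converts with htons() */
}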
Example No. 12
static int cb_parse_header(void *addr, int len, struct sysinfo_t *info)
{
	struct cb_header *header;
	unsigned char *ptr = (unsigned char *)addr;
	int i;

	for (i = 0; i < len; i += 16, ptr += 16) {
		header = (struct cb_header *)ptr;
		if (!strncmp((const char *)header->signature, "LBIO", 4))
			break;
	}

	/* We walked the entire space and didn't find anything. */
	if (i >= len)
		return -1;

	if (!header->table_bytes)
		return 0;

	/* Make sure the checksums match. */
	if (!ip_checksum_ok(header, sizeof(*header)))
		return -1;

	if (compute_ip_checksum(ptr + sizeof(*header), header->table_bytes) !=
	    header->table_checksum)
		return -1;

	/* Now, walk the tables. */
	ptr += header->header_bytes;

	/* Initialize some fields to sentinel values. */
	info->vbnv_start = info->vbnv_size = (uint32_t)(-1);

	for (i = 0; i < header->table_entries; i++) {
		struct cb_record *rec = (struct cb_record *)ptr;

		/* We only care about a few tags here (maybe more later). */
		switch (rec->tag) {
		case CB_TAG_FORWARD:
			return cb_parse_header(
				(void *)(unsigned long)
				((struct cb_forward *)rec)->forward,
				len, info);
		case CB_TAG_MEMORY:
			cb_parse_memory(ptr, info);
			break;
		case CB_TAG_SERIAL:
			cb_parse_serial(ptr, info);
			break;
		case CB_TAG_VERSION:
			cb_parse_string(ptr, &info->version);
			break;
		case CB_TAG_EXTRA_VERSION:
			cb_parse_string(ptr, &info->extra_version);
			break;
		case CB_TAG_BUILD:
			cb_parse_string(ptr, &info->build);
			break;
		case CB_TAG_COMPILE_TIME:
			cb_parse_string(ptr, &info->compile_time);
			break;
		case CB_TAG_COMPILE_BY:
			cb_parse_string(ptr, &info->compile_by);
			break;
		case CB_TAG_COMPILE_HOST:
			cb_parse_string(ptr, &info->compile_host);
			break;
		case CB_TAG_COMPILE_DOMAIN:
			cb_parse_string(ptr, &info->compile_domain);
			break;
		case CB_TAG_COMPILER:
			cb_parse_string(ptr, &info->compiler);
			break;
		case CB_TAG_LINKER:
			cb_parse_string(ptr, &info->linker);
			break;
		case CB_TAG_ASSEMBLER:
			cb_parse_string(ptr, &info->assembler);
			break;
		/*
		 * FIXME we should warn on serial if coreboot set up a
		 * framebuffer but the payload does not know about it.
		 */
		case CB_TAG_FRAMEBUFFER:
			cb_parse_framebuffer(ptr, info);
			break;
		case CB_TAG_GPIO:
			cb_parse_gpios(ptr, info);
			break;
		case CB_TAG_VDAT:
			cb_parse_vdat(ptr, info);
			break;
		case CB_TAG_TIMESTAMPS:
			cb_parse_tstamp(ptr, info);
			break;
		case CB_TAG_CBMEM_CONSOLE:
			cb_parse_cbmem_cons(ptr, info);
			break;
		case CB_TAG_VBNV:
			cb_parse_vbnv(ptr, info);
			break;
		}

		ptr += rec->size;
	}

	return 1;
}
Example No. 13
/****************************************************************************
 * lbtable_scan
 *
 * Scan the chunk of memory specified by 'start' and 'end' for a coreboot
 * table.  The first 4 bytes of the table are marked by the signature
 * { 'L', 'B', 'I', 'O' }.  'start' and 'end' indicate the addresses of the
 * first and last bytes of the chunk of memory to be scanned.  For instance,
 * values of 0x10000000 and 0x1000ffff for 'start' and 'end' specify a 64k
 * chunk of memory starting at address 0x10000000.  'start' and 'end' are
 * virtual addresses in the address space of the current process.  They
 * represent a chunk of memory obtained by calling mmap() on /dev/mem.
 *
 * If a coreboot table is found, return a pointer to it.  Otherwise return
 * NULL.  On return, *bad_header_count and *bad_table_count are set as
 * follows:
 *
 *     *bad_header_count:
 *         Indicates the number of times in which a valid signature was found
 *         but the header checksum was invalid.
 *
 *     *bad_table_count:
 *         Indicates the number of times in which a header with a valid
 *         checksum was found but the table checksum was invalid.
 ****************************************************************************/
static const struct lb_header *lbtable_scan(unsigned long start,
					    unsigned long end,
					    int *bad_header_count,
					    int *bad_table_count)
{
	static const char signature[] = { 'L', 'B', 'I', 'O' };
	const struct lb_header *table;
	const struct lb_forward *forward;
	const uint32_t *p;
	uint32_t sig;

	assert(end >= start);
	sig = (*((const uint32_t *)signature));
	table = NULL;
	*bad_header_count = 0;
	*bad_table_count = 0;

	/* Look for signature.  Table is aligned on 16-byte boundary.  Therefore
	 * only check every fourth 32-bit memory word.  As the loop is coded below,
	 * this function will behave in a reasonable manner for ALL possible values
	 * for 'start' and 'end': even weird boundary cases like 0x00000000 and
	 * 0xffffffff on a 32-bit architecture.
	 */
	for (p = (const uint32_t *)start;
	     (((unsigned long)p) <= end) &&
	     ((end - (unsigned long)p) >= (sizeof(uint32_t) - 1)); p += 4) {
		if (*p != sig)
			continue;

		/* We found a valid signature. */
		table = (const struct lb_header *)p;

		/* validate header checksum */
		if (compute_ip_checksum((void *)table, sizeof(*table))) {
			(*bad_header_count)++;
			continue;
		}

		/* validate table checksum */
		if (table->table_checksum !=
		    compute_ip_checksum(((char *)table) + sizeof(*table),
					table->table_bytes)) {
			(*bad_table_count)++;
			continue;
		}

		/* checksums are ok: we found it! */
		/* But it may just be a forwarding table, so look if there's a forwarder */
		lbtable = table;
		forward = (struct lb_forward *)find_lbrec(LB_TAG_FORWARD);
		lbtable = NULL;

		if (forward) {
			uint64_t new_phys = forward->forward;

			new_phys &= ~(getpagesize() - 1);

			munmap((void *)low_phys_mem, BYTES_TO_MAP);
			if ((low_phys_mem =
			     mmap(NULL, BYTES_TO_MAP, PROT_READ, MAP_SHARED, fd,
				  (off_t) new_phys)) == MAP_FAILED) {
				fprintf(stderr,
					"%s: Failed to mmap /dev/mem: %s\n",
					prog_name, strerror(errno));
				exit(1);
			}
			low_phys_base = new_phys;
			table =
			    lbtable_scan(phystov(low_phys_base),
					 phystov(low_phys_base + BYTES_TO_MAP),
					 bad_header_count, bad_table_count);
		}
		return table;
	}

	return NULL;
}