Example #1
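Initialization of the network interface (netif) component: it takes the network lock, initializes the static thread-map vector and both ring buffers, registers the transmit-header region via cos_buff_mgmt(COS_BM_XMIT_REGION, ...), creates the wildcard brand (see Example #2), and pre-populates the wildcard ring with NUM_WILDCARD_BUFFS buffers of MTU bytes each.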
static int init(void) 
{
	unsigned short int i;
	void *b;

	lock_static_init(&netif_lock);

	NET_LOCK_TAKE();

	cos_vect_init_static(&tmap);
	
	rb_init(&rb1_md_wildcard, &rb1);
	rb_init(&rb2_md, &rb2);
	/* Setup the region from which headers will be transmitted. */
	if (cos_buff_mgmt(COS_BM_XMIT_REGION, &xmit_headers, sizeof(xmit_headers), 0)) {
		prints("net: error setting up xmit region.");
	}
	/* Wildcard upcall */
	if (cos_net_create_net_brand(0, &rb1_md_wildcard)) BUG();
	for (i = 0 ; i < NUM_WILDCARD_BUFFS ; i++) {
		if (!(b = alloc_rb_buff(&rb1_md_wildcard))) {
			prints("net: could not allocate the ring buffer.");
		}
		if (rb_add_buff(&rb1_md_wildcard, b, MTU)) {
			prints("net: could not populate the ring with a buffer.");
		}
	}
	NET_LOCK_RELEASE();

	return 0;
}
Example #2
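Creation of the wildcard network brand used above: sched_create_net_brand() returns a brand id for the given port, and cos_buff_mgmt(COS_BM_RECV_RING, ...) registers rb1's packet array as that brand's receive ring.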
static int cos_net_create_net_brand(unsigned short int port, rb_meta_t *rbm)
{
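	/* Create the brand for packets arriving on the given port, and
	 * register rb1's packet array as its receive ring. */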
	wildcard_brand_id = sched_create_net_brand(cos_spd_id(), port);
	assert(wildcard_brand_id > 0);
	if (cos_buff_mgmt(COS_BM_RECV_RING, rb1.packets, sizeof(rb1.packets), wildcard_brand_id)) {
		prints("net: could not setup recv ring.\n");
		return -1;
	}
	return 0;
}
Example #3
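The asynchronous-capability (acap) variant of Example #2: an acap is created within this component for the current thread, bound to the port with sched_create_net_acap(), and given rb1's packet array as its receive ring; as the in-code comment notes, only the server-side acap is used, triggered by the network driver.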
static int cos_net_create_net_acap(unsigned short int port, rb_meta_t *rbm)
{
	int acap;

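	/* Create an async capability (acap) in this component for the
	 * current thread. */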
	acap = cos_async_cap_cntl(COS_ACAP_CREATE, cos_spd_id(), cos_spd_id(), cos_get_thd_id());
	assert(acap);
	/* cli acap not used. The server acap will be triggered by
	 * network driver. */
	wildcard_acap_id = acap & 0xFFFF;
	assert(wildcard_acap_id > 0);

	if (sched_create_net_acap(cos_spd_id(), wildcard_acap_id, port)) return -1;
	if (cos_buff_mgmt(COS_BM_RECV_RING, rb1.packets, sizeof(rb1.packets), wildcard_acap_id)) {
		prints("net: could not setup recv ring.\n");
		return -1;
	}
	return 0;
}
Example #4
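The transmit path: a payload small enough to fit in xmit_headers.headers is copied inline (the fast path), a larger one is described by a single gather entry, and cos_buff_mgmt(COS_BM_XMIT, ...) then sends the prepared data on its way. The commented-out loop is the more general gather implementation that also splits a packet's data when it crosses a page boundary; a standalone sketch of that splitting logic follows the example.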
static int __netif_xmit(char *d, unsigned int sz)
{
	/* If we're just transmitting a TCP packet without data
	 * (e.g. ack), then use the fast path here */
	assert(d && sz > 0);
	xmit_headers.len = 0;
	if (sz <= sizeof(xmit_headers.headers)) {
		memcpy(&xmit_headers.headers, d, sz);
		xmit_headers.len = sz;
		xmit_headers.gather_len = 0;
	} else {
		struct gather_item *gi;
		gi = &xmit_headers.gather_list[0];
		gi->data = d;
		gi->len = sz;
		xmit_headers.gather_len = 1;
	}
	
	/*
	 * Here we would do two things: create a separate gather entry
	 * for each packet, and split a packet's data into multiple
	 * gather entries if it crosses a page boundary.
	 *
	 * This is the general implementation of packet gathering; we
	 * are, of course, currently doing something simpler (the
	 * single-entry path above).
	 */
/* 	for (i = 0 ; p && i < XMIT_HEADERS_GATHER_LEN ; i++) { */
/* 		char *data = p->payload; */
/* 		struct gather_item *gi = &xmit_headers.gather_list[i]; */
/* 		int len_on_page; */

/* 		assert(data && p->len < PAGE_SIZE); */
/* 		gi->data = data; */
/* 		gi->len  = p->len; */
/* 		len_on_page = (unsigned long)round_up_to_page(data) - (unsigned long)data; */
/* 		/\* Data split across pages??? *\/ */
/* 		if (len_on_page < p->len) { */
/* 			int len_on_second = p->len - len_on_page; */

/* 			if (XMIT_HEADERS_GATHER_LEN == i+1) goto segment_err; */
/* 			gi->len  = len_on_page; */
/* 			gi = gi+1; */
/* 			gi->data = data + len_on_page; */
/* 			gi->len  = len_on_second; */
/* 			i++; */
/* 		} */
/* 		assert(p->type != PBUF_POOL); */
/* 		assert(p->ref == 1); */
/* 		p = p->next; */
/* 	} */
/* 	if (unlikely(NULL != p)) goto segment_err; */
/* 	xmit_headers.gather_len = i; */


	/* Send the collection of pbuf data on its way. */
	if (cos_buff_mgmt(COS_BM_XMIT, NULL, 0, 0)) {
		prints("net: could not xmit data.\n");
	}

	return 0;
/* segment_err: */
/* 	printc("net: attempted to xmit too many segments"); */
/* 	goto done; */
}
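For reference, a minimal, self-contained sketch of the page-boundary splitting that the commented-out loop in Example #4 performs. All names here (gather_ent, GATHER_MAX, PAGE_SZ, fill_gather, round_up_to_page_sketch) are hypothetical stand-ins, not Composite APIs; only the splitting logic mirrors the example.

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ    4096UL
#define GATHER_MAX 4

struct gather_ent {
	char         *data;
	unsigned int  len;
};

/* Round an address up to the next page boundary (no-op if already aligned). */
static unsigned long round_up_to_page_sketch(unsigned long addr)
{
	return (addr + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

/*
 * Describe one packet's payload with one or two gather entries: a second
 * entry is needed only when the payload crosses a page boundary.  Returns
 * the number of entries written starting at gl[used], or -1 if the list
 * would overflow (the "segment_err" case in the example).
 */
static int fill_gather(struct gather_ent *gl, int used, char *data, unsigned int len)
{
	unsigned long on_first;

	assert(data && len > 0 && len < PAGE_SZ);
	if (used >= GATHER_MAX) return -1;

	on_first = round_up_to_page_sketch((unsigned long)data) - (unsigned long)data;
	if (on_first == 0) on_first = PAGE_SZ; /* data starts exactly on a boundary */

	gl[used].data = data;
	gl[used].len  = len;
	if (on_first >= len) return 1;           /* entire payload is on one page */

	if (used + 1 >= GATHER_MAX) return -1;   /* no room for the second segment */
	gl[used].len      = (unsigned int)on_first;
	gl[used + 1].data = data + on_first;
	gl[used + 1].len  = len - (unsigned int)on_first;
	return 2;
}

int main(void)
{
	static char buf[3 * PAGE_SZ];
	struct gather_ent gl[GATHER_MAX];
	/* Put a 100-byte payload 40 bytes before a page boundary to force a split. */
	char *payload = (char *)(round_up_to_page_sketch((unsigned long)buf) + PAGE_SZ - 40);
	int n = fill_gather(gl, 0, payload, 100);

	printf("gather entries: %d (%u + %u bytes)\n", n, gl[0].len, n > 1 ? gl[1].len : 0U);
	return 0;
}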