/*
 * Attaches to a shared-memory tube previously created by the peer domain
 * with tube_make().
 *
 * peer_domid   - domain that owns the shared control page and data buffers
 * page_ref     - grant reference of the tube's shared control page
 * peer_port_rx - peer's unbound event channel port for the rx direction
 * peer_port_tx - peer's unbound event channel port for the tx direction
 * data         - opaque argument passed to both interrupt handlers
 *
 * Returns the attached tube, or 0 if allocation fails. Mapping failures
 * are treated as fatal (asserts).
 */
tube_t *tube_attach(domid_t peer_domid, uint32_t page_ref,
		uint32_t peer_port_rx, uint32_t peer_port_tx, void *data)
{
	tube_t *tb = alloc_tube(0);	// 0: attaching side (tube_make() passes 1) - TODO confirm flag meaning
	if (tb == 0)
		return 0;

	// Map the peer's shared control page into our address space
	tube_shared_t *page = tb->page;
	tb->page_map.ref = page_ref;
	tb->page_map.dom = peer_domid;
	tb->page_map.flags = GNTMAP_host_map;
	tb->page_map.host_addr = (uint64_t)page;
	int rs = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &tb->page_map, 1);
	assert(rs == 0);
	assert(tb->page_map.status == GNTST_okay);

	// Fill in map requests for the peer's tx data buffers (first half of
	// bufs_map); the grant refs were published by the peer in the shared page
	for (int i = 0; i < TUBE_SLOTS; i++)
	{
		struct gnttab_map_grant_ref *m = &tb->bufs_map[i];
		m->ref = page->tx.slots[i].gref;
		m->dom = peer_domid;
		m->flags = GNTMAP_host_map;
		m->host_addr = (uint64_t)tb->tx_buffers[i];
	}

	// ...and for the rx data buffers (second half of bufs_map)
	for (int i = 0; i < TUBE_SLOTS; i++)
	{
		struct gnttab_map_grant_ref *m = &tb->bufs_map[i+TUBE_SLOTS];
		m->ref = page->rx.slots[i].gref;
		m->dom = peer_domid;
		m->flags = GNTMAP_host_map;
		m->host_addr = (uint64_t)tb->rx_buffers[i];
	}

	// Map all 2*TUBE_SLOTS buffers with a single hypercall
	rs = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, tb->bufs_map, 2*TUBE_SLOTS);
	assert(rs == 0);
	for (int i = 0; i < 2*TUBE_SLOTS; i++)
	{
		assert(tb->bufs_map[i].status == GNTST_okay);
		rmb();	//dark
			// NOTE(review): purpose of this per-iteration read barrier
			// is unclear ("dark"); confirm before touching
	}

	// Bind to the peer's unbound ports
	tb->evtchn_tx = event_bind_interdomain(peer_domid, peer_port_tx);
	tb->evtchn_rx = event_bind_interdomain(peer_domid, peer_port_rx);

	// NOTE(review): handler assignment is mirrored w.r.t. tube_make(),
	// where tube_int is bound to evtchn_tx; presumably intentional since
	// the peer's tx direction is our receive path - confirm
	event_bind(tb->evtchn_rx, tube_int, data);
	event_bind(tb->evtchn_tx, tube_send_int, data);

	return tb;
}
/*
 * Allocates a pore of 'size' bytes, initializes its common header, and
 * links it at the head of the global active_pores list.
 *
 * tag             - term tag identifying the pore flavor
 * size            - total allocation size in bytes (>= sizeof(pore_t))
 * owner           - owning process term
 * destroy_private - flavor-specific destructor, saved for later teardown
 * evtchn          - event channel to service, or NO_EVENT for none
 *
 * Returns the new pore, or 0 if the allocation fails.
 */
pore_t *pore_make_N(term_t tag, uint32_t size, term_t owner,
		void (*destroy_private)(pore_t *), uint32_t evtchn)
{
	memnode_t *node = nalloc_N(size);
	if (node == 0)
		return 0;

	pore_t *pore = (pore_t *)node->starts;
	memset(pore, 0, size);

	pore->eid = tag_short_eid(next_pore_id++);
	pore->tag = tag;
	pore->owner = owner;
	pore->destroy_private = destroy_private;
	pore->home = node;
	pore->evtchn = evtchn;

	if (evtchn != NO_EVENT)
		event_bind(evtchn, pore_universal_handler, pore);

	// Push onto the intrusive doubly-linked active_pores list; 'ref'
	// points back at whichever pointer currently holds this pore
	pore->next = active_pores;
	pore->ref = &active_pores;
	if (active_pores != 0)
		active_pores->ref = &pore->next;
	active_pores = pore;

	return pore;
}
/*
 * Creates the making side of a tube: allocates the shared control page
 * and data buffers, grants the peer access to all of them, and allocates
 * the two unbound notification channels the peer will bind to (see
 * tube_attach() for the other side).
 *
 * peer_domid - domain that will attach to this tube
 * data       - opaque argument passed to both interrupt handlers
 *
 * Returns the new tube, or 0 if allocation fails.
 */
tube_t *tube_make(domid_t peer_domid, void *data)
{
	tube_t *tube = alloc_tube(1);
	if (tube == 0)
		return 0;

	tube_shared_t *shared = tube->page;
	memset(shared, 0, PAGE_SIZE);

	// Grant the peer access to the control page, then allocate the two
	// unbound event channels
	grants_allow_access(&tube->page_ref, peer_domid, virt_to_mfn(shared));
	tube->evtchn_tx = event_alloc_unbound(peer_domid);
	tube->evtchn_rx = event_alloc_unbound(peer_domid);

	// Publish grant references for the data buffers in the shared page so
	// the attaching side can map them
	for (int slot = 0; slot < TUBE_SLOTS; slot++)
		grants_allow_access(&shared->tx.slots[slot].gref,
				peer_domid, virt_to_mfn(tube->tx_buffers[slot]));
	for (int slot = 0; slot < TUBE_SLOTS; slot++)
		grants_allow_access(&shared->rx.slots[slot].gref,
				peer_domid, virt_to_mfn(tube->rx_buffers[slot]));

	event_bind(tube->evtchn_tx, tube_int, data);
	event_bind(tube->evtchn_rx, tube_send_int, data);

	return tube;
}
void console_init(struct xencons_interface *intf, uint32_t chan) { if (console.is_initialized) fatal_error("console_init: already initialized"); console.intf = intf; console.attached = 0; console.chan = chan; event_bind(console.chan, console_int, 0); // // ECMA-48 modes: // // - set insert mode // // NB: mode changes not undone on shutdown // char modes[] = "\x1b[4h"; console_write(modes, sizeof(modes) -1); console.is_initialized = true; }
/*
 * Discovers and brings up every network frontend (vif) advertised in
 * XenStore under device/vif/<N>. For each one it allocates shared rx/tx
 * rings and packet buffers, grants the backend access, sets up the event
 * channel, performs the XenStore handshake (rings, channel, features,
 * state = Connected), reads the MAC address, and posts the initial batch
 * of receive requests. Frontends are appended, in index order, to the
 * global net_front_ends list; num_net_front_ends is set at the end.
 */
void netfe_init(void)
{
	int index = 0;
	netfe_t **link = &net_front_ends;

	// Probe indices 0,1,2,... until a vif is missing in XenStore
	while (1)
	{
		int n;
		char xs_key[256];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/backend-id", index);
		int rs = xenstore_read_int(&n, xs_key);
		if (rs != 0)
			break;

		// FE/(index) is present
		domid_t backend_id = (domid_t)n;

		// NOTE(review): mm_alloc_pages() result is not checked before
		// memset, unlike the asserts below - confirm it cannot return 0
		netfe_t *fe = (netfe_t *)mm_alloc_pages(PSIZE(sizeof(netfe_t)));
		memset(fe, 0, sizeof(*fe));

		// setup shared rings
		fe->rxs = (netif_rx_sring_t *)mm_alloc_page();
		assert(fe->rxs != 0);
		fe->txs = (netif_tx_sring_t *)mm_alloc_page();
		assert(fe->txs != 0);

		SHARED_RING_INIT(fe->rxs);
		SHARED_RING_INIT(fe->txs);

		FRONT_RING_INIT(&fe->rx_ring, fe->rxs, PAGE_SIZE);
		FRONT_RING_INIT(&fe->tx_ring, fe->txs, PAGE_SIZE);

		// Grant the backend access to both ring pages
		grants_allow_access(&fe->rx_ref, backend_id, virt_to_mfn(fe->rxs));
		grants_allow_access(&fe->tx_ref, backend_id, virt_to_mfn(fe->txs));

		// set up receive buffers: one granted page per rx buffer
		for (int i = 0; i < NR_RX_BUFFERS; i++)
		{
			fe->rx_buffers[i] = mm_alloc_page();
			assert(fe->rx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->rx_buffers[i]);
			grants_allow_access(&fe->rx_buf_refs[i], backend_id, mfn);
		}

		// set up send buffers, threading them onto the free list
		// (free_tx_bufs[i] holds the index of the next free buffer)
		fe->free_tx_head = NO_TX_BUFFER;
		for (int i = 0; i < NR_TX_BUFFERS; i++)
		{
			fe->tx_buffers[i] = mm_alloc_page();
			assert(fe->tx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->tx_buffers[i]);
			grants_allow_access(&fe->tx_buf_refs[i], backend_id, mfn);
			fe->free_tx_bufs[i] = fe->free_tx_head;
			fe->free_tx_head = i;
		}

		// set up interrupt
		fe->evtchn = event_alloc_unbound(backend_id);
		event_bind(fe->evtchn, netfe_int, (void *)fe);

		// XenStore handshake: publish ring refs, event channel and
		// feature flags, then move the device to XenbusStateConnected
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/rx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->rx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/tx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->tx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/event-channel", index);
		rs = xenstore_write_uint(xs_key, fe->evtchn);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/request-rx-copy", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/feature-no-csum-offload", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/feature-rx-notify", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/state", index);
		rs = xenstore_write(xs_key, "4");	// XenbusStateConnected
		assert(rs == 0);

		// read MAC address
		char buf[64];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/mac", index);
		rs = xenstore_read(xs_key, buf, sizeof(buf));
		assert(rs == 0);
		rs = parse_mac(buf, fe->mac);
		assert(rs == 0);
		fe->mac_len = ETH_ALEN;

		printk("\reth%d: MAC %02x:%02x:%02x:%02x:%02x:%02x\r\n", index,
			fe->mac[0], fe->mac[1], fe->mac[2],
			fe->mac[3], fe->mac[4], fe->mac[5]);

		//
		// Publish EXT_RX_BUFFERS requests only and replenish then to this number
		// during each interrupt handler invocation.
		//
		for (int i = 0; i < EXT_RX_BUFFERS; i++)
		{
			netif_rx_request_t *req = RING_GET_REQUEST(&fe->rx_ring, fe->rx_ring.req_prod_pvt);
			req->id = i; //rx_id++;
			req->gref = fe->rx_buf_refs[i];
			fe->rx_ring.req_prod_pvt++;
		}
		RING_PUSH_REQUESTS(&fe->rx_ring);
		event_kick(fe->evtchn);

		fe->index = index++;

		//fe->next = 0;
		//fe->attached_lwip_netif = 0;
		//fe->attached_outlet = 0;

		// add to net_front_ends list
		*link = fe;
		link = &fe->next;
	}

	num_net_front_ends = index;
}