/*
 * init_rx_buffers() - rebuild the netfront RX ring from the fixed per-slot
 * buffers in dev->rx_buffers[].
 *
 * Every one of the NET_RX_RING_SIZE slots gets a fresh grant reference
 * (gnttab_grant_access() with the readonly flag clear, so the backend may
 * write the received frame into the page); the reference is stored both in
 * the buffer and in the corresponding ring request.  The producer index is
 * then published, the backend notified over dev->evtchn if the ring macro
 * requests it, and rsp_event is set so the next response raises an event.
 */
void init_rx_buffers(struct netfront_dev *dev) { int i, requeue_idx; netif_rx_request_t *req; int notify; /* Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { struct net_buffer* buf = &dev->rx_buffers[requeue_idx]; req = RING_GET_REQUEST(&dev->rx, requeue_idx); buf->gref = req->gref = gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0); req->id = requeue_idx; requeue_idx++; } dev->rx.req_prod_pvt = requeue_idx; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify); if (notify) notify_remote_via_evtchn(dev->evtchn); dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1; }
/*
 * OCaml stub: grant domain Int_val(v_domid) access to the page behind
 * v_iopage under the pre-allocated grant reference Int_val(v_ref).
 * Read-only grant iff v_readonly is true.  Returns unit.
 *
 * FIX: register the value parameters with CAMLparam4 and return through
 * CAMLreturn, per the OCaml FFI rules — this matches the sibling stub in
 * this file that already follows the convention.  (No allocation happens
 * here, so the original was not observably broken, but the convention
 * keeps the stub safe against future changes that do allocate.)
 */
CAMLprim value
caml_gnttab_grant_access(value v_ref, value v_iopage, value v_domid,
                         value v_readonly)
{
    CAMLparam4(v_ref, v_iopage, v_domid, v_readonly);
    grant_ref_t ref = Int_val(v_ref);
    void *page = base_page_of(v_iopage);

    gnttab_grant_access(ref, page, Int_val(v_domid), Bool_val(v_readonly));
    CAMLreturn(Val_unit);
}
/*
 * blkfront_aio() - queue one asynchronous block I/O described by aiocbp on
 * dev's ring; 'write' selects BLKIF_OP_WRITE vs BLKIF_OP_READ.
 *
 * Offset, length and buffer address must all be sector-aligned (ASSERTs).
 * The byte range is split into up to BLKIF_MAX_SEGMENTS_PER_REQUEST
 * page-sized segments; first_sect/last_sect then trim the first and last
 * segment to the actual sub-page range.  For reads, one byte of each
 * destination page is written before granting so any copy-on-write fault
 * happens now, not while the backend holds the grant (the barrier() orders
 * the touch before the grant).  Each page is granted to dev->dom with the
 * readonly flag equal to 'write' (writes grant read-only, reads writable).
 * Finally the producer index is advanced, wmb() orders the request body
 * before the push, and the backend is notified when required.
 * NOTE(review): the sector constant 512 is hard-coded in sector_number and
 * the seg trimming — presumably dev->info.sector_size == 512 is assumed
 * here; confirm against callers.
 */
/* Issue an aio */ void blkfront_aio(struct blkfront_aiocb *aiocbp, int write) { struct blkfront_dev *dev = aiocbp->aio_dev; struct blkif_request *req; RING_IDX i; int notify; int n, j; uintptr_t start, end; // Can't io at non-sector-aligned location ASSERT(!(aiocbp->aio_offset & (dev->info.sector_size-1))); // Can't io non-sector-sized amounts ASSERT(!(aiocbp->aio_nbytes & (dev->info.sector_size-1))); // Can't io non-sector-aligned buffer ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->info.sector_size-1))); start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK; end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK; aiocbp->n = n = (end - start) / PAGE_SIZE; /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB, * so max 44KB can't happen */ ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST); blkfront_wait_slot(dev); i = dev->ring.req_prod_pvt; req = RING_GET_REQUEST(&dev->ring, i); req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ; req->nr_segments = n; req->handle = dev->handle; req->id = (uintptr_t) aiocbp; req->sector_number = aiocbp->aio_offset / 512; for (j = 0; j < n; j++) { req->seg[j].first_sect = 0; req->seg[j].last_sect = PAGE_SIZE / 512 - 1; } req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / 512; req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / 512; for (j = 0; j < n; j++) { uintptr_t data = start + j * PAGE_SIZE; if (!write) { /* Trigger CoW if needed */ *(char*)(data + (req->seg[j].first_sect << 9)) = 0; barrier(); } aiocbp->gref[j] = req->seg[j].gref = gnttab_grant_access(dev->dom, virtual_to_mfn(data), write); } dev->ring.req_prod_pvt = i + 1; wmb(); RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify); if(notify) notify_remote_via_evtchn(dev->evtchn); }
/*
 * OCaml stub: grant domain Int_val(v_domid) access, under grant reference
 * Int32_val(v_ref), to the page described by v_bs.  Read-only grant iff
 * v_readonly is true.
 * v_bs is presumably a (buffer, bit-offset) pair: field 0 holds the byte
 * payload and field 1 an offset in bits (divided by 8 to get bytes) —
 * TODO confirm against the OCaml caller.  The resulting address must be
 * page-aligned (ASSERT), since grants operate on whole pages.
 * Follows the CAMLparam4/CAMLreturn FFI convention.
 */
CAMLprim value caml_gnttab_grant_access(value v_ref, value v_bs, value v_domid, value v_readonly) { CAMLparam4(v_ref, v_bs, v_domid, v_readonly); grant_ref_t ref = Int32_val(v_ref); char *page = String_val(Field(v_bs, 0)) + (Int_val(Field(v_bs,1)) / 8); ASSERT(((unsigned long)page) % PAGE_SIZE == 0); gnttab_grant_access(ref, page, Int_val(v_domid), Bool_val(v_readonly)); CAMLreturn(Val_unit); }
/*
 * Allocate one fresh page, return its virtual address through *map, and
 * grant domain 0 access to it (readonly flag clear, i.e. writable).
 * Returns the new grant reference.
 */
grant_ref_t gnttab_alloc_and_grant(void **map)
{
    void *page = (void *)alloc_page();

    *map = page;
    return gnttab_grant_access(0, virt_to_mfn(page), 0);
}
/*
 * netfront_fillup_rx_buffers() - top up the RX ring with receive requests
 * until it is full (prod - rsp_cons == NET_RX_RING_SIZE).
 *
 * Non-persistent-grants build: each slot takes a buffer from the pool
 * (stopping early if the pool is empty), grants its page writable to the
 * backend, and remembers it in dev->rx_buffers[id]; interrupts are
 * disabled around the loop because the pool is shared with the interrupt
 * path.  Persistent-grants build: the slot's pre-granted reference is
 * simply reused.  If at least one slot was added, the producer index is
 * published after a wmb() and the backend notified on dev->rx_evtchn when
 * the ring macro asks for it.
 */
static void netfront_fillup_rx_buffers(struct netfront_dev *dev) { RING_IDX prod; struct netif_rx_request *req; grant_ref_t ref; unsigned short id; int notify; #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS struct net_rxbuffer* buf; int flags; #endif #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS local_irq_save(flags); #endif /* fill-up slots again */ for (prod = dev->rx.req_prod_pvt; prod - dev->rx.rsp_cons < NET_RX_RING_SIZE; prod++) { id = netfront_rxidx(prod); #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS buf = netfront_get_rxbuffer(dev); if (buf == NULL) break; /* out of rx buffers */ BUG_ON(buf->page == NULL); ref = gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0); buf->gref = ref; BUG_ON(ref == GRANT_INVALID_REF); dev->rx_buffers[id] = buf; #else ref = dev->rx_buffers[id].gref; #endif req = RING_GET_REQUEST(&dev->rx, prod); req->id = id; req->gref = ref; } #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS local_irq_restore(flags); #endif if (dev->rx.req_prod_pvt != prod) { dev->rx.req_prod_pvt = prod; wmb(); RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify); #ifdef CONFIG_SELECT_POLL files[dev->fd].read = 0; #endif if (notify) notify_remote_via_evtchn(dev->rx_evtchn); } }
/*
 * netfront_xmit() - transmit one frame (len <= PAGE_SIZE) by copying it
 * into a per-id TX page and queuing a single TX request.
 *
 * dev->tx_sem throttles the number of outstanding transmissions.  A TX id
 * is taken from the freelist with interrupts disabled (the freelist is
 * shared with the interrupt-driven reclaim path), the slot's page is
 * lazily allocated on first use, the frame is copied in, and the page is
 * granted read-only (last gnttab_grant_access argument = 1) to the
 * backend.  wmb() orders the request body before the producer update;
 * network_tx_buf_gc() then reclaims any completed slots with IRQs off.
 */
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len) { int flags; struct netif_tx_request *tx; RING_IDX i; int notify; unsigned short id; struct net_buffer* buf; void* page; //printf("netfront_xmit\n"); //farewellkou BUG_ON(len > PAGE_SIZE); down(&dev->tx_sem); local_irq_save(flags); id = get_id_from_freelist(dev->tx_freelist); local_irq_restore(flags); buf = &dev->tx_buffers[id]; page = buf->page; if (!page) page = buf->page = (char*) alloc_page(); i = dev->tx.req_prod_pvt; tx = RING_GET_REQUEST(&dev->tx, i); memcpy(page,data,len); buf->gref = tx->gref = gnttab_grant_access(dev->dom,virt_to_mfn(page),1); tx->offset=0; tx->size = len; tx->flags=0; tx->id = id; dev->tx.req_prod_pvt = i + 1; wmb(); RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify); if(notify) notify_remote_via_evtchn(dev->evtchn); local_irq_save(flags); network_tx_buf_gc(dev); local_irq_restore(flags); }
static int tpmfront_connect(struct tpmfront_dev* dev) { char* err; /* Create shared page */ dev->page = (vtpm_shared_page_t*) alloc_page(); if(dev->page == NULL) { TPMFRONT_ERR("Unable to allocate page for shared memory\n"); goto error; } memset(dev->page, 0, PAGE_SIZE); dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0); TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref); /*Create event channel */ if(evtchn_alloc_unbound(dev->bedomid, tpmfront_handler, dev, &dev->evtchn)) { TPMFRONT_ERR("Unable to allocate event channel\n"); goto error_postmap; } unmask_evtchn(dev->evtchn); TPMFRONT_DEBUG("event channel is %lu\n", (unsigned long) dev->evtchn); /* Write the entries to xenstore */ if(publish_xenbus(dev)) { goto error_postevtchn; } /* Change state to connected */ dev->state = XenbusStateConnected; /* Tell the backend that we are ready */ if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", dev->state))) { TPMFRONT_ERR("Unable to write to xenstore %s/state, value=%u", dev->nodename, XenbusStateConnected); free(err); goto error; } return 0; error_postevtchn: mask_evtchn(dev->evtchn); unbind_evtchn(dev->evtchn); error_postmap: gnttab_end_access(dev->ring_ref); free_page(dev->page); error: return -1; }
/*
 * netfront_xmit() - raw (non-GSO) transmit path: copy one frame of at most
 * PAGE_SIZE bytes into a TX buffer page and queue it.
 *
 * With CONFIG_NETMAP the frame is handed to the netmap ring instead.  If
 * no TX slot is available the frame is dropped (goto out) — callers get no
 * error.  In the non-persistent-grants build the slot's page is granted
 * with the readonly flag 0, i.e. writable by the backend;
 * NOTE(review): the sibling netfront_xmit variant in this file grants TX
 * pages read-only (flag 1) — confirm which is intended for this tree.
 * NETTXF_data_validated tells the backend the checksum needs no re-check.
 * Completed slots are reclaimed by netfront_tx_buf_gc() with IRQs off.
 */
/** * Transmit function for raw buffers (non-GSO/TCO) */ void netfront_xmit(struct netfront_dev *dev, unsigned char *data, int len) { int flags; struct netif_tx_request *tx; struct net_txbuffer* buf; void* page; #ifdef CONFIG_NETMAP if (dev->netmap) { netmap_netfront_xmit(dev->na, data, len); return; } #endif BUG_ON(len > PAGE_SIZE); if (!netfront_tx_available(dev, 1)) goto out; tx = netfront_get_page(dev); ASSERT(tx != NULL); buf = &dev->tx_buffers[tx->id]; page = buf->page; #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS tx->gref = buf->gref = gnttab_grant_access(dev->dom, virt_to_mfn(page), 0); BUG_ON(tx->gref == GRANT_INVALID_REF); #endif NETIF_MEMCPY(page, data, len); tx->flags |= (NETTXF_data_validated); tx->size = len; #ifdef CONFIG_NETFRONT_STATS ++dev->txpkts; dev->txbytes += len; #endif netfront_xmit_notify(dev); dprintk("tx: raw %d\n", len); out: local_irq_save(flags); netfront_tx_buf_gc(dev); local_irq_restore(flags); }
/*
 * init_rx_buffers() - (re)build the RX ring.
 *
 * Non-persistent-grants build: draw buffers from the pool, stopping early
 * if it is exhausted; persistent build: reuse the fixed per-slot buffers.
 * Each page is granted writable to the backend (readonly flag 0) and the
 * reference stored in both the buffer and the ring request.  Requests are
 * then pushed, the backend notified on dev->rx_evtchn if required, and
 * rsp_event set so the next response raises an event.
 */
void init_rx_buffers(struct netfront_dev *dev) { struct net_rxbuffer* buf; int i, requeue_idx; netif_rx_request_t *req; int notify; /* Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { req = RING_GET_REQUEST(&dev->rx, requeue_idx); #ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS buf = netfront_get_rxbuffer(dev); if (buf == NULL) break; /* out of rx buffers */ dev->rx_buffers[requeue_idx] = buf; #else buf = &dev->rx_buffers[requeue_idx]; #endif buf->gref = req->gref = gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0); BUG_ON(buf->gref == GRANT_INVALID_REF); req->id = requeue_idx; requeue_idx++; } dev->rx.req_prod_pvt = requeue_idx; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify); if (notify) notify_remote_via_evtchn(dev->rx_evtchn); dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1; }
void network_rx(struct netfront_dev *dev) { RING_IDX rp,cons,req_prod; struct netif_rx_response *rx; int nr_consumed, some, more, i, notify; moretodo: rp = dev->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ cons = dev->rx.rsp_cons; nr_consumed = 0; some = 0; while ((cons != rp) && !some) { struct net_buffer* buf; unsigned char* page; int id; rx = RING_GET_RESPONSE(&dev->rx, cons); if (rx->flags & NETRXF_extra_info) { printk("+++++++++++++++++++++ we have extras!\n"); continue; } if (rx->status == NETIF_RSP_NULL) continue; id = rx->id; BUG_ON(id >= NET_TX_RING_SIZE); buf = &dev->rx_buffers[id]; page = (unsigned char*)buf->page; gnttab_end_access(buf->gref); if(rx->status>0) { #ifdef HAVE_LIBC if (dev->netif_rx == NETIF_SELECT_RX) { int len = rx->status; ASSERT(current == main_thread); if (len > dev->len) len = dev->len; memcpy(dev->data, page+rx->offset, len); dev->rlen = len; some = 1; } else #endif dev->netif_rx(page+rx->offset,rx->status); } nr_consumed++; ++cons; } dev->rx.rsp_cons=cons; RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more); if(more && !some) goto moretodo; req_prod = dev->rx.req_prod_pvt; for(i=0; i<nr_consumed; i++) { int id = xennet_rxidx(req_prod + i); netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i); struct net_buffer* buf = &dev->rx_buffers[id]; void* page = buf->page; /* We are sure to have free gnttab entries since they got released above */ buf->gref = req->gref = gnttab_grant_access(dev->dom,virt_to_mfn(page),0); req->id = id; } wmb(); dev->rx.req_prod_pvt = req_prod + i; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify); if (notify) notify_remote_via_evtchn(dev->evtchn); }
/*
 * netfront_make_txreqs() - fill TX ring requests for a (possibly chained,
 * possibly multi-page) pbuf p, starting from the already-reserved request
 * 'tx'; returns the last request used and counts extra slots in *slots.
 *
 * Every page touched by each pbuf segment gets its own slot, granted
 * read-only to the backend; offset/size are trimmed on the first and last
 * slot of a segment so only the payload bytes are described.  All but the
 * final slot carry NETTXF_more_data, and follow-up slots are obtained via
 * netfront_get_page() (callers must have reserved enough slots already —
 * hence the BUG_ON).  Per the netif protocol the FIRST request's size is
 * rewritten to the total packet length at the end; the backend derives the
 * first fragment's true size by subtracting the others.  The pbuf chain is
 * ref-counted and remembered on the LAST buffer so it is released only
 * when the final slot completes.
 */
static inline struct netif_tx_request *netfront_make_txreqs(struct netfront_dev *dev, struct netif_tx_request *tx, struct pbuf *p, int *slots) { struct netif_tx_request *first_tx = tx; struct net_txbuffer *buf; struct pbuf *first_p = p; struct pbuf *q; unsigned long tot_len; unsigned long s; void *page; int q_slots; size_t plen, left; tot_len = 0; buf = &dev->tx_buffers[tx->id]; /* map pages of pbuf */ for (q = p; q != NULL; q = q->next) { left = q->len; q_slots = (int) _count_pages(q->payload, q->len); /* grant pages of pbuf */ for (s = 0; s < q_slots; ++s) { /* read only mapping */ page = (void *)((((unsigned long) q->payload) & PAGE_MASK) + (s * PAGE_SIZE)); tx->gref = buf->gref = gnttab_grant_access(dev->dom, virtual_to_mfn(page), 0); BUG_ON(tx->gref == GRANT_INVALID_REF); if (s == 0) /* first slot */ tx->offset = ((unsigned long) q->payload) & ~PAGE_MASK; else tx->offset = 0; if ((s + 1) == q_slots) /* last slot */ tx->size = ((((unsigned long) q->payload) + q->len) & ~PAGE_MASK) - tx->offset; else tx->size = PAGE_SIZE - tx->offset; tot_len += tx->size; if ((s + 1) < q_slots || q->next != NULL) { /* there will be a follow-up slot */ tx->flags |= NETTXF_more_data; tx = netfront_get_page(dev); /* next slot */ BUG_ON(tx == NULL); /* out of memory -> this should have been catched before calling this function */ (*slots)++; buf = &dev->tx_buffers[tx->id]; } } } /* * The first fragment has the entire packet * size, subsequent fragments have just the * fragment size. The backend works out the * true size of the first fragment by * subtracting the sizes of the other * fragments. */ BUG_ON(first_p->tot_len != tot_len); /* broken pbuf?! */ first_tx->size = tot_len; pbuf_ref(first_p); /* increase ref count */ buf->pbuf = first_p; /* remember chain for later release on last buf */ return tx; }
/*
 * init_netfront() - create and connect a netfront device.
 * _nodename: xenstore device node (default "device/vif/<n>"); thenetif_rx:
 * receive callback (defaults to netif_rx); rawmac/ip: optional outputs.
 *
 * Allocates TX/RX shared rings, grants them to the backend, seeds the RX
 * ring, publishes ring refs + event channel in a xenbus transaction, then
 * waits for the backend to reach XenbusStateConnected.  Returns the device
 * or NULL on error (cleaning up via free_netfront()).
 *
 * BUGFIX: copying a caller-supplied nodename used
 * strncpy(nodename, _nodename, strlen(nodename)) — strlen() on the
 * *uninitialized* destination buffer is undefined behavior; replaced with
 * a bounded, always-terminated snprintf.  Also: free the string returned
 * by the successful xenbus_transaction_end(), and print the retry message
 * before jumping (it was dead code after 'goto again').
 */
struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip) { xenbus_transaction_t xbt; char* err; char* message=NULL; struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int retry=0; int i; char* msg; char nodename[256]; char path[256]; struct netfront_dev *dev; static int netfrontends = 0; if (!_nodename) snprintf(nodename, sizeof(nodename), "device/vif/%d", netfrontends); else snprintf(nodename, sizeof(nodename), "%s", _nodename); netfrontends++; if (!thenetif_rx) thenetif_rx = netif_rx; printk("************************ NETFRONT for %s **********\n\n\n", nodename); dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); #ifdef HAVE_LIBC dev->fd = -1; #endif printk("net TX ring size %d\n", NET_TX_RING_SIZE); printk("net RX ring size %d\n", NET_RX_RING_SIZE); init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE); for(i=0;i<NET_TX_RING_SIZE;i++) { add_id_to_freelist(i,dev->tx_freelist); dev->tx_buffers[i].page = NULL; } for(i=0;i<NET_RX_RING_SIZE;i++) { /* TODO: that's a lot of memory */ dev->rx_buffers[i].page = (char*)alloc_page(); } snprintf(path, sizeof(path), "%s/backend-id", nodename); dev->dom = xenbus_read_integer(path); #ifdef HAVE_LIBC if (thenetif_rx == NETIF_SELECT_RX) evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->evtchn); else #endif evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->evtchn); txs = (struct netif_tx_sring *) alloc_page(); rxs = (struct netif_rx_sring *) alloc_page(); memset(txs,0,PAGE_SIZE); memset(rxs,0,PAGE_SIZE); SHARED_RING_INIT(txs); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE); FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE); dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0); dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0); init_rx_buffers(dev); dev->netif_rx = thenetif_rx; dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err)
{ printk("starting transaction\n"); } err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u", dev->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u", dev->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { printk("completing transaction\n"); goto again; } goto done; abort_transaction: xenbus_transaction_end(xbt, 1, &retry); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); snprintf(path, sizeof(path), "%s/mac", nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if ((dev->backend == NULL) || (dev->mac == NULL)) { printk("%s: backend/mac failed\n", __func__); goto error; } printk("backend at %s\n",dev->backend); printk("mac is %s\n",dev->mac); { XenbusState state; char path[strlen(dev->backend) + 1 + 5 + 1]; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); err = NULL; state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateConnected) err = xenbus_wait_for_state_change(path, &state, &dev->events); if (state != XenbusStateConnected) { printk("backend not avalable, state=%d\n", state); xenbus_unwatch_path(XBT_NIL, path); goto error; } if (ip) { snprintf(path, sizeof(path), "%s/ip", dev->backend); xenbus_read(XBT_NIL, path, ip); } } printk("**************************\n");
unmask_evtchn(dev->evtchn); /* Special conversion specifier 'hh' needed for __ia64__. Without this mini-os panics with 'Unaligned reference'. */ if (rawmac) sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &rawmac[0], &rawmac[1], &rawmac[2], &rawmac[3], &rawmac[4], &rawmac[5]); return dev; error: free_netfront(dev); return NULL; }
/*
 * init_consfront() - attach an extra Xen console device ("ioemu" type):
 * allocate and grant the shared console ring, bind an event channel,
 * publish ring-ref/port/protocol/type in a xenbus transaction, and wait
 * for the backend to connect.  Returns the device or NULL on error.
 *
 * BUGFIX 1: strncpy() of a caller-supplied nodename could leave the
 * buffer unterminated when the name fills it; use snprintf, which always
 * terminates.
 * BUGFIX 2: err was freed after the successful transaction end but not
 * reset; the 'error:' path frees err again — a potential double free.
 * err is now NULLed after that free.  Also print the retry message before
 * 'goto again' (it was unreachable).
 */
struct consfront_dev *init_consfront(char *_nodename) { xenbus_transaction_t xbt; char* err; char* message=NULL; int retry=0; char* msg = NULL; char nodename[256]; char path[256]; static int consfrontends = 3; struct consfront_dev *dev; int res; if (!_nodename) snprintf(nodename, sizeof(nodename), "device/console/%d", consfrontends); else snprintf(nodename, sizeof(nodename), "%s", _nodename); printk("******************* CONSFRONT for %s **********\n\n\n", nodename); consfrontends++; dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); #ifdef HAVE_LIBC dev->fd = -1; #endif snprintf(path, sizeof(path), "%s/backend-id", nodename); if ((res = xenbus_read_integer(path)) < 0) return NULL; else dev->dom = res; evtchn_alloc_unbound(dev->dom, handle_input, dev, &dev->evtchn); dev->ring = (struct xencons_interface *) alloc_page(); memset(dev->ring, 0, PAGE_SIZE); dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_mfn(dev->ring), 0); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, nodename, "ring-ref","%u", dev->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "port", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "type", "%s", "ioemu"); if (err) { message = "writing type"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); if (err) { free(err); err = NULL; } if (retry) { printk("completing transaction\n"); goto again; } goto done; abort_transaction: free(err); err =
xenbus_transaction_end(xbt, 1, &retry); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); if (msg) { printk("Error %s when reading the backend path %s\n", msg, path); goto error; } printk("backend at %s\n", dev->backend); { XenbusState state; char path[strlen(dev->backend) + 1 + 19 + 1]; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); msg = NULL; state = xenbus_read_integer(path); while (msg == NULL && state < XenbusStateConnected) msg = xenbus_wait_for_state_change(path, &state, &dev->events); if (msg != NULL || state != XenbusStateConnected) { printk("backend not available, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } } unmask_evtchn(dev->evtchn); printk("**************************\n"); return dev; error: free(msg); free(err); free_consfront(dev); return NULL; }
struct blkfront_dev *init_blkfront(char *_nodename, struct blkfront_info *info) { xenbus_transaction_t xbt; char* err; char* message=NULL; struct blkif_sring *s; int retry=0; char* msg = NULL; char* c; char* nodename = _nodename ? _nodename : "device/vbd/768"; struct blkfront_dev *dev; char path[strlen(nodename) + strlen("/backend-id") + 1]; printk("******************* BLKFRONT for %s **********\n\n\n", nodename); dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); #ifdef HAVE_LIBC dev->fd = -1; #endif snprintf(path, sizeof(path), "%s/backend-id", nodename); dev->dom = xenbus_read_integer(path); evtchn_alloc_unbound(dev->dom, blkfront_handler, dev, &dev->evtchn); s = (struct blkif_sring*) alloc_page(); memset(s,0,PAGE_SIZE); SHARED_RING_INIT(s); FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE); dev->ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(s),0); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, nodename, "ring-ref","%u", dev->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); 
if (msg) { printk("Error %s when reading the backend path %s\n", msg, path); goto error; } printk("backend at %s\n", dev->backend); dev->handle = strtoul(strrchr(nodename, '/')+1, NULL, 0); { XenbusState state; char path[strlen(dev->backend) + strlen("/feature-flush-cache") + 1]; snprintf(path, sizeof(path), "%s/mode", dev->backend); msg = xenbus_read(XBT_NIL, path, &c); if (msg) { printk("Error %s when reading the mode\n", msg); goto error; } if (*c == 'w') dev->info.mode = O_RDWR; else dev->info.mode = O_RDONLY; free(c); snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); msg = NULL; state = xenbus_read_integer(path); while (msg == NULL && state < XenbusStateConnected) msg = xenbus_wait_for_state_change(path, &state, &dev->events); if (msg != NULL || state != XenbusStateConnected) { printk("backend not available, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } snprintf(path, sizeof(path), "%s/info", dev->backend); dev->info.info = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sectors", dev->backend); // FIXME: read_integer returns an int, so disk size limited to 1TB for now dev->info.sectors = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sector-size", dev->backend); dev->info.sector_size = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-barrier", dev->backend); dev->info.barrier = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-flush-cache", dev->backend); dev->info.flush = xenbus_read_integer(path); *info = dev->info; } unmask_evtchn(dev->evtchn); printk("%lu sectors of %u bytes\n", (unsigned long) dev->info.sectors, dev->info.sector_size); printk("**************************\n"); return dev; error: free(msg); free(err); free_blkfront(dev); return NULL; }
struct pcifront_dev *init_pcifront(char *_nodename) { xenbus_transaction_t xbt; char* err; char* message=NULL; int retry=0; char* msg = NULL; char* nodename = _nodename ? _nodename : "device/pci/0"; int dom; struct pcifront_dev *dev; char path[strlen(nodename) + strlen("/backend-id") + 1]; if (!_nodename && pcidev) return pcidev; printk("******************* PCIFRONT for %s **********\n\n\n", nodename); snprintf(path, sizeof(path), "%s/backend-id", nodename); dom = xenbus_read_integer(path); if (dom == -1) { printk("no backend\n"); return NULL; } dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); dev->dom = dom; evtchn_alloc_unbound(dev->dom, pcifront_handler, dev, &dev->evtchn); dev->info = (struct xen_pci_sharedinfo*) alloc_page(); memset(dev->info,0,PAGE_SIZE); dev->info_ref = gnttab_grant_access(dev->dom,virt_to_mfn(dev->info),0); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, nodename, "pci-op-ref","%u", dev->info_ref); if (err) { message = "writing pci-op-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "magic", XEN_PCI_MAGIC); if (err) { message = "writing magic"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateInitialised); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); if (msg) { printk("Error %s 
when reading the backend path %s\n", msg, path); goto error; } printk("backend at %s\n", dev->backend); { char path[strlen(dev->backend) + strlen("/state") + 1]; char frontpath[strlen(nodename) + strlen("/state") + 1]; XenbusState state; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); err = NULL; state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateConnected) err = xenbus_wait_for_state_change(path, &state, &dev->events); if (state != XenbusStateConnected) { printk("backend not avalable, state=%d\n", state); free(err); err = xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } snprintf(frontpath, sizeof(frontpath), "%s/state", nodename); if ((err = xenbus_switch_state(XBT_NIL, frontpath, XenbusStateConnected)) != NULL) { printk("error switching state %s\n", err); free(err); err = xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } } unmask_evtchn(dev->evtchn); printk("**************************\n"); if (!_nodename) pcidev = dev; return dev; error: free(msg); free(err); free_pcifront(dev); return NULL; }
static struct netfront_dev *_init_netfront(struct netfront_dev *dev, unsigned char rawmac[6], char **ip) { xenbus_transaction_t xbt; char* err = NULL; const char* message=NULL; struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int feature_split_evtchn; int retry=0; int i; char* msg = NULL; char path[256]; snprintf(path, sizeof(path), "%s/backend-id", dev->nodename); dev->dom = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/backend", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); snprintf(path, sizeof(path), "%s/mac", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if ((dev->backend == NULL) || (dev->mac == NULL)) { printk("%s: backend/mac failed\n", __func__); goto error; } #ifdef CONFIG_NETMAP snprintf(path, sizeof(path), "%s/feature-netmap", dev->backend); dev->netmap = xenbus_read_integer(path) > 0 ? 1 : 0; if (dev->netmap) { dev->na = init_netfront_netmap(dev, dev->netif_rx); goto skip; } #endif /* Check feature-split-event-channels */ snprintf(path, sizeof(path), "%s/feature-split-event-channels", dev->backend); feature_split_evtchn = xenbus_read_integer(path) > 0 ? 
1 : 0; #ifdef HAVE_LIBC /* Force the use of a single event channel */ if (dev->netif_rx == NETIF_SELECT_RX) feature_split_evtchn = 0; #endif printk("************************ NETFRONT for %s **********\n\n\n", dev->nodename); init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE); for(i=0;i<NET_TX_RING_SIZE;i++) { add_id_to_freelist(i,dev->tx_freelist); #if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY dev->tx_buffers[i].page = (void*)alloc_page(); BUG_ON(dev->tx_buffers[i].page == NULL); #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS dev->tx_buffers[i].gref = gnttab_grant_access(dev->dom, virt_to_mfn(dev->tx_buffers[i].page), 0); BUG_ON(dev->tx_buffers[i].gref == GRANT_INVALID_REF); dprintk("tx[%d]: page = %p, gref=0x%x\n", i, dev->tx_buffers[i].page, dev->tx_buffers[i].gref); #endif #endif } #if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY printk("net TX ring size %d, %lu KB\n", NET_TX_RING_SIZE, (unsigned long)(NET_TX_RING_SIZE * PAGE_SIZE)/1024); #else printk("net TX ring size %d\n", NET_TX_RING_SIZE); #endif #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS for(i=0;i<NET_RX_RING_SIZE;i++) { /* TODO: that's a lot of memory */ dev->rx_buffers[i].page = (void*)alloc_page(); BUG_ON(dev->rx_buffers[i].page == NULL); dprintk("rx[%d]: page = %p\n", i, dev->rx_buffers[i].page); } printk("net RX ring size %d, %lu KB\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_RING_SIZE * PAGE_SIZE)/1024); #else for(i=0;i<NET_RX_RING_SIZE;i++) dev->rx_buffers[i] = NULL; for(i=0;i<NET_RX_BUFFERS;i++) { /* allocate rx buffer pool */ dev->rx_buffer_pool[i].page = (void*)alloc_page(); BUG_ON(dev->rx_buffer_pool[i].page == NULL); dprintk("rx[%d]: page = %p\n", i, dev->rx_buffer_pool[i].page); add_id_to_freelist(i,dev->rx_freelist); } dev->rx_avail = NET_RX_BUFFERS; printk("net RX ring size %d, %lu KB buffer space\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_BUFFERS * PAGE_SIZE)/1024); #endif if (feature_split_evtchn) { 
evtchn_alloc_unbound(dev->dom, netfront_tx_handler, dev, &dev->tx_evtchn); evtchn_alloc_unbound(dev->dom, netfront_rx_handler, dev, &dev->rx_evtchn); printk("split event channels enabled\n"); } else { #ifdef HAVE_LIBC if (dev->netif_rx == NETIF_SELECT_RX) evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->tx_evtchn); else #endif evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->tx_evtchn); dev->rx_evtchn = dev->tx_evtchn; } #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS printk("persistent grants enabled\n"); #endif txs = (struct netif_tx_sring *) alloc_page(); rxs = (struct netif_rx_sring *) alloc_page(); memset(txs,0,PAGE_SIZE); memset(rxs,0,PAGE_SIZE); SHARED_RING_INIT(txs); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE); FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE); dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0); BUG_ON(dev->tx_ring_ref == GRANT_INVALID_REF); dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0); BUG_ON(dev->rx_ring_ref == GRANT_INVALID_REF); init_rx_buffers(dev); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", dev->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", dev->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } if (feature_split_evtchn) { err = xenbus_printf(xbt, dev->nodename, "event-channel-tx", "%u", dev->tx_evtchn); if (err) { message = "writing event-channel-tx"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel-rx", "%u", dev->rx_evtchn); if (err) { message = "writing event-channel-rx"; goto abort_transaction; } } else { err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", dev->tx_evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } } 
err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%u", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) { message = "writing feature-persistent"; goto abort_transaction; } #endif err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } #if defined(CONFIG_NETFRONT_GSO) && defined(HAVE_LWIP) err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%u", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%u", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", "%u", 1); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } #endif snprintf(path, sizeof(path), "%s/state", dev->nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/mac", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if (dev->mac == NULL) { printk("%s: backend/mac failed\n", __func__); goto error; } printk("backend at %s\n",dev->backend); printk("mac is %s\n",dev->mac); { XenbusState state; char path[strlen(dev->backend) + strlen("/state") + 1]; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); err = NULL; state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateConnected) err = 
xenbus_wait_for_state_change(path, &state, &dev->events); if (state != XenbusStateConnected) { printk("backend not avalable, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } if (ip) { snprintf(path, sizeof(path), "%s/ip", dev->backend); xenbus_read(XBT_NIL, path, ip); } } printk("**************************\n"); unmask_evtchn(dev->tx_evtchn); if (feature_split_evtchn) unmask_evtchn(dev->rx_evtchn); #ifdef CONFIG_NETMAP skip: if (dev->netmap) connect_netfront(dev); #endif /* Special conversion specifier 'hh' needed for __ia64__. Without this mini-os panics with 'Unaligned reference'. */ if (rawmac) sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &rawmac[0], &rawmac[1], &rawmac[2], &rawmac[3], &rawmac[4], &rawmac[5]); #ifdef CONFIG_SELECT_POLL dev->fd = alloc_fd(FTYPE_TAP); files[dev->fd].read = 0; #endif #ifdef CONFIG_NETFRONT_STATS netfront_reset_txcounters(dev); #endif return dev; error: free(msg); free(err); free_netfront(dev); return NULL; }