static int setup_rings(struct xenbus_device *dev, struct usbfront_info *info) { usbif_urb_sring_t *urb_sring; usbif_conn_sring_t *conn_sring; int err; info->urb_ring_ref = GRANT_INVALID_REF; info->conn_ring_ref = GRANT_INVALID_REF; urb_sring = (usbif_urb_sring_t *)get_zeroed_page(GFP_NOIO|__GFP_HIGH); if (!urb_sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring"); return -ENOMEM; } SHARED_RING_INIT(urb_sring); FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(info->urb_ring.sring)); if (err < 0) { free_page((unsigned long)urb_sring); info->urb_ring.sring = NULL; goto fail; } info->urb_ring_ref = err; conn_sring = (usbif_conn_sring_t *)get_zeroed_page(GFP_NOIO|__GFP_HIGH); if (!conn_sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring"); return -ENOMEM; } SHARED_RING_INIT(conn_sring); FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(info->conn_ring.sring)); if (err < 0) { free_page((unsigned long)conn_sring); info->conn_ring.sring = NULL; goto fail; } info->conn_ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, xenhcd_int, SA_SAMPLE_RANDOM, "usbif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: destroy_rings(info); return err; }
static int ctl_close(struct tdlog_state* s) { while (s->connected) { s->connected--; tapdisk_server_unregister_event(s->connections[s->connected].id); close(s->connections[s->connected].fd); s->connections[s->connected].fd = -1; s->connections[s->connected].id = 0; } if (s->ctl.fd >= 0) { tapdisk_server_unregister_event(s->ctl.id); close(s->ctl.fd); s->ctl.fd = -1; s->ctl.id = 0; } if (s->ctlpath) { unlink(s->ctlpath); free(s->ctlpath); s->ctlpath = NULL; } /* XXX this must be fixed once requests are actually in flight */ /* could just drain the existing ring here first */ if (s->sring) { SHARED_RING_INIT(s->sring); BACK_RING_INIT(&s->bring, s->sring, SRINGSIZE); } return 0; }
// Allocate and initialize the frontend's shared ring page, grant the
// backend foreign access to it, and allocate the event channel used to
// signal ring activity.  On failure, partially-acquired resources are
// released in reverse order through the fail labels.
static FORCEINLINE NTSTATUS
__AllocRing(
    IN  PXENVBD_FRONTEND    Frontend
    )
{
    NTSTATUS    Status;
    ULONG       RingRef;
    PFN_NUMBER  Pfn;

    // SharedRing: must not already be allocated (fresh or fully torn down).
    ASSERT3P(Frontend->SharedRing, ==, NULL);
    Frontend->SharedRing = __FrontendAlloc(PAGE_SIZE);
    Status = STATUS_INSUFFICIENT_RESOURCES;
    if (!Frontend->SharedRing)
        goto fail1;

    // The ring macros trip warnings C4305/C4311 (truncation); suppress
    // them locally rather than project-wide.
#pragma warning(push)
#pragma warning(disable: 4305)
#pragma warning(disable: 4311)
    SHARED_RING_INIT(Frontend->SharedRing);
    FRONT_RING_INIT(&Frontend->FrontRing, Frontend->SharedRing, PAGE_SIZE);
#pragma warning (pop)

    // GNTTAB: reserve a grant entry, then permit the backend domain to
    // map the shared ring's frame.
    Pfn = __VirtToPfn(Frontend->SharedRing);
    Status = GnttabGet(&RingRef);
    if (!NT_SUCCESS(Status))
        goto fail2;
    GnttabPermitForeignAccess(RingRef, Frontend->BackendId, Pfn, FALSE);
    Frontend->RingGrantRef = RingRef;

    // EVTCHN: allocate the inter-domain notification port.
    Status = EventChannelAllocate(Frontend->BackendId, &Frontend->EvtchnPort);
    if (!NT_SUCCESS(Status))
        goto fail3;

    return STATUS_SUCCESS;

fail3:
    LogError("Fail3\n");
    // NOTE(review): treats 0 as "no grant ref held" — assumes a valid
    // reference is never 0; confirm against the gnttab implementation.
    if (Frontend->RingGrantRef != 0) {
        GnttabRevokeForeignAccess(Frontend->RingGrantRef);
        GnttabPut(Frontend->RingGrantRef);
        Frontend->RingGrantRef = 0;
    }
fail2:
    LogError("Fail2\n");
    // Drop the (now stale) front ring bookkeeping and the ring page.
    RtlZeroMemory(&Frontend->FrontRing, sizeof(Frontend->FrontRing));
    __FrontendFree(Frontend->SharedRing);
    Frontend->SharedRing = NULL;
fail1:
    LogError("Fail1 (%08x)\n", Status);
    return Status;
}
int gnt_init(void) { int mfn; int err; struct as_sring *sring; struct evtchn_alloc_unbound alloc_unbound; printk(KERN_INFO "gnt_init\n"); page = __get_free_pages(GFP_KERNEL, 0); if (page == 0) { printk(KERN_DEBUG "\nxen:DomU:could not get free page"); return 0; } sring = (struct as_sring *)page; SHARED_RING_INIT(sring); FRONT_RING_INIT(&(info.ring), sring, PAGE_SIZE); mfn = virt_to_mfn(page); printk(KERN_INFO "grant foreign access\n"); info.gref = gnttab_grant_foreign_access(DOM0_ID, mfn, 0); if (info.gref < 0) { printk(KERN_DEBUG "\nxen:could not grant foreign access"); free_page((unsigned long)page); info.ring.sring = NULL; return 0; } printk(KERN_DEBUG "\n gref = %d", info.gref); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOM0_ID; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { printk(KERN_DEBUG "\nalloc unbound port failure"); return err; } err = bind_evtchn_to_irqhandler(alloc_unbound.port, as_int, 0, "xen-eg", &info); if (err < 0) { printk(KERN_DEBUG "\nbind evtchn to irqhandler failure"); return err; } info.irq = err; info.port = alloc_unbound.port; printk(KERN_DEBUG " interrupt = %d, local_port = %d", info.irq, info.port); printk("...\n..."); create_procfs_entry(); return 0; }
/*
 * xpvtap_user_ring_init()
 *    Set up the shared ring used to hand block requests to the
 *    user-space consumer.  DDI_UMEM_SLEEP makes the allocation sleep
 *    until memory is available (it never returns NULL), so there is no
 *    error path; always returns DDI_SUCCESS.
 */
static int
xpvtap_user_ring_init(xpvtap_state_t *state)
{
	xpvtap_user_ring_t *ring = &state->bt_user_ring;

	/* one user-mappable page backs the shared ring */
	ring->ur_sring = (blkif_sring_t *)ddi_umem_alloc(PAGESIZE,
	    DDI_UMEM_SLEEP, &ring->ur_cookie);

	SHARED_RING_INIT(ring->ur_sring);
	FRONT_RING_INIT(&ring->ur_ring, ring->ur_sring, PAGESIZE);
	ring->ur_prod_polled = 0;

	return (DDI_SUCCESS);
}
static int blktap_ring_mmap_sring(struct blktap *tap, struct vm_area_struct *vma) { struct blktap_ring *ring = &tap->ring; struct blktap_sring *sring; struct page *page = NULL; int err; if (ring->vma) return -EBUSY; page = alloc_page(GFP_KERNEL|__GFP_ZERO); if (!page) return -ENOMEM; SetPageReserved(page); err = vm_insert_page(vma, vma->vm_start, page); if (err) goto fail; sring = page_address(page); SHARED_RING_INIT(sring); FRONT_RING_INIT(&ring->ring, sring, PAGE_SIZE); ring->ring_vstart = vma->vm_start; ring->user_vstart = ring->ring_vstart + PAGE_SIZE; vma->vm_private_data = tap; vma->vm_flags |= VM_DONTCOPY; vma->vm_flags |= VM_RESERVED; vma->vm_ops = &blktap_ring_vm_operations; ring->vma = vma; return 0; fail: if (page) { ClearPageReserved(page); __free_page(page); } return err; }
static int setup_ixpring(struct xenbus_device *dev, struct ixpfront_info *info) { struct ixp_sring *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (struct ixp_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) goto fail; err = bind_evtchn_to_irqhandler(info->evtchn, ixp_interrupt, IRQF_SAMPLE_RANDOM, "ixp", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler failed"); goto fail; } info->irq = err; return 0; fail: ixp_free(info, 0); return err; }
struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip) { xenbus_transaction_t xbt; char* err; char* message=NULL; struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int retry=0; int i; char* msg; char nodename[256]; char path[256]; struct netfront_dev *dev; static int netfrontends = 0; if (!_nodename) snprintf(nodename, sizeof(nodename), "device/vif/%d", netfrontends); else strncpy(nodename, _nodename, strlen(nodename)); netfrontends++; if (!thenetif_rx) thenetif_rx = netif_rx; printk("************************ NETFRONT for %s **********\n\n\n", nodename); dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); #ifdef HAVE_LIBC dev->fd = -1; #endif printk("net TX ring size %d\n", NET_TX_RING_SIZE); printk("net RX ring size %d\n", NET_RX_RING_SIZE); init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE); for(i=0;i<NET_TX_RING_SIZE;i++) { add_id_to_freelist(i,dev->tx_freelist); dev->tx_buffers[i].page = NULL; } for(i=0;i<NET_RX_RING_SIZE;i++) { /* TODO: that's a lot of memory */ dev->rx_buffers[i].page = (char*)alloc_page(); } snprintf(path, sizeof(path), "%s/backend-id", nodename); dev->dom = xenbus_read_integer(path); #ifdef HAVE_LIBC if (thenetif_rx == NETIF_SELECT_RX) evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->evtchn); else #endif evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->evtchn); txs = (struct netif_tx_sring *) alloc_page(); rxs = (struct netif_rx_sring *) alloc_page(); memset(txs,0,PAGE_SIZE); memset(rxs,0,PAGE_SIZE); SHARED_RING_INIT(txs); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE); FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE); dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0); dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0); init_rx_buffers(dev); dev->netif_rx = thenetif_rx; dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) 
{ printk("starting transaction\n"); } err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u", dev->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u", dev->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: xenbus_transaction_end(xbt, 1, &retry); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); snprintf(path, sizeof(path), "%s/mac", nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if ((dev->backend == NULL) || (dev->mac == NULL)) { printk("%s: backend/mac failed\n", __func__); goto error; } printk("backend at %s\n",dev->backend); printk("mac is %s\n",dev->mac); { XenbusState state; char path[strlen(dev->backend) + 1 + 5 + 1]; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); err = NULL; state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateConnected) err = xenbus_wait_for_state_change(path, &state, &dev->events); if (state != XenbusStateConnected) { printk("backend not avalable, state=%d\n", state); xenbus_unwatch_path(XBT_NIL, path); goto error; } if (ip) { snprintf(path, sizeof(path), "%s/ip", dev->backend); xenbus_read(XBT_NIL, path, ip); } } printk("**************************\n"); 
unmask_evtchn(dev->evtchn); /* Special conversion specifier 'hh' needed for __ia64__. Without this mini-os panics with 'Unaligned reference'. */ if (rawmac) sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &rawmac[0], &rawmac[1], &rawmac[2], &rawmac[3], &rawmac[4], &rawmac[5]); return dev; error: free_netfront(dev); return NULL; }
struct blkfront_dev *init_blkfront(char *_nodename, struct blkfront_info *info) { xenbus_transaction_t xbt; char* err; char* message=NULL; struct blkif_sring *s; int retry=0; char* msg = NULL; char* c; char* nodename = _nodename ? _nodename : "device/vbd/768"; struct blkfront_dev *dev; char path[strlen(nodename) + strlen("/backend-id") + 1]; printk("******************* BLKFRONT for %s **********\n\n\n", nodename); dev = malloc(sizeof(*dev)); memset(dev, 0, sizeof(*dev)); dev->nodename = strdup(nodename); #ifdef HAVE_LIBC dev->fd = -1; #endif snprintf(path, sizeof(path), "%s/backend-id", nodename); dev->dom = xenbus_read_integer(path); evtchn_alloc_unbound(dev->dom, blkfront_handler, dev, &dev->evtchn); s = (struct blkif_sring*) alloc_page(); memset(s,0,PAGE_SIZE); SHARED_RING_INIT(s); FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE); dev->ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(s),0); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, nodename, "ring-ref","%u", dev->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } snprintf(path, sizeof(path), "%s/state", nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/backend", nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); 
if (msg) { printk("Error %s when reading the backend path %s\n", msg, path); goto error; } printk("backend at %s\n", dev->backend); dev->handle = strtoul(strrchr(nodename, '/')+1, NULL, 0); { XenbusState state; char path[strlen(dev->backend) + strlen("/feature-flush-cache") + 1]; snprintf(path, sizeof(path), "%s/mode", dev->backend); msg = xenbus_read(XBT_NIL, path, &c); if (msg) { printk("Error %s when reading the mode\n", msg); goto error; } if (*c == 'w') dev->info.mode = O_RDWR; else dev->info.mode = O_RDONLY; free(c); snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); msg = NULL; state = xenbus_read_integer(path); while (msg == NULL && state < XenbusStateConnected) msg = xenbus_wait_for_state_change(path, &state, &dev->events); if (msg != NULL || state != XenbusStateConnected) { printk("backend not available, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } snprintf(path, sizeof(path), "%s/info", dev->backend); dev->info.info = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sectors", dev->backend); // FIXME: read_integer returns an int, so disk size limited to 1TB for now dev->info.sectors = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/sector-size", dev->backend); dev->info.sector_size = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-barrier", dev->backend); dev->info.barrier = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/feature-flush-cache", dev->backend); dev->info.flush = xenbus_read_integer(path); *info = dev->info; } unmask_evtchn(dev->evtchn); printk("%lu sectors of %u bytes\n", (unsigned long) dev->info.sectors, dev->info.sector_size); printk("**************************\n"); return dev; error: free(msg); free(err); free_blkfront(dev); return NULL; }
/*
 * XenUsb_Connect() - (re)connect the USB frontend to its backend:
 * open the Xen device handle (on initial connect), wait for the backend
 * to reach an Init* state, bind the event channel, allocate and grant
 * the URB and connection shared rings, prefill the connection ring, and
 * wait for the backend to reach Connected.
 *
 * NOTE(review): the failure returns below do not undo earlier steps --
 * e.g. a conn_sring allocation failure leaves urb_sring allocated and
 * granted, and a backend-state timeout leaks both rings and the event
 * channel.  Presumably the caller's disconnect/remove path cleans up;
 * confirm before relying on repeated connect attempts.
 */
NTSTATUS
XenUsb_Connect(PVOID context, BOOLEAN suspend) {
  NTSTATUS status;
  PXENUSB_DEVICE_DATA xudd = context;
  PFN_NUMBER pfn;
  ULONG i;

  /* Only open a new device handle on first connect, not on resume. */
  if (!suspend) {
    xudd->handle = XnOpenDevice(xudd->pdo, XenUsb_DeviceCallback, xudd);
  }
  if (!xudd->handle) {
    FUNCTION_MSG("Cannot open Xen device\n");
    return STATUS_UNSUCCESSFUL;
  }

  if (xudd->device_state != DEVICE_STATE_INACTIVE) {
    /* Poll (up to 6 waits) for the backend to enter an Init* state,
     * kicking it out of Closed by rewriting our own state node. */
    for (i = 0; i <= 5 && xudd->backend_state != XenbusStateInitialising && xudd->backend_state != XenbusStateInitWait && xudd->backend_state != XenbusStateInitialised; i++) {
      FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
      if (xudd->backend_state == XenbusStateClosed) {
        /* NOTE(review): this status result is never checked. */
        status = XnWriteInt32(xudd->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
      }
      KeWaitForSingleObject(&xudd->backend_event, Executive, KernelMode, FALSE, NULL);
    }
    if (xudd->backend_state != XenbusStateInitialising && xudd->backend_state != XenbusStateInitWait && xudd->backend_state != XenbusStateInitialised) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    if (!NT_SUCCESS(status = XnBindEvent(xudd->handle, &xudd->event_channel, XenUsb_HandleEvent_DIRQL, xudd))) {
      FUNCTION_MSG("Cannot allocate event channel\n");
      return STATUS_UNSUCCESSFUL;
    }
    FUNCTION_MSG("event_channel = %d\n", xudd->event_channel);
    status = XnWriteInt32(xudd->handle, XN_BASE_FRONTEND, "event-channel", xudd->event_channel);

    /* URB ring: allocate one page, initialize front/shared rings, grant
     * the frame to the backend and publish the grant ref. */
    xudd->urb_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENUSB_POOL_TAG);
    if (!xudd->urb_sring) {
      FUNCTION_MSG("Cannot allocate urb_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xudd->urb_sring);
    FRONT_RING_INIT(&xudd->urb_ring, xudd->urb_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xudd->urb_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("usb sring pfn = %d\n", (ULONG)pfn);
    xudd->urb_sring_gref = XnGrantAccess(xudd->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENUSB_POOL_TAG);
    FUNCTION_MSG("usb sring_gref = %d\n", xudd->urb_sring_gref);
    status = XnWriteInt32(xudd->handle, XN_BASE_FRONTEND, "urb-ring-ref", xudd->urb_sring_gref);

    /* Connection-event ring: same pattern as the URB ring. */
    xudd->conn_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENUSB_POOL_TAG);
    if (!xudd->conn_sring) {
      FUNCTION_MSG("Cannot allocate conn_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xudd->conn_sring);
    FRONT_RING_INIT(&xudd->conn_ring, xudd->conn_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xudd->conn_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("conn sring pfn = %d\n", (ULONG)pfn);
    xudd->conn_sring_gref = XnGrantAccess(xudd->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENUSB_POOL_TAG);
    FUNCTION_MSG("conn sring_gref = %d\n", xudd->conn_sring_gref);
    status = XnWriteInt32(xudd->handle, XN_BASE_FRONTEND, "conn-ring-ref", xudd->conn_sring_gref);

    /* fill conn ring with requests */
    for (i = 0; i < USB_CONN_RING_SIZE; i++) {
      usbif_conn_request_t *req = RING_GET_REQUEST(&xudd->conn_ring, i);
      req->id = (uint16_t)i;
    }
    xudd->conn_ring.req_prod_pvt = i;

    /* Announce Connected and wait (up to 6 waits) for the backend. */
    status = XnWriteInt32(xudd->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);
    for (i = 0; i <= 5 && xudd->backend_state != XenbusStateConnected; i++) {
      FUNCTION_MSG("Waiting for XenbusStateConnected\n");
      KeWaitForSingleObject(&xudd->backend_event, Executive, KernelMode, FALSE, NULL);
    }
    if (xudd->backend_state != XenbusStateConnected) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    xudd->device_state = DEVICE_STATE_ACTIVE;
  }
  return STATUS_SUCCESS;
}
/*
 * scsifront_free() - release everything scsifront_alloc_ring() set up:
 * remove the SCSI host (unless already deleted), revoke the ring grant
 * and free the ring page, unbind the interrupt, and drop the host ref.
 * Safe to call on a partially-initialized info (each step is guarded).
 */
static void scsifront_free(struct vscsifrnt_info *info)
{
	struct Scsi_Host *host = info->host;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
	if (host->shost_state != SHOST_DEL) {
#else
	if (!test_bit(SHOST_DEL, &host->shost_state)) {
#endif
		scsi_remove_host(info->host);
	}

	/* Revoke the backend's grant and free the shared ring page. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref,
					(unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;

	scsi_host_put(info->host);
}

/*
 * scsifront_alloc_ring() - allocate the frontend->backend shared ring,
 * grant it to the backend, and bind the listening-port interrupt.
 * Returns 0 or a negative errno; on failure scsifront_free() undoes
 * whatever was acquired.
 */
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	int err = -ENOMEM;

	info->ring_ref = GRANT_INVALID_REF;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
	if (err < 0) {
		free_page((unsigned long) sring);
		info->ring.sring = NULL;
		xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");
		goto free_sring;
	}
	info->ring_ref = err;

	err = bind_listening_port_to_irqhandler(
			dev->otherend_id, scsifront_intr,
			SA_SAMPLE_RANDOM, "scsifront", info);

	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
		goto free_sring;
	}
	info->irq = err;

	return 0;

/* free resource */
free_sring:
	scsifront_free(info);

	return err;
}

/*
 * scsifront_init_ring() - allocate the ring, then publish ring-ref and
 * event-channel to xenstore inside a transaction (retried on -EAGAIN).
 * Returns 0 or a negative errno, freeing the ring on failure.
 */
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	DPRINTK("%s\n",__FUNCTION__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		/* NOTE(review): the failure is reported but execution falls
		 * through and keeps using xbt — confirm this is intended. */
		xenbus_dev_fatal(dev, err, "starting transaction");
	}

	err = xenbus_printf(xbt, dev->nodename,
				"ring-ref", "%u", info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
				irq_to_evtchn_port(info->irq));

	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);
free_sring:
	/* free resource */
	scsifront_free(info);

	return err;
}

/*
 * scsifront_probe() - xenbus probe: allocate the SCSI host and private
 * info, initialize the request shadow free list, set up the ring, start
 * the service kthread, register the host, and move to Initialised.
 */
static int scsifront_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	struct vscsifrnt_info *info;
	struct Scsi_Host *host;
	int i, err = -ENOMEM;
	char name[DEFAULT_TASK_COMM_LEN];

	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
	if (!host) {
		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
		return err;
	}
	info = (struct vscsifrnt_info *) host->hostdata;
	info->host = host;

	dev->dev.driver_data = info;
	info->dev = dev;

	/* Chain the shadow entries into a free list; the last entry gets
	 * an out-of-range sentinel instead of wrapping around. */
	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
		info->shadow[i].next_free = i + 1;
		init_waitqueue_head(&(info->shadow[i].wq_reset));
		info->shadow[i].wait_reset = 0;
	}
	info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;

	err = scsifront_init_ring(info);
	if (err) {
		scsi_host_put(host);
		return err;
	}

	init_waitqueue_head(&info->wq);
	spin_lock_init(&info->io_lock);
	spin_lock_init(&info->shadow_lock);

	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);

	info->kthread = kthread_run(scsifront_schedule, info, name);
	if (IS_ERR(info->kthread)) {
		err = PTR_ERR(info->kthread);
		info->kthread = NULL;
		printk(KERN_ERR "scsifront: kthread start err %d\n", err);
		goto free_sring;
	}

	host->max_id = VSCSIIF_MAX_TARGET;
	host->max_channel = 0;
	host->max_lun = VSCSIIF_MAX_LUN;
	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;

	err = scsi_add_host(host, &dev->dev);
	if (err) {
		printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
		goto free_sring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

free_sring:
	/* free resource */
	scsifront_free(info);
	return err;
}

/* scsifront_remove() - xenbus remove: stop the kthread and free all
 * frontend resources. */
static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev->dev.driver_data;

	DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename);

	if (info->kthread) {
		kthread_stop(info->kthread);
		info->kthread = NULL;
	}

	scsifront_free(info);

	return 0;
}

/* scsifront_disconnect() - react to the backend closing: remove the
 * SCSI host and acknowledge the closed state on xenbus. */
static int scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename);

	/*
	  When this function is executed,  all devices of
	  Frontend have been deleted.
	  Therefore, it need not block I/O before remove_host.
	*/

	scsi_remove_host(host);
	xenbus_frontend_closed(dev);

	return 0;
}

#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2

/*
 * scsifront_do_lun_hotplug() - walk the backend's vscsi-devs directory
 * and add or remove LUNs according to 'op', acknowledging each change
 * by writing the per-device state node under our own frontend dir.
 */
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64], state_str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
			&device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
			"%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* front device state path */
		snprintf(state_str, sizeof(state_str),
			"vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state == XenbusStateInitialised) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					/* already present: refuse and report Closed */
					printk(KERN_ERR "scsifront: Device already in use.\n");
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				} else {
					scsi_add_device(info->host, chn, tgt, lun);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateConnected);
				}
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state == XenbusStateClosing) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				}
			}
			break;
		default:
			break;
		}
	}

	kfree(dir);
	return;
}

/*
 * scsifront_backend_changed() - otherend_changed callback: track the
 * backend state machine, performing LUN hotplug and state transitions.
 */
static void scsifront_backend_changed(struct xenbus_device *dev,
				enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev->dev.driver_data;

	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateClosed:
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		/* pick up LUNs published before we connected */
		if (xenbus_read_driver_state(dev->nodename) ==
			XenbusStateInitialised) {
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		}

		if (dev->state == XenbusStateConnected)
			break;

		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}

static struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};
MODULE_ALIAS("xen:vscsi");

static struct xenbus_driver scsifront_driver = {
	.name		= "vscsi",
	.owner		= THIS_MODULE,
	.ids		= scsifront_ids,
	.probe		= scsifront_probe,
	.remove		= scsifront_remove,
/* 	.resume		= scsifront_resume, */
	.otherend_changed	= scsifront_backend_changed,
};

/* Register/unregister the vscsi frontend with the xenbus core. */
int scsifront_xenbus_init(void)
{
	return xenbus_register_frontend(&scsifront_driver);
}

void scsifront_xenbus_unregister(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
FILE *profile_fopen(const char *fname, const char *mode) { char *key = NULL, *val = NULL, *rsp = NULL, *domStr = NULL, *diskname = NULL; uint32_t req, rsptype, rsplen, domId; XenStorePaths *xsp = NULL; uint64_t store_mptr; FILE *retval = NULL; int vallen; long res; if(strncmp(mode, "w", 1) != 0) goto fail; if(strncmp(fname, "HaLVM.prof", 11) == 0) diskname = "xvdp1"; if(strncmp(fname, "HaLVM.hp", 9) == 0) diskname = "xvdp2"; if(!diskname) goto fail; store_mptr = (uint64_t)system_start_info->store_mfn << 12; unmask_channel(system_start_info->store_evtchn); xsint = (struct xenstore_domain_interface*)machine_to_virtual(store_mptr); if(!xsint) { printf("PROFILING ERROR: Could not map XenStore page.\n"); goto fail; } /* Try to run "ls devices/vbd" */ req = xenstore_write(XS_DIRECTORY, strlen("device/vbd") + 1, "device/vbd"); rsplen = xenstore_read(req, &rsptype, (void**)&rsp); if(rsptype == XS_ERROR) { printf("PROFILING: XenStore read error. Did you forget to add a disk?\n"); goto fail; } if(rsptype != XS_DIRECTORY) { printf("PROFILING: XenStore has gone weird. 
Giving up.\n"); goto fail; } /* Find the XenStore paths associated with the disk we want */ xsp = find_xs_paths(diskname, rsp, rsplen); if(!xsp) { printf("PROFILING: Couldn't find file to open.\n"); goto fail; } /* Pull out the other's domId */ key = malloc(256); snprintf(key, 256, "%s/backend-id", xsp->feDir); domStr = xenstore_getkey(key); domId = atoi(domStr); /* allocate the return structure and buffers */ retval = malloc(sizeof(FILE)); if(!retval) goto fail; memset(retval, 0, sizeof(FILE)); retval->cur_block_num = 1; retval->block = runtime_alloc(NULL, 4096, PROT_READ|PROT_WRITE); if(!retval->block) goto fail; assert( (((uintptr_t)retval->block) & 4095) == 0 ); retval->ring.sring = runtime_alloc(NULL, 4096, PROT_READ|PROT_WRITE); if(!retval->ring.sring) goto fail; assert( (((uintptr_t)retval->ring.sring) & 4095) == 0 ); SHARED_RING_INIT(retval->ring.sring); FRONT_RING_INIT(&(retval->ring), retval->ring.sring, 4096); /* get the device handle */ snprintf(key, 256, "%s/virtual-device", xsp->feDir); val = xenstore_getkey(key); retval->disk_handle = atoi(val); /* allocate the grant references and event channel */ res = alloc_grant(domId, retval->ring.sring, 4096, 0, &retval->ring_grant); if(res) { printf("PROFILING: Failed to allocate ring grant reference: %d\n", res); goto fail; } res = alloc_grant(domId, retval->block, 4096, 0, &retval->block_grant); if(res) { printf("PROFILING: Failed to allocate block grant reference: %d\n", res); goto fail; } res = channel_alloc(DOMID_SELF, domId); if(res < 0) { printf("PROFILING: Failed to allocate grant reference: %d\n", res); goto fail; } retval->chan = (uint32_t)res; set_c_handler(retval->chan, handler); /* write them into our tree */ val = malloc(256); /* */ snprintf(key, 256, "%s/ring-ref", xsp->feDir); vallen = snprintf(val, 256, "%d", retval->ring_grant); if(!xenstore_setkey(key, val, vallen)) goto fail; /* */ snprintf(key, 256, "%s/event-channel", xsp->feDir); vallen = snprintf(val, 256, "%d", retval->chan); 
if(!xenstore_setkey(key, val, vallen)) goto fail; /* */ snprintf(key, 256, "%s/state", xsp->feDir); vallen = snprintf(val, 256, "%d", XenbusStateInitialised); if(!xenstore_setkey(key, val, vallen)) goto fail; /* wait for the other side to sync up */ do { char *state; runtime_block(1); snprintf(key, 256, "%s/state", xsp->beDir); state = xenstore_getkey(key); res = atoi(state); free(state); } while(res != XenbusStateConnected); /* write out that we're good */ /* */ snprintf(key, 256, "%s/state", xsp->feDir); vallen = snprintf(val, 256, "%d", XenbusStateConnected); if(!xenstore_setkey(key, val, vallen)) goto fail; return retval; fail: if(key) free(key); if(val) free(val); if(rsp) free(rsp); if(xsp) { free(xsp->feDir); free(xsp->beDir); free(xsp); } if(domStr) free(domStr); if(retval) { if(retval->block_grant) end_grant(retval->block_grant); if(retval->ring_grant) end_grant(retval->ring_grant); if(retval->block) runtime_free(retval->block, 4096); if(retval->ring.sring) runtime_free(retval->ring.sring, 4096); if(retval->chan) channel_close(retval->chan); free(retval); } errno = -EACCES; return NULL; }
/*
 * Probe and initialise all Xen network front ends (device/vif/<N>).
 *
 * For each index N present in xenstore this allocates a netfe_t, sets up
 * the shared TX/RX rings and buffer pools, grants access to the backend,
 * binds an event channel, publishes the handshake keys via xenstore and
 * finally posts the initial batch of RX requests.  Discovered devices are
 * appended to the global net_front_ends list; num_net_front_ends is set
 * to the number of devices found.  Allocation failures are fatal (assert).
 */
void netfe_init(void)
{
	int index = 0;
	netfe_t **link = &net_front_ends;

	while (1) {
		int n;
		char xs_key[256];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/backend-id", index);
		int rs = xenstore_read_int(&n, xs_key);
		if (rs != 0)
			break;		// no such index: enumeration is complete

		// FE/(index) is present
		domid_t backend_id = (domid_t)n;

		netfe_t *fe = (netfe_t *)mm_alloc_pages(PSIZE(sizeof(netfe_t)));
		memset(fe, 0, sizeof(*fe));

		// setup shared rings (rings must be initialised before they
		// are granted to the backend)
		fe->rxs = (netif_rx_sring_t *)mm_alloc_page();
		assert(fe->rxs != 0);
		fe->txs = (netif_tx_sring_t *)mm_alloc_page();
		assert(fe->txs != 0);

		SHARED_RING_INIT(fe->rxs);
		SHARED_RING_INIT(fe->txs);

		FRONT_RING_INIT(&fe->rx_ring, fe->rxs, PAGE_SIZE);
		FRONT_RING_INIT(&fe->tx_ring, fe->txs, PAGE_SIZE);

		grants_allow_access(&fe->rx_ref, backend_id, virt_to_mfn(fe->rxs));
		grants_allow_access(&fe->tx_ref, backend_id, virt_to_mfn(fe->txs));

		// set up receive buffers: one granted page per RX slot
		for (int i = 0; i < NR_RX_BUFFERS; i++) {
			fe->rx_buffers[i] = mm_alloc_page();
			assert(fe->rx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->rx_buffers[i]);
			grants_allow_access(&fe->rx_buf_refs[i], backend_id, mfn);
		}

		// set up send buffers, threading them onto the free list
		// (free_tx_head ends up pointing at the last buffer)
		fe->free_tx_head = NO_TX_BUFFER;
		for (int i = 0; i < NR_TX_BUFFERS; i++) {
			fe->tx_buffers[i] = mm_alloc_page();
			assert(fe->tx_buffers[i] != 0);
			unsigned long mfn = virt_to_mfn(fe->tx_buffers[i]);
			grants_allow_access(&fe->tx_buf_refs[i], backend_id, mfn);
			fe->free_tx_bufs[i] = fe->free_tx_head;
			fe->free_tx_head = i;
		}

		// set up interrupt
		fe->evtchn = event_alloc_unbound(backend_id);
		event_bind(fe->evtchn, netfe_int, (void *)fe);

		// publish the handshake values the backend needs
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/rx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->rx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/tx-ring-ref", index);
		rs = xenstore_write_uint(xs_key, fe->tx_ref);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/event-channel", index);
		rs = xenstore_write_uint(xs_key, fe->evtchn);
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/request-rx-copy", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key),
			 "device/vif/%d/feature-no-csum-offload", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/feature-rx-notify", index);
		rs = xenstore_write(xs_key, "1");
		assert(rs == 0);
		// state must be written last: it signals the backend to connect
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/state", index);
		rs = xenstore_write(xs_key, "4");	// XenbusStateConnected
		assert(rs == 0);

		// read MAC address
		char buf[64];
		snprintf(xs_key, sizeof(xs_key), "device/vif/%d/mac", index);
		rs = xenstore_read(xs_key, buf, sizeof(buf));
		assert(rs == 0);
		rs = parse_mac(buf, fe->mac);
		assert(rs == 0);
		fe->mac_len = ETH_ALEN;

		printk("\reth%d: MAC %02x:%02x:%02x:%02x:%02x:%02x\r\n", index,
		       fe->mac[0], fe->mac[1], fe->mac[2],
		       fe->mac[3], fe->mac[4], fe->mac[5]);

		//
		// Publish EXT_RX_BUFFERS requests only and replenish then to this number
		// during each interrupt handler invocation.
		//
		for (int i = 0; i < EXT_RX_BUFFERS; i++) {
			netif_rx_request_t *req =
			    RING_GET_REQUEST(&fe->rx_ring, fe->rx_ring.req_prod_pvt);
			req->id = i; //rx_id++;
			req->gref = fe->rx_buf_refs[i];
			fe->rx_ring.req_prod_pvt++;
		}
		RING_PUSH_REQUESTS(&fe->rx_ring);
		event_kick(fe->evtchn);

		fe->index = index++;
		//fe->next = 0;
		//fe->attached_lwip_netif = 0;
		//fe->attached_outlet = 0;

		// add to net_front_ends list
		*link = fe;
		link = &fe->next;
	}

	num_net_front_ends = index;
}
static struct netfront_dev *_init_netfront(struct netfront_dev *dev, unsigned char rawmac[6], char **ip) { xenbus_transaction_t xbt; char* err = NULL; const char* message=NULL; struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int feature_split_evtchn; int retry=0; int i; char* msg = NULL; char path[256]; snprintf(path, sizeof(path), "%s/backend-id", dev->nodename); dev->dom = xenbus_read_integer(path); snprintf(path, sizeof(path), "%s/backend", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->backend); snprintf(path, sizeof(path), "%s/mac", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if ((dev->backend == NULL) || (dev->mac == NULL)) { printk("%s: backend/mac failed\n", __func__); goto error; } #ifdef CONFIG_NETMAP snprintf(path, sizeof(path), "%s/feature-netmap", dev->backend); dev->netmap = xenbus_read_integer(path) > 0 ? 1 : 0; if (dev->netmap) { dev->na = init_netfront_netmap(dev, dev->netif_rx); goto skip; } #endif /* Check feature-split-event-channels */ snprintf(path, sizeof(path), "%s/feature-split-event-channels", dev->backend); feature_split_evtchn = xenbus_read_integer(path) > 0 ? 
1 : 0; #ifdef HAVE_LIBC /* Force the use of a single event channel */ if (dev->netif_rx == NETIF_SELECT_RX) feature_split_evtchn = 0; #endif printk("************************ NETFRONT for %s **********\n\n\n", dev->nodename); init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE); for(i=0;i<NET_TX_RING_SIZE;i++) { add_id_to_freelist(i,dev->tx_freelist); #if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY dev->tx_buffers[i].page = (void*)alloc_page(); BUG_ON(dev->tx_buffers[i].page == NULL); #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS dev->tx_buffers[i].gref = gnttab_grant_access(dev->dom, virt_to_mfn(dev->tx_buffers[i].page), 0); BUG_ON(dev->tx_buffers[i].gref == GRANT_INVALID_REF); dprintk("tx[%d]: page = %p, gref=0x%x\n", i, dev->tx_buffers[i].page, dev->tx_buffers[i].gref); #endif #endif } #if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY printk("net TX ring size %d, %lu KB\n", NET_TX_RING_SIZE, (unsigned long)(NET_TX_RING_SIZE * PAGE_SIZE)/1024); #else printk("net TX ring size %d\n", NET_TX_RING_SIZE); #endif #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS for(i=0;i<NET_RX_RING_SIZE;i++) { /* TODO: that's a lot of memory */ dev->rx_buffers[i].page = (void*)alloc_page(); BUG_ON(dev->rx_buffers[i].page == NULL); dprintk("rx[%d]: page = %p\n", i, dev->rx_buffers[i].page); } printk("net RX ring size %d, %lu KB\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_RING_SIZE * PAGE_SIZE)/1024); #else for(i=0;i<NET_RX_RING_SIZE;i++) dev->rx_buffers[i] = NULL; for(i=0;i<NET_RX_BUFFERS;i++) { /* allocate rx buffer pool */ dev->rx_buffer_pool[i].page = (void*)alloc_page(); BUG_ON(dev->rx_buffer_pool[i].page == NULL); dprintk("rx[%d]: page = %p\n", i, dev->rx_buffer_pool[i].page); add_id_to_freelist(i,dev->rx_freelist); } dev->rx_avail = NET_RX_BUFFERS; printk("net RX ring size %d, %lu KB buffer space\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_BUFFERS * PAGE_SIZE)/1024); #endif if (feature_split_evtchn) { 
evtchn_alloc_unbound(dev->dom, netfront_tx_handler, dev, &dev->tx_evtchn); evtchn_alloc_unbound(dev->dom, netfront_rx_handler, dev, &dev->rx_evtchn); printk("split event channels enabled\n"); } else { #ifdef HAVE_LIBC if (dev->netif_rx == NETIF_SELECT_RX) evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->tx_evtchn); else #endif evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->tx_evtchn); dev->rx_evtchn = dev->tx_evtchn; } #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS printk("persistent grants enabled\n"); #endif txs = (struct netif_tx_sring *) alloc_page(); rxs = (struct netif_rx_sring *) alloc_page(); memset(txs,0,PAGE_SIZE); memset(rxs,0,PAGE_SIZE); SHARED_RING_INIT(txs); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE); FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE); dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0); BUG_ON(dev->tx_ring_ref == GRANT_INVALID_REF); dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0); BUG_ON(dev->rx_ring_ref == GRANT_INVALID_REF); init_rx_buffers(dev); dev->events = NULL; again: err = xenbus_transaction_start(&xbt); if (err) { printk("starting transaction\n"); free(err); } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", dev->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", dev->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } if (feature_split_evtchn) { err = xenbus_printf(xbt, dev->nodename, "event-channel-tx", "%u", dev->tx_evtchn); if (err) { message = "writing event-channel-tx"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel-rx", "%u", dev->rx_evtchn); if (err) { message = "writing event-channel-rx"; goto abort_transaction; } } else { err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", dev->tx_evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } } 
err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%u", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } #ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) { message = "writing feature-persistent"; goto abort_transaction; } #endif err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } #if defined(CONFIG_NETFRONT_GSO) && defined(HAVE_LWIP) err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%u", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%u", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", "%u", 1); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } #endif snprintf(path, sizeof(path), "%s/state", dev->nodename); err = xenbus_switch_state(xbt, path, XenbusStateConnected); if (err) { message = "switching state"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0, &retry); free(err); if (retry) { goto again; printk("completing transaction\n"); } goto done; abort_transaction: free(err); err = xenbus_transaction_end(xbt, 1, &retry); printk("Abort transaction %s\n", message); goto error; done: snprintf(path, sizeof(path), "%s/mac", dev->nodename); msg = xenbus_read(XBT_NIL, path, &dev->mac); if (dev->mac == NULL) { printk("%s: backend/mac failed\n", __func__); goto error; } printk("backend at %s\n",dev->backend); printk("mac is %s\n",dev->mac); { XenbusState state; char path[strlen(dev->backend) + strlen("/state") + 1]; snprintf(path, sizeof(path), "%s/state", dev->backend); xenbus_watch_path_token(XBT_NIL, path, path, &dev->events); err = NULL; state = xenbus_read_integer(path); while (err == NULL && state < XenbusStateConnected) err = 
xenbus_wait_for_state_change(path, &state, &dev->events); if (state != XenbusStateConnected) { printk("backend not avalable, state=%d\n", state); xenbus_unwatch_path_token(XBT_NIL, path, path); goto error; } if (ip) { snprintf(path, sizeof(path), "%s/ip", dev->backend); xenbus_read(XBT_NIL, path, ip); } } printk("**************************\n"); unmask_evtchn(dev->tx_evtchn); if (feature_split_evtchn) unmask_evtchn(dev->rx_evtchn); #ifdef CONFIG_NETMAP skip: if (dev->netmap) connect_netfront(dev); #endif /* Special conversion specifier 'hh' needed for __ia64__. Without this mini-os panics with 'Unaligned reference'. */ if (rawmac) sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &rawmac[0], &rawmac[1], &rawmac[2], &rawmac[3], &rawmac[4], &rawmac[5]); #ifdef CONFIG_SELECT_POLL dev->fd = alloc_fd(FTYPE_TAP); files[dev->fd].read = 0; #endif #ifdef CONFIG_NETFRONT_STATS netfront_reset_txcounters(dev); #endif return dev; error: free(msg); free(err); free_netfront(dev); return NULL; }
/**
 * Open network device
 *
 * Brings the frontend from its reset state to Connected: builds the TX
 * and RX descriptor rings, creates the event channel, writes the feature
 * flags the backend requires, then waits for the backend to connect.
 * On any failure the already-acquired resources are released in reverse
 * order via the chained error labels.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int netfront_open ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	int rc;

	/* Ensure device is in a suitable initial state */
	if ( ( rc = netfront_reset ( netfront ) ) != 0 )
		goto err_reset;

	/* Create transmit descriptor ring */
	if ( ( rc = netfront_create_ring ( netfront, &netfront->tx ) ) != 0 )
		goto err_create_tx;
	SHARED_RING_INIT ( netfront->tx_sring );
	FRONT_RING_INIT ( &netfront->tx_fring, netfront->tx_sring, PAGE_SIZE );
	/* ring must hold at least as many slots as we track descriptors */
	assert ( RING_SIZE ( &netfront->tx_fring ) >= netfront->tx.count );

	/* Create receive descriptor ring */
	if ( ( rc = netfront_create_ring ( netfront, &netfront->rx ) ) != 0 )
		goto err_create_rx;
	SHARED_RING_INIT ( netfront->rx_sring );
	FRONT_RING_INIT ( &netfront->rx_fring, netfront->rx_sring, PAGE_SIZE );
	assert ( RING_SIZE ( &netfront->rx_fring ) >= netfront->rx.count );

	/* Create event channel */
	if ( ( rc = netfront_create_event ( netfront ) ) != 0 )
		goto err_create_event;

	/* "Request" the rx-copy feature.  Current versions of
	 * xen_netback.ko will fail silently if this parameter is not
	 * present.
	 */
	if ( ( rc = netfront_write_flag ( netfront, "request-rx-copy" ) ) != 0 )
		goto err_request_rx_copy;

	/* Disable checksum offload, since we will always do the work anyway */
	if ( ( rc = netfront_write_flag ( netfront,
					  "feature-no-csum-offload" ) ) != 0 )
		goto err_feature_no_csum_offload;

	/* Inform backend that we will send notifications for RX requests */
	if ( ( rc = netfront_write_flag ( netfront,
					  "feature-rx-notify" ) ) != 0 )
		goto err_feature_rx_notify;

	/* Set state to Connected */
	if ( ( rc = xenbus_set_state ( xendev, XenbusStateConnected ) ) != 0 ) {
		DBGC ( netfront, "NETFRONT %s could not set state=\"%d\": %s\n",
		       xendev->key, XenbusStateConnected, strerror ( rc ) );
		goto err_set_state;
	}

	/* Wait for backend to connect */
	if ( ( rc = xenbus_backend_wait ( xendev, XenbusStateConnected ) ) !=0){
		DBGC ( netfront, "NETFRONT %s could not connect to backend: "
		       "%s\n", xendev->key, strerror ( rc ) );
		goto err_backend_wait;
	}

	/* Refill receive descriptor ring */
	netfront_refill_rx ( netdev );

	/* Set link up */
	netdev_link_up ( netdev );

	return 0;

	/* Unwind in strict reverse order of acquisition */
 err_backend_wait:
	netfront_reset ( netfront );
 err_set_state:
	netfront_rm ( netfront, "feature-rx-notify" );
 err_feature_rx_notify:
	netfront_rm ( netfront, "feature-no-csum-offload" );
 err_feature_no_csum_offload:
	netfront_rm ( netfront, "request-rx-copy" );
 err_request_rx_copy:
	netfront_destroy_event ( netfront );
 err_create_event:
	netfront_destroy_ring ( netfront, &netfront->rx, NULL );
 err_create_rx:
	netfront_destroy_ring ( netfront, &netfront->tx, NULL );
 err_create_tx:
 err_reset:
	return rc;
}
/* We've bound the scsifilt instance to a xenvbd instance, and we've disconnected xenvbd from the shared ring. Connect scsifilt. */ NTSTATUS connect_scsifilt_with_token(struct scsifilt *sf, SUSPEND_TOKEN token) { XENBUS_STATE state; blkif_sring_t *ring_shared; NTSTATUS status; KIRQL irql; if (sf->backend_path != NULL) { TraceVerbose(("Releasing old backend path (%p)\n", sf->backend_path)); XmFreeMemory(sf->backend_path); sf->backend_path = NULL; } if (sf->ring_shared != NULL) { TraceVerbose(("Releasing old shared ring (%p)\n", sf->ring_shared)); XmFreeMemory(sf->ring_shared); sf->ring_shared = NULL; sf->ring.sring = NULL; } find_backend_handle(sf); status = STATUS_UNSUCCESSFUL; sf->backend_path = get_backend_path(sf, token); if (sf->backend_path == NULL) goto fail1; sf->target_resume(sf->target_id, token); if (sf->stopped) { sf->target_start(sf->target_id, sf->backend_path, token); sf->stopped = FALSE; } state = XenbusWaitForBackendStateChange(sf->backend_path, null_XENBUS_STATE(), NULL, token); if (!same_XENBUS_STATE(state, XENBUS_STATE_INITWAIT)) goto fail2; probe_backend_capabilities(sf); status = STATUS_NO_MEMORY; ring_shared = XmAllocateZeroedMemory(PAGE_SIZE << sf->ring_order); if (ring_shared == NULL) goto fail3; KeAcquireSpinLock(&sf->ring_lock, &irql); sf->ring_shared = ring_shared; SHARED_RING_INIT(sf->ring_shared); FRONT_RING_INIT(&sf->ring, sf->ring_shared, PAGE_SIZE << sf->ring_order); KeReleaseSpinLock(&sf->ring_lock, irql); grant_ring(sf); status = open_evtchn(sf); if (!NT_SUCCESS(status)) goto fail4; do { xenbus_transaction_t xbt; xenbus_transaction_start(&xbt); xenbus_write_evtchn_port(xbt, sf->frontend_path, "event-channel", sf->evtchn_port); if (sf->single_page) { XM_ASSERT3U(sf->ring_order, ==, 0); TraceNotice(("%s: using single page handshake\n", sf->frontend_path)); /* single page handshake */ xenbus_write_grant_ref(xbt, sf->frontend_path, "ring-ref", sf->ring_gref[0]); } else { int i; TraceNotice(("%s: using multi-page handshake\n", 
sf->frontend_path)); xenbus_printf(xbt, sf->frontend_path, "ring-page-order", "%u", sf->ring_order); for (i = 0; i < (1 << sf->ring_order); i++) { char buffer[10]; Xmsnprintf(buffer, sizeof(buffer), "ring-ref%1u", i); xenbus_write_grant_ref(xbt, sf->frontend_path, buffer, sf->ring_gref[i]); } } xenbus_printf(xbt, sf->frontend_path, "protocol", "x86_32-abi"); xenbus_write_feature_flag(xbt, sf->frontend_path, "feature-surprise-remove", TRUE); xenbus_write_feature_flag(xbt, sf->frontend_path, "feature-online-resize", TRUE); xenbus_change_state(xbt, sf->frontend_path, "state", XENBUS_STATE_INITIALISED); status = xenbus_transaction_end(xbt, 0); } while (status == STATUS_RETRY);
/*
 * Resume (or first-time connect) the xennet frontend: allocate fresh
 * shared TX/RX rings, grant them to the backend, allocate an event
 * channel, and publish the handshake keys in a xenbus transaction that
 * ends by setting the frontend state to Connected.
 *
 * p is the softc (struct xennet_xenbus_softc *).  Returns 0 on success
 * or an errno-style value on failure.
 */
static int xennet_xenbus_resume(void *p)
{
	struct xennet_xenbus_softc *sc = p;
	struct xenbus_transaction *xbt;
	int error;
	netif_tx_sring_t *tx_ring;
	netif_rx_sring_t *rx_ring;
	paddr_t ma;
	const char *errmsg;

	sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
	sc->sc_rx_ring_gntref = GRANT_INVALID_REF;

	/* setup device: alloc event channel and shared rings */
	tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (tx_ring == NULL || rx_ring == NULL)
		panic("xennet_xenbus_resume: can't alloc rings");

	/* rings must be initialised before they are granted */
	SHARED_RING_INIT(tx_ring);
	FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
	SHARED_RING_INIT(rx_ring);
	FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);

	/* grant the rings to the backend by machine address */
	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
	if (error)
		/* NOTE(review): returns without freeing the ring pages;
		 * presumably acceptable on this path -- verify against the
		 * detach/suspend code. */
		return error;
	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
	if (error)
		return error;
	error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
	if (error)
		return error;
	aprint_verbose_dev(sc->sc_dev, "using event channel %d\n",
	    sc->sc_evtchn);
	event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
	    IPL_NET, device_xname(sc->sc_dev));

again:
	xbt = xenbus_transaction_start();
	if (xbt == NULL)
		return ENOMEM;
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
	if (error) {
		errmsg = "writing tx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
	if (error) {
		errmsg = "writing rx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "event-channel", "%u", sc->sc_evtchn);
	if (error) {
		errmsg = "writing event channel";
		goto abort_transaction;
	}
	/* writing state=Connected signals the backend to connect */
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "state", "%d", XenbusStateConnected);
	if (error) {
		errmsg = "writing frontend XenbusStateConnected";
		goto abort_transaction;
	}
	error = xenbus_transaction_end(xbt, 0);
	if (error == EAGAIN)
		goto again;	/* transaction conflict: retry from scratch */
	if (error) {
		xenbus_dev_fatal(sc->sc_xbusd, error,
		    "completing transaction");
		/* NOTE(review): -1 here mixes conventions with the errno
		 * values returned elsewhere in this function -- confirm
		 * what the caller expects. */
		return -1;
	}
	xennet_alloc_rx_buffer(sc);

	sc->sc_backend_status = BEST_CONNECTED;
	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
	return error;
}
static int omx_xenfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct omx_xenfront_info *fe; struct omx_xenif_sring *sring, *recv_sring; int err = 0; int i = 0; dprintk_in(); dprintk_deb("Frontend Probe Fired!\n"); fe = kzalloc(sizeof(*fe), GFP_KERNEL); dprintk_deb("fe info is @%#llx!\n", (unsigned long long)fe); if (!fe) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); err = -ENOMEM; goto out; } __omx_xen_frontend = fe; for (i = 0; i < OMX_XEN_MAX_ENDPOINTS; i++) { fe->endpoints[i] = NULL; } fe->requests = kzalloc(OMX_MAX_INFLIGHT_REQUESTS * sizeof(enum frontend_status), GFP_KERNEL); spin_lock_init(&fe->status_lock); fe->xbdev = dev; fe->connected = OMXIF_STATE_DISCONNECTED; init_waitqueue_head(&fe->wq); fe->msg_workq = create_singlethread_workqueue("ReQ_FE"); if (unlikely(!fe->msg_workq)) { printk_err("Couldn't create msg_workq!\n"); err = -ENOMEM; goto out; } INIT_WORK(&fe->msg_workq_task, omx_xenif_interrupt); spin_lock_init(&fe->lock); dprintk_deb("Setting up shared ring\n"); sring = (struct omx_xenif_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); err = -ENOMEM; goto out; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&fe->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(fe->ring.sring)); if (err < 0) { free_page((unsigned long)sring); fe->ring.sring = NULL; printk_err("Failed to grant ring\n"); goto out; } fe->ring_ref = err; recv_sring = (struct omx_xenif_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); err = -ENOMEM; goto out; } SHARED_RING_INIT(recv_sring); FRONT_RING_INIT(&fe->recv_ring, recv_sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(fe->recv_ring.sring)); if (err < 0) { free_page((unsigned long)recv_sring); fe->recv_ring.sring = NULL; printk_err("Failed to grant recv_ring\n"); goto out; } fe->recv_ring_ref = err; fe->handle = 
simple_strtoul(strrchr(dev->nodename, '/') + 1, NULL, 0); dprintk_deb("setting handle = %u\n", fe->handle); dev_set_drvdata(&dev->dev, fe); err = 0; //omx_xenfront_dev->info = info; //fe->endpoints = kzalloc(sizeof(struct omx_endpoint*) * OMX_XEN_MAX_ENDPOINTS, GFP_KERNEL); xenbus_switch_state(dev, XenbusStateInitialising); out: dprintk_out(); return err; }