/** * map frontend ring page * bind event channel * init **/ static int chrif_map(struct xen_chrif *chrif, unsigned long ring_ref, unsigned int evtchn) { int err; chrif_sring_t *sring; chrif->comms_area = alloc_vm_area(PAGE_SIZE, NULL); if(chrif->comms_area == NULL){ free_vm_area(chrif->comms_area); printk("\nxen: dom0: could not allocate shared_page"); return -ENOMEM; } err = map_frontend_pages(chrif, ring_ref); if(err){ free_vm_area(chrif->comms_area); printk("\nxen: dom0: map frontend page fail"); return err; } sring = (chrif_sring_t *)chrif->comms_area->addr; BACK_RING_INIT(&chrif->chr_ring, sring, PAGE_SIZE); err = bind_interdomain_evtchn_to_irqhandler(chrif->domid, evtchn, chrif_int, 0, "domtest2", chrif); if (err < 0) { printk(KERN_DEBUG "\nxen: dom0: chrif_int failed binding to evtchn"); unmap_frontend_pages(chrif); return -EFAULT; } chrif->irq = err; printk(KERN_DEBUG "\nxen: dom0:bind event channel fineshed: irq = %d\n", chrif->irq); printk("\nxen: dom0: chrif map finished, otherend_id"); return 0; }
static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref, int remote_evtchn) { int err = 0; void *vaddr; dev_dbg(&pdev->xdev->dev, "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n", gnt_ref, remote_evtchn); err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr); if (err < 0) { xenbus_dev_fatal(pdev->xdev, err, "Error mapping other domain page in ours."); goto out; } pdev->sh_info = vaddr; err = bind_interdomain_evtchn_to_irqhandler( pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event, 0, DRV_NAME, pdev); if (err < 0) { xenbus_dev_fatal(pdev->xdev, err, "Error binding event channel to IRQ"); goto out; } pdev->evtchn_irq = err; err = 0; dev_dbg(&pdev->xdev->dev, "Attached!\n"); out: return err; }
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, unsigned int evtchn) { int err; /* Already connected through? */ if (blkif->irq) return 0; blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE); if (!blkif->blk_ring_area) return -ENOMEM; err = map_frontend_page(blkif, shared_page); if (err) { free_vm_area(blkif->blk_ring_area); return err; } switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: { struct blkif_sring *sring; sring = (struct blkif_sring *)blkif->blk_ring_area->addr; BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_32: { struct blkif_x86_32_sring *sring_x86_32; sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr; BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_64: { struct blkif_x86_64_sring *sring_x86_64; sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr; BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE); break; } default: BUG(); } err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, xen_blkif_be_int, 0, "blkif-backend", blkif); if (err < 0) { unmap_frontend_page(blkif); free_vm_area(blkif->blk_ring_area); blkif->blk_rings.common.sring = NULL; return err; } blkif->irq = err; return 0; }
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, unsigned int evtchn) { int err; /* Already connected through? */ if (blkif->irq) return 0; err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring); if (err < 0) return err; switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: { struct blkif_sring *sring; sring = (struct blkif_sring *)blkif->blk_ring; BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_32: { struct blkif_x86_32_sring *sring_x86_32; sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring; BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_64: { struct blkif_x86_64_sring *sring_x86_64; sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring; BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE); break; } default: BUG(); } err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, xen_blkif_be_int, 0, "blkif-backend", blkif); if (err < 0) { xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring); blkif->blk_rings.common.sring = NULL; return err; } blkif->irq = err; return 0; }
int scsiback_init_sring(struct vscsibk_info *info, unsigned long ring_ref, unsigned int evtchn) { struct vscsiif_sring *sring; int err; if (info->irq) { printk(KERN_ERR "scsiback: Already connected through?\n"); return -1; } info->ring_area = alloc_vm_area(PAGE_SIZE); if (!info) return -ENOMEM; err = map_frontend_page(info, ring_ref); if (err) goto free_vm; sring = (struct vscsiif_sring *) info->ring_area->addr; BACK_RING_INIT(&info->ring, sring, PAGE_SIZE); err = bind_interdomain_evtchn_to_irqhandler( info->domid, evtchn, scsiback_intr, 0, "vscsiif-backend", info); if (err < 0) goto unmap_page; info->irq = err; return 0; unmap_page: unmap_frontend_page(info); free_vm: free_vm_area(info->ring_area); return err; }
//map the ring_page,init backend ring and bind event channle static int connection_establishment(void ) { int err; chrif_sring_t *sring; struct vm_struct *v_start; //map the granted page v_start = map_sharedpage(info.ring_gref); //init back ring and bing event channel unmap_ops.host_addr = (unsigned long)(v_start->addr); unmap_ops.handle = ops.handle; sring = (chrif_sring_t *)v_start->addr; BACK_RING_INIT(&info.ring, sring, PAGE_SIZE); err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, chrif_int, 0, "dom0", &info); if (err < 0) { printk(KERN_DEBUG "\nxen: dom0: gnt_init failed binding to evtchn"); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1); return -EFAULT; } info.irq = err; printk(KERN_DEBUG "\nxen: dom0: irq = %d\n", info.irq); return 0; }
/* Setup all the comms needed to chat with the front end driver */
/*
 * Acquires, in order: frontend config, work handler, frontend MAC,
 * shared comms page(s), two message queues carved out of those pages,
 * two event-channel IRQs, and a forwarding-table entry.  The failN
 * labels at the bottom unwind those acquisitions in reverse order.
 * Returns 0 on success or a negative error code.
 */
static int setup_vnic(struct xenbus_device *dev)
{
	struct netback_accel *bend;
	int grants[2], err, msgs_per_queue;

	bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);

	/* Read the two grant refs the frontend published into grants[]. */
	err = cfg_frontend_info(dev, bend, grants);
	if (err)
		goto fail1;

	/*
	 * If we get here, both frontend Connected and configuration
	 * options available. All is well.
	 */

	/* Get the hardware quotas for the VNIC in question. */
	cfg_hw_quotas(dev, bend);

	/* Set up the deferred work handlers */
	/* INIT_WORK lost its data argument in 2.6.20, hence the #if. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	INIT_WORK(&bend->handle_msg, netback_accel_msg_rx_handler);
#else
	INIT_WORK(&bend->handle_msg, netback_accel_msg_rx_handler,
		  (void *)bend);
#endif

	/* Request the frontend mac */
	err = net_accel_xen_net_read_mac(dev, bend->mac);
	if (err)
		goto fail2;

	/* Set up the shared page. */
	bend->shared_page = net_accel_map_grants_contig(dev, grants, 2,
							&bend->sh_pages_unmap);
	if (bend->shared_page == NULL) {
		EPRINTK("failed to map shared page for %s\n", dev->otherend);
		err = -ENOMEM;
		goto fail2;
	}

	/* Initialise the shared page(s) used for comms */
	/* Third arg: advertise link-up only if our net_dev is up with carrier. */
	net_accel_msg_init_page(bend->shared_page, PAGE_SIZE,
				(bend->net_dev->flags & IFF_UP) &&
				(netif_carrier_ok(bend->net_dev)));

	/* Each direction gets half a page's worth of messages. */
	msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);

	/* to-domU queue: ring storage starts one page into the mapping. */
	net_accel_msg_init_queue
		(&bend->to_domU, &bend->shared_page->queue0,
		 (struct net_accel_msg *)((__u8 *)bend->shared_page + PAGE_SIZE),
		 msgs_per_queue);

	/* from-domU queue: ring storage starts at the 1.5-page offset. */
	net_accel_msg_init_queue
		(&bend->from_domU, &bend->shared_page->queue1,
		 (struct net_accel_msg *)((__u8 *)bend->shared_page +
					  (3 * PAGE_SIZE / 2)),
		 msgs_per_queue);

	/* Bind the message event channel to a handler
	 *
	 * Note that we will probably get a spurious interrupt when we
	 * do this, so it must not be done until we have set up
	 * everything we need to handle it.
	 */
	err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
						    bend->msg_channel,
						    msgirq_from_frontend, 0,
						    "netback_accel", dev);
	if (err < 0) {
		EPRINTK("failed to bind event channel: %d\n", err);
		goto fail3;
	}
	else
		bend->msg_channel_irq = err;

	/* TODO: No need to bind this evtchn to an irq. */
	err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
						    bend->net_channel,
						    netirq_from_frontend, 0,
						    "netback_accel", dev);
	if (err < 0) {
		EPRINTK("failed to bind net channel: %d\n", err);
		goto fail4;
	}
	else
		bend->net_channel_irq = err;

	/*
	 * Grab ourselves an entry in the forwarding hash table. We do
	 * this now so we don't have the embarassmesnt of sorting out
	 * an allocation failure while at IRQ. Because we pass NULL as
	 * the context, the actual hash lookup will succeed for this
	 * NIC, but the check for somewhere to forward to will
	 * fail. This is necessary to prevent forwarding before
	 * hardware resources are set up
	 */
	err = netback_accel_fwd_add(bend->mac, NULL, bend->fwd_priv);
	if (err) {
		EPRINTK("failed to add to fwd hash table\n");
		goto fail5;
	}

	/*
	 * Say hello to frontend. Important to do this straight after
	 * obtaining the message queue as otherwise we are vulnerable
	 * to an evil frontend sending a HELLO-REPLY before we've sent
	 * the HELLO and confusing us
	 */
	netback_accel_msg_tx_hello(bend, NET_ACCEL_MSG_VERSION);

	return 0;

	/* Unwind in reverse order of acquisition. */
fail5:
	unbind_from_irqhandler(bend->net_channel_irq, dev);
fail4:
	unbind_from_irqhandler(bend->msg_channel_irq, dev);
fail3:
	net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
	bend->shared_page = NULL;
	bend->sh_pages_unmap = NULL;
fail2:
fail1:
	return err;
}
int init_module(void) { struct vm_struct * v_start; int ret, err; struct xenbus_transaction trans; int rc; log_info("init_module"); rc = xenbus_transaction_start(&trans); check(rc==0, "transaction start failed"); rc = xenbus_scanf(trans, "yinwuzhe", "gref", "%d",&gref); check(rc!=-ERANGE, "xenbus_scanf failed"); rc = xenbus_scanf(trans, "yinwuzhe", "port", "%d",&port); check(rc!=-ERANGE, "xenbus_scanf failed"); xenbus_transaction_end(trans, 0); log_info("read from the xenstore is gref=%d,port=%d",gref, port); //this func reserves a range of kernel address space //and allocates pagetables to map that area, but no actual mapping here. v_start = alloc_vm_area(PAGE_SIZE , NULL); check(v_start, "xen:could not allocate page\n"); /* //gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, // uint32_t flags, grant_ref_t ref, domid_t domid) gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map, gref , remoteDomain); //do the map hypercall ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1); check(ret==0, "xen: HYPERSIOR map grant ref failed.\n"); //check the staus from the out param of the ops check(ops.status==0, "xen: HYPERSIOR map grant ref failed with status %d.\n",ops.status); //printk handle log_info("xen:map grant success:\n\tshared_page=%x, handle=%d\n", (unsigned long)v_start->addr,ops.handle); //if mapped,init the unmap ops with the handle and host_addr unmap_ops.host_addr=(unsigned long)v_start->addr; unmap_ops.handle=ops.handle; */ info.remoteDomain=0; info.evtchn =port; err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, handle_evt, 0,"get-grant",&info); check(err>=0,"bind_interdomain_evtchn_to_irqhandler failed."); info.irq=err; log_info("evtchn port = %d, the handle irq = %d", info.evtchn, info.irq); return 0; error: free_vm_area(v_start); return -1; }
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id) { struct backendinfo* binfo; // int i; // char *p; struct vm_struct *v_start; int err; as_sring_t *sring; char *gref,*port; binfo = kmalloc(sizeof(*binfo),GFP_KERNEL); if (!binfo) { xenbus_dev_error(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } memset(binfo,0,sizeof(*binfo)); binfo->dev = dev; printk(KERN_ALERT"\nProbe fired!\n"); gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL); port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL); info.gref=mycti(gref); info.evtchn=mycti(port); printk("Xenstore read port and gref success: %d, %d \n", info.evtchn, info.gref); info.remoteDomain = binfo->dev->otherend_id; printk(KERN_DEBUG "\nxen: dom0: gnt_init with gref = %d\n", info.gref); v_start = alloc_vm_area(PAGE_SIZE,NULL); if(v_start == 0){ free_vm_area(v_start); printk("\nxen: dom0:could not allocate page"); return -EFAULT; } gnttab_set_map_op(&ops,(unsigned long)v_start->addr,GNTMAP_host_map,info.gref,info.remoteDomain); if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,&ops,1)){ printk(KERN_DEBUG"\nxen:dom0:HYPERVISOR map grant ref failed"); return -EFAULT; } if (ops.status) { printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status); return -EFAULT; } printk(KERN_DEBUG "\nxen:dom0:shared_page=%x, handle = %x, status = %x", (unsigned int)v_start->addr, ops.handle, ops.status); unmap_ops.host_addr = (unsigned long)(v_start->addr); unmap_ops.handle = ops.handle; ////////////// /* p = (char *)(v_start->addr) + PAGE_SIZE - 1; printk(KERN_DEBUG "\nbytes in page"); for (i = 0;i <= 10; i++, p--) { printk(KERN_DEBUG "%c", *p); } */ //////////////// sring = (as_sring_t *)v_start->addr; BACK_RING_INIT(&info.ring, sring, PAGE_SIZE); err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, as_int, 0, "dom0", &info); if (err < 0) { printk(KERN_DEBUG "\nxen:dom0: gnt_init failed binding to evtchn"); err = 
HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1); return -EFAULT; } info.irq = err; printk(KERN_DEBUG "\nxen:dom0:end gnt_int: int = %d", info.irq); return 0; }