/*
 * Re-establish the Xen console event channel on resume.
 * dom0 only needs this during the first (cold) start-up, where the
 * VIRQ_CONSOLE virq is bound; a domU rebinds the console channel
 * published in xen_start_info every time.  Always reports success.
 */
static bool
xencons_resume(device_t dev, const pmf_qual_t *qual)
{
	int evtch = -1;

	if (!xendomain_is_dom0()) {
		evtch = xen_start_info.console_evtchn;
		event_set_handler(evtch, xencons_handler,
		    xencons_console_device, IPL_TTY, "xencons");
	} else if (cold) {
		/* dom0 console resume is required only during first start-up */
		evtch = bind_virq_to_evtch(VIRQ_CONSOLE);
		event_set_handler(evtch, xencons_intr,
		    xencons_console_device, IPL_TTY, "xencons");
	}

	if (evtch == -1)
		return true;

	aprint_verbose_dev(dev, "using event channel %d\n", evtch);
	hypervisor_enable_event(evtch);
	return true;
}
static inline error_t dma_do_async_request(void *dst, void *src, size_t size) { kmem_req_t req; dev_request_t *rq; req.type = KMEM_DMA_REQUEST; req.size = sizeof(*rq); req.flags = AF_KERNEL; if((rq = kmem_alloc(&req)) == NULL) return ENOMEM; memset(rq, 0, sizeof(*rq)); rq->src = src; rq->dst = dst; rq->count = size; rq->flags = DEV_RQ_NOBLOCK; event_set_priority(&rq->event, E_FUNC); event_set_handler(&rq->event, &dma_async_request_event); event_set_argument(&rq->event, rq); return __sys_dma->op.dev.write(__sys_dma, rq); }
/*
 * Set up the shared block-I/O ring with the hypervisor and mark the
 * interface active.  The ring mapping, index reset and event-handler
 * registration run only on the first call (while blk_ring is NULL);
 * later calls just reset the interface and flip the state.
 */
static void init_interface(void) {
	block_io_op_t op;

	reset_interface();
	if (blk_ring == NULL) {
		/* Ask the hypervisor for the machine frame of the ring. */
		op.cmd = BLOCK_IO_OP_RING_ADDRESS;
		/*
		 * NOTE(review): return value deliberately ignored, yet
		 * op.u.ring_mfn is consumed below — confirm this op
		 * cannot fail at this point of boot.
		 */
		(void)HYPERVISOR_block_io_op(&op);
		/* Reserve a page-aligned VA and wire it to the ring MFN. */
		blk_ring = (blk_ring_t *)uvm_km_valloc_align(kernel_map,
		    PAGE_SIZE, PAGE_SIZE);
		pmap_kenter_ma((vaddr_t)blk_ring,
		    op.u.ring_mfn << PAGE_SHIFT,
		    VM_PROT_READ|VM_PROT_WRITE);
		DPRINTF(XBDB_SETUP, ("init_interface: "
		    "ring va %p and wired to %p\n",
		    blk_ring, (void *)(op.u.ring_mfn << PAGE_SHIFT)));
		/* Start with all producer/consumer indexes at zero. */
		blk_ring->req_prod = blk_ring->resp_prod = resp_cons =
		    req_prod = last_req_prod = 0;
		/* Service block-device responses at IPL_BIO. */
		event_set_handler(_EVENT_BLKDEV, &xbd_response_handler,
		    NULL, IPL_BIO);
		hypervisor_enable_event(_EVENT_BLKDEV);
	}
	/* Make the ring setup visible before advertising STATE_ACTIVE. */
	__insn_barrier();
	state = STATE_ACTIVE;
}
/*
 * Enable virtual-block-device update notifications: spawn the kthread
 * that services update requests, then install and unmask the
 * _EVENT_VBD_UPD channel handler at block-I/O priority.
 */
static void
enable_update_events(struct device *self)
{
	kthread_create(xbd_update_create_kthread, self);
	event_set_handler(_EVENT_VBD_UPD, xbd_update_handler, self, IPL_BIO);
	hypervisor_enable_event(_EVENT_VBD_UPD);
}
/*
 * Initialize the Xen clock: snapshot the hypervisor's time values,
 * record the starting system time, then install and unmask the timer
 * event handler at IPL_CLOCK.
 *
 * Fixed: the definition used old-style empty parentheses (an
 * unprototyped declaration); use (void) for a proper prototype.
 */
void
xen_initclocks(void)
{
	get_time_values_from_xen();
	processed_system_time = shadow_system_time;
	/*
	 * xen_timer_handler is not declared with the int (*)(void *)
	 * signature event_set_handler() expects, hence the cast.
	 * Calling through a mismatched pointer type is formally UB;
	 * the handler must genuinely take (void *) and return int.
	 */
	event_set_handler(_EVENT_TIMER, (int (*)(void *))xen_timer_handler,
	    NULL, IPL_CLOCK);
	hypervisor_enable_event(_EVENT_TIMER);
}
/*
 * Set up the interrupt handler of the store event channel.
 * Returns 0 on success, or the error from event_set_handler().
 *
 * Fixed: the return value of event_set_handler() was ignored; the
 * sibling implementation checks it, and proceeding to unmask the
 * channel without a registered handler would lose store events.
 */
int xb_init_comms(device_t dev) {
	int evtchn, err;

	evtchn = xen_start_info.store_evtchn;
	err = event_set_handler(evtchn, wake_waiting, NULL, IPL_TTY,
	    "xenbus");
	if (err) {
		aprint_error_dev(dev,
		    "can't register xenbus event handler: %d\n", err);
		return err;
	}
	hypervisor_enable_event(evtchn);
	aprint_verbose_dev(dev, "using event channel %d\n", evtchn);
	return 0;
}
/* Set up interrupt handler off store event channel. */ int xb_init_comms(struct device *dev) { int err; if (xenbus_irq) event_remove_handler(xenbus_irq, wake_waiting, NULL); err = event_set_handler(xen_start_info.store_evtchn, wake_waiting, NULL, IPL_TTY, "xenbus"); if (err) { printf("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = xen_start_info.store_evtchn; printf("%s: using event channel %d\n", dev->dv_xname, xenbus_irq); hypervisor_enable_event(xenbus_irq); return 0; }
/*
 * Bring the xennet frontend back up after a (re)start: allocate and
 * grant the shared TX/RX rings, allocate the event channel, then
 * publish ring refs, event channel and XenbusStateConnected to the
 * backend in a single xenbus transaction (retried on EAGAIN).
 * Returns 0 on success, an errno on failure, or -1 if ending the
 * transaction fails.
 *
 * NOTE(review): on the early error returns below, the two wired ring
 * pages (and any grant already established) are not released —
 * confirm whether a resume failure is treated as fatal upstream or
 * whether cleanup is needed here.
 */
static int xennet_xenbus_resume(void *p) {
	struct xennet_xenbus_softc *sc = p;
	struct xenbus_transaction *xbt;
	int error;
	netif_tx_sring_t *tx_ring;
	netif_rx_sring_t *rx_ring;
	paddr_t ma;
	const char *errmsg;

	/* Grant refs are invalid until xenbus_grant_ring() fills them. */
	sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
	sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
	/* setup device: alloc event channel and shared rings */
	tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (tx_ring == NULL || rx_ring == NULL)
		panic("xennet_xenbus_resume: can't alloc rings");
	/* Initialize shared indexes and attach our front-ring views. */
	SHARED_RING_INIT(tx_ring);
	FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
	SHARED_RING_INIT(rx_ring);
	FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
	/* Grant the backend access to each ring page by machine address. */
	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
	if (error)
		return error;
	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
	if (error)
		return error;
	error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
	if (error)
		return error;
	aprint_verbose_dev(sc->sc_dev, "using event channel %d\n",
	    sc->sc_evtchn);
	event_set_handler(sc->sc_evtchn, &xennet_handler, sc, IPL_NET,
	    device_xname(sc->sc_dev));
again:
	/* Publish the frontend parameters; retry whole transaction on EAGAIN. */
	xbt = xenbus_transaction_start();
	if (xbt == NULL)
		return ENOMEM;
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
	if (error) {
		errmsg = "writing tx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
	if (error) {
		errmsg = "writing rx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "event-channel", "%u", sc->sc_evtchn);
	if (error) {
		errmsg = "writing event channel";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "state", "%d", XenbusStateConnected);
	if (error) {
		errmsg = "writing frontend XenbusStateConnected";
		goto abort_transaction;
	}
	error = xenbus_transaction_end(xbt, 0);
	if (error == EAGAIN)
		goto again;
	if (error) {
		xenbus_dev_fatal(sc->sc_xbusd, error,
		    "completing transaction");
		return -1;
	}
	/* Connected: prime the RX ring with buffers. */
	xennet_alloc_rx_buffer(sc);
	sc->sc_backend_status = BEST_CONNECTED;
	return 0;

abort_transaction:
	/* Roll the transaction back and report which write failed. */
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
	return error;
}
/*
 * Initialize a synchronization barrier for `count` threads.
 * `scope` selects a process-private (BARRIER_INIT_PRIVATE) or shared
 * barrier.  Allocates BARRIER_WQDB_NR wait-queue pages; on a hardware-
 * barrier architecture the event/handler pair is registered with the
 * arch layer instead of using the software counter.
 * Returns 0 on success, EINVAL for a zero count, ENOMEM when the task
 * thread limit or page allocation exceeds the barrier's capacity.
 */
error_t barrier_init(struct barrier_s *barrier, uint_t count, uint_t scope) {
	struct page_s *page;
	uint_t wqdbsz;	/* wait-queue records that fit in one page */
	uint_t i;
	error_t err;
	kmem_req_t req;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	if(count == 0)
		return EINVAL;

	/* The task could never fill this barrier: refuse early. */
	if(current_task->threads_limit > (BARRIER_WQDB_NR*wqdbsz)) {
		printk(INFO, "INFO: %s: pid %d, cpu %d, task threads limit exceed barrier ressources of %d\n",
		    __FUNCTION__, current_task->pid, cpu_get_id(),
		    BARRIER_WQDB_NR*wqdbsz);
		return ENOMEM;
	}

	if(count > BARRIER_WQDB_NR*wqdbsz)
		return ENOMEM;

	barrier->owner = (scope == BARRIER_INIT_PRIVATE) ? current_task : NULL;

#if ARCH_HAS_BARRIERS
	/* Delegate to the hardware barrier; broadcast via our event. */
	barrier->cluster = current_cluster;
	event_set_handler(&barrier->event, &barrier_broadcast_event);
	event_set_argument(&barrier->event, barrier);
	barrier->hwid = arch_barrier_init(barrier->cluster, &barrier->event,
	    count);
	if(barrier->hwid < 0)
		return ENOMEM;	/* TODO: we can use software barrier instead */
#else
	/* Software fallback: atomic countdown for private barriers,
	 * lock + index for shared ones. */
	if(barrier->owner != NULL)
		atomic_init(&barrier->waiting, count);
	else {
		spinlock_init(&barrier->lock, "barrier");
		barrier->index = 0;
	}
#endif	/* ARCH_HAS_BARRIERS */

	/* Allocate one zeroed user page per wait-queue block. */
	req.type = KMEM_PAGE;
	req.size = 0;
	req.flags = AF_USER | AF_ZERO;
	err = 0;

	for(i = 0; i < BARRIER_WQDB_NR; i++) {
		page = kmem_alloc(&req);
		if(page == NULL) {
			err = ENOMEM;
			break;
		}
		barrier->wqdb_tbl[i] = ppm_page2addr(page);
		barrier->pages_tbl[i] = page;
	}

	if(err) {
		/* Partial failure: free the i pages allocated so far.
		 * err is reused here as the allocated-page count. */
		err = i;
		for(i = 0; i < err; i++) {
			req.ptr = barrier->pages_tbl[i];
			kmem_free(&req);
		}
		return ENOMEM;
	}

	barrier->count = count;
	barrier->signature = BARRIER_ID;
	barrier->state[0] = 0;
	barrier->state[1] = 0;
	barrier->phase = 0;
	barrier->name = "Barrier-Sync";
	return 0;
}
/*
 * Kernel VFS daemon entry point.  Mounts the root filesystem (type
 * chosen at build time), loads the initial user process (or, in dev
 * builds, falls back to a kernel mini-shell), then loops forever
 * syncing dirty pages every 10 alarm ticks and periodically printing
 * the DQDT summary.  Never returns in practice.
 */
void* kvfsd(void *arg) {
	uint_t tm_now, cntr;
	struct task_s *task;
	struct thread_s *this;
	struct cpu_s *cpu;
	struct alarm_info_s info;
	struct event_s event;
	uint_t fs_type;
	error_t err;

	cpu_enable_all_irq(NULL);

	printk(INFO, "INFO: Starting KVFSD on CPU %d [ %d ]\n",
	    cpu_get_id(), cpu_time_stamp());

	task = current_task;
	fs_type = VFS_TYPES_NR;

	/* Exactly one root-fs type may be configured. */
#if CONFIG_ROOTFS_IS_EXT2
	fs_type = VFS_EXT2_TYPE;
#endif

#if CONFIG_ROOTFS_IS_VFAT
#if CONFIG_ROOTFS_IS_EXT2
#error More than one root fs has been selected
#endif
	fs_type = VFS_VFAT_TYPE;
#endif	/* CONFIG_ROOTFS_IS_VFAT_TYPE */

	/* Mount the root filesystem and make it the daemon's cwd. */
	err = vfs_init(__sys_blk, fs_type, VFS_MAX_NODE_NUMBER,
	    VFS_MAX_FILE_NUMBER, &task->vfs_root);
	task->vfs_cwd = task->vfs_root;

	printk(INFO, "INFO: Virtual File System (VFS) Is Ready\n");

	sysconf_init();

	if(err == 0) {
		if((err = task_load_init(task))) {
			printk(WARNING,
			    "WARNING: failed to load user process, err %d [%u]\n",
			    err, cpu_time_stamp());
		}
	}

#if CONFIG_DEV_VERSION
	/* Dev builds: no init process — start a kernel-level shell. */
	if(err != 0) {
		struct thread_s *thread;

		printk(INFO, "INFO: Creating kernel level terminal\n");

		thread = kthread_create(task, &kMiniShelld, NULL,
		    current_cluster->id, current_cpu->lid);
		thread->task = task;
		list_add_last(&task->th_root, &thread->rope);
		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
	}
#endif

	this = current_thread;
	cpu = current_cpu;

	/* Wake-up event fired by the alarm handler every period. */
	event_set_senderId(&event, this);
	event_set_priority(&event, E_FUNC);
	event_set_handler(&event, &kvfsd_alarm_event_handler);

	info.event = &event;
	cntr = 0;

	while(1) {
		/* Sleep until the 10-tick alarm fires, then sync. */
		alarm_wait(&info, 10);
		sched_sleep(this);
		tm_now = cpu_time_stamp();
		printk(INFO, "INFO: System Current TimeStamp %u\n", tm_now);
		sync_all_pages();

		/* Print the DQDT summary every 4th period. */
		if((cntr % 4) == 0)
			dqdt_print_summary(dqdt_root);

		cntr ++;
	}

	return NULL;	/* not reached */
}