Example No. 1
/*
 * write_ep_fifo() - called from handle_epinirq(): copy the next chunk of an
 * IN request (at most ep.maxpacket bytes) into the endpoint FIFO.
 */
static int write_ep_fifo(struct rt_ep_struct *rt_ep, struct rt_request *req)
{
	u8	*buf, epcs;
	int	length, i, ep_no = EP_NO(rt_ep);

DBG;
	xprintk("w ep%d req=%p,r.l=%d,r.a=%d\n",EP_NO(rt_ep),&req->req,req->req.length,req->req.actual);
	epcs = read_epcs(rt_ep);
	if(epcs & EP_CS_BSY)
		FATAL_ERROR("EP%d busy. epcs=%x\n", ep_no, epcs);

	/* check INEP byte count is zero? */
	if(read_inbc(ep_no))
		FATAL_ERROR("EP%d bc=%d\n", ep_no, read_inbc(ep_no));

	buf = req->req.buf + req->req.actual;
	length = (req->req.length - req->req.actual) < rt_ep->ep.maxpacket ?
		 (req->req.length - req->req.actual) : rt_ep->ep.maxpacket;
	req->req.actual += length;
	if (!length) {	/* zlp */
		// for debug
		xprintk("<%s> zero packet\n", __func__);
		write_ep_fifo_zlp(rt_ep);
		return 0;
	}

	/* Copy the payload into the EP IN FIFO data register. */
	for (i = 0; i < length; i++)
		usb_write(0x80 + ep_no * 4, *buf++);

	/* Write EPCS back so the controller starts the IN transfer. */
	epcs = read_epcs(rt_ep);
	write_epcs(rt_ep, epcs);

	return length;
}
Example No. 2
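/*
 * write_ep0_fifo() - copy the next chunk of an IN request into the EP0 FIFO
 * and arm the endpoint by writing the byte count to IN0BC.  Returns the
 * number of bytes queued (at most ep.maxpacket).
 */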
static int write_ep0_fifo(struct rt_ep_struct *rt_ep, struct rt_request *req)
{
	u8	*buf;
	int	length, i;
	u32 maxpacket;

DBG;
	xprintk("q.l=%d, q.a=%d, maxp=%d\n", req->req.length, req->req.actual, rt_ep->ep.maxpacket);

	buf = req->req.buf + req->req.actual;
	maxpacket = (u32)(rt_ep->ep.maxpacket);
	length = min(req->req.length - req->req.actual, maxpacket);

	req->req.actual += length;

	if (!length && req->req.zero)
		FATAL_ERROR("zlp");

	if (!in_irq())
		FATAL_ERROR("Not in irq context");

	/* Copy the payload into the EP0 IN FIFO. */
	for (i = 0; i < length; i++)
		usb_write(EP0INDAT + i, *buf++);

	/* Arm EP0 IN by writing the byte count. */
	usb_write(IN0BC, length);
	if (length != rt_ep->ep.maxpacket)
		usb_write(EP0CS, 0x2);		/* short packet: clear NAK bit to ACK the host */

	return length;
}
Example No. 3
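/*
 * rx_do_tasklet() - bottom half for the OUT endpoints: drain each non-busy
 * OUT endpoint, complete requests on a short packet or a full buffer, and
 * mark endpoints with no queued request as pending.
 */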
static void rx_do_tasklet(unsigned long arg)
{
	struct rt_ep_struct 	*rt_ep;
	struct rt_request 		*req;
	struct usb_ep 			*ep;
	int						i;
	struct rt_udc_struct 	*rt_usb = &controller;

	for (i = IN_EP_NUM + 1 /* EP0 */; i < RT_USB_NB_EP; i++) {
		u8 epcs;

		rt_ep = &rt_usb->rt_ep[i];
		ep = &rt_ep->ep;

		epcs = read_epcs(rt_ep);
		while (!(epcs & EP_CS_BSY)) {
			req = handle_outep(rt_ep);
			if (!req) {
				/* No usb request found.
				 * Just set the pending flag and clear the int. */
				rt_ep->pending = 1;
				break;
			}

			/* Complete the request on a short packet or when the
			 * buffer has been filled. */
			if ((req->req.actual % rt_ep->ep.maxpacket) ||
			    (req->req.actual >= req->req.length)) {
				xprintk("q.l=%d,q.a=%d\n", req->req.length, req->req.actual);
				done(rt_ep, req, 0);
			}

			epcs = read_epcs(rt_ep);
			write_epcs(rt_ep, 0x0);
			epcs = read_epcs(rt_ep);
		}
	}
}
Example No. 4
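/* Log an EP0 state transition and record the new state. */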
static void ep0_chg_stat(const char *label, struct rt_udc_struct *rt_usb, enum ep0_state stat)
{
	xprintk("<0st>%s->%s\n", state_name[rt_usb->ep0state], state_name[stat]);

	if (rt_usb->ep0state == stat)
		return;
	rt_usb->ep0state = stat;
}
Example No. 5
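/*
 * write_ep_fifo_zlp() - queue a zero-length packet on an IN endpoint by
 * writing EPCS back without loading any data into the FIFO.
 */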
static int write_ep_fifo_zlp(struct rt_ep_struct *rt_ep)
{
	u8	epcs;
	int	ep_no = EP_NO(rt_ep);

DBG;
	xprintk("w%d ZLP\n", EP_NO(rt_ep));
	epcs = read_epcs(rt_ep);
	if (epcs & EP_CS_BSY)
		FATAL_ERROR("EP%d busy. cs=%x\n", ep_no, epcs);

	/* The IN endpoint byte count should already be zero. */
	if (read_inbc(ep_no))
		FATAL_ERROR("EP%d bc not zero. bc=%d\n", ep_no, read_inbc(ep_no));

	/* Write EPCS back with no data loaded to send the ZLP. */
	epcs = read_epcs(rt_ep);
	write_epcs(rt_ep, epcs);
	return 0;
}
Example No. 6
/* Check for suspend and resume events on this CPU and change the CPU state
 * accordingly.  Only called by the idle thread.
 */
void smp_cpu_safe(int cpu)
{
    struct thread *idle;

    switch (per_cpu(cpu, cpu_state)) {
    case CPU_SUSPENDING: /* cpu is marked to suspend */
        spin_lock(&cpu_lock);
        --suspend_count;
        spin_unlock(&cpu_lock);
        per_cpu(cpu, cpu_state) = CPU_DOWN; /* mark cpu as down */
        spin_lock(&cpu_lock);
        smp_active--;
        spin_unlock(&cpu_lock);
        break;
    case CPU_RESUMING: /* cpu is marked to resume */
        local_irq_disable();
        idle = per_cpu(cpu, idle_thread);
        spin_lock(&cpu_lock);
        smp_active++;
        spin_unlock(&cpu_lock);

        /* Restart the idle thread; it will set the CPU state back to UP. */
        restart_idle_thread((long)cpu,
                            (long)idle->stack + idle->stack_size,
                            (long)idle_thread_starter,
                            idle_thread_fn);
        /* never returns */
        BUG();
        break;
    case CPU_DOWN:
    case CPU_UP:
    case CPU_SLEEPING:
        break;
    default:
        xprintk("WARNING: unknown CPU state\n");
    }
}
Example No. 7
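/*
 * read_ep_fifo() - copy the bytes reported by the OUT byte-count register
 * from the endpoint FIFO into the current request buffer.  Returns the
 * number of bytes copied; the EP OUT irq handler re-arms the endpoint.
 */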
static int read_ep_fifo(struct rt_ep_struct *rt_ep, struct rt_request *req)
{
	u8	*buf, ep_no, fifo_reg;
	int	byte_count, req_bufferspace, count, i;

DBG;
	ep_no = EP_NO(rt_ep);

	byte_count = read_outbc(ep_no);
	if (unlikely(!byte_count))
		FATAL_ERROR("ep_no:%d bc = 0", ep_no);

	req_bufferspace = req->req.length - req->req.actual;

	buf = req->req.buf + req->req.actual;

	if (unlikely(!req_bufferspace))
		FATAL_ERROR("zlp");

	xprintk("bc=%d,r.l=%d,r.a=%d\n", byte_count, req->req.length, req->req.actual);
	if (unlikely(byte_count > req_bufferspace))
		FATAL_ERROR("buffer overflow, byte_count=%d, req->req.length=%d, req->req.actual=%d\n",
			    byte_count, req->req.length, req->req.actual);

	count = min(byte_count, req_bufferspace);

	/* FIFO data register for this endpoint. */
	fifo_reg = 0x80 + ep_no * 4;
	for (i = 0; i < count; i++)
		*buf++ = usb_read(fifo_reg);

	req->req.actual += count;

	/* The EP OUT irq handler will arm the next transaction. */
	return count;
}
Example No. 8
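/*
 * Boot a new kernel from within this domain: build the domain image with
 * libxc (xc_dom_*), swap the console, xenstore and boot frames into place,
 * shut down the frontends, update the M2P, and jump to the trampoline in
 * _boot_page.
 */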
void kexec(void *kernel, long kernel_size, void *module, long module_size, char *cmdline, unsigned long flags)
{
    struct xc_dom_image *dom;
    int rc;
    domid_t domid = DOMID_SELF;
    xen_pfn_t pfn;
    xc_interface *xc_handle;
    unsigned long i;
    void *seg;
    xen_pfn_t boot_page_mfn = virt_to_mfn(&_boot_page);
    char features[] = "";
    struct mmu_update *m2p_updates;
    unsigned long nr_m2p_updates;

    DEBUG("booting with cmdline %s\n", cmdline);
    xc_handle = xc_interface_open(0, 0, 0);

    dom = xc_dom_allocate(xc_handle, cmdline, features);
    dom->allocate = kexec_allocate;

    /* We are using guest owned memory, therefore no limits. */
    xc_dom_kernel_max_size(dom, 0);
    xc_dom_ramdisk_max_size(dom, 0);

    dom->kernel_blob = kernel;
    dom->kernel_size = kernel_size;

    dom->ramdisk_blob = module;
    dom->ramdisk_size = module_size;

    dom->flags = flags;
    dom->console_evtchn = start_info.console.domU.evtchn;
    dom->xenstore_evtchn = start_info.store_evtchn;

    tpm_hash2pcr(dom, cmdline);

    if ( (rc = xc_dom_boot_xen_init(dom, xc_handle, domid)) != 0 ) {
        grub_printf("xc_dom_boot_xen_init returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }
    if ( (rc = xc_dom_parse_image(dom)) != 0 ) {
        grub_printf("xc_dom_parse_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

#ifdef __i386__
    if (strcmp(dom->guest_type, "xen-3.0-x86_32p")) {
        grub_printf("can only boot x86 32 PAE kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif
#ifdef __x86_64__
    if (strcmp(dom->guest_type, "xen-3.0-x86_64")) {
        grub_printf("can only boot x86 64 kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif

    /* equivalent of xc_dom_mem_init */
    dom->arch_hooks = xc_dom_find_arch_hooks(xc_handle, dom->guest_type);
    dom->total_pages = start_info.nr_pages;

    /* equivalent of arch_setup_meminit */

    /* setup initial p2m */
    dom->p2m_host = malloc(sizeof(*dom->p2m_host) * dom->total_pages);

    /* Start with our current P2M */
    for (i = 0; i < dom->total_pages; i++)
        dom->p2m_host[i] = pfn_to_mfn(i);

    if ( (rc = xc_dom_build_image(dom)) != 0 ) {
        grub_printf("xc_dom_build_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

    /* copy hypercall page */
    /* TODO: domctl instead, but requires privileges */
    if (dom->parms.virt_hypercall != -1) {
        pfn = PHYS_PFN(dom->parms.virt_hypercall - dom->parms.virt_base);
        memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);
    }

    /* Equivalent of xc_dom_boot_image */
    dom->shared_info_mfn = PHYS_PFN(start_info.shared_info);

    if (!xc_dom_compat_check(dom)) {
        grub_printf("xc_dom_compat_check failed\n");
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }

    /* Move current console, xenstore and boot MFNs to the allocated place */
    do_exchange(dom, dom->console_pfn, start_info.console.domU.mfn);
    do_exchange(dom, dom->xenstore_pfn, start_info.store_mfn);
    DEBUG("virt base at %llx\n", dom->parms.virt_base);
    DEBUG("bootstack_pfn %lx\n", dom->bootstack_pfn);
    _boot_target = dom->parms.virt_base + PFN_PHYS(dom->bootstack_pfn);
    DEBUG("_boot_target %lx\n", _boot_target);
    do_exchange(dom, PHYS_PFN(_boot_target - dom->parms.virt_base),
            virt_to_mfn(&_boot_page));

    /* Make sure the bootstrap page table does not RW-map any of our current
     * page table frames */
    kexec_allocate(dom, dom->virt_pgtab_end);

    if ( (rc = xc_dom_update_guest_p2m(dom))) {
        grub_printf("xc_dom_update_guest_p2m returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

    if ( dom->arch_hooks->setup_pgtables )
        if ( (rc = dom->arch_hooks->setup_pgtables(dom))) {
            grub_printf("setup_pgtables returned %d\n", rc);
            errnum = ERR_BOOT_FAILURE;
            goto out;
        }

    /* start info page */
#undef start_info
    if ( dom->arch_hooks->start_info )
        dom->arch_hooks->start_info(dom);
#define start_info (start_info_union.start_info)

    xc_dom_log_memory_footprint(dom);

    /* Unmap libxc's projection of the boot page table */
    seg = xc_dom_seg_to_ptr(dom, &dom->pgtables_seg);
    munmap(seg, dom->pgtables_seg.vend - dom->pgtables_seg.vstart);

    /* Unmap day0 pages to avoid having a r/w mapping of the future page table */
    for (pfn = 0; pfn < allocated; pfn++)
        munmap((void*) pages[pfn], PAGE_SIZE);

    /* Pin the boot page table base */
    if ( (rc = pin_table(dom->xch,
#ifdef __i386__
                MMUEXT_PIN_L3_TABLE,
#endif
#ifdef __x86_64__
                MMUEXT_PIN_L4_TABLE,
#endif
                xc_dom_p2m_host(dom, dom->pgtables_seg.pfn),
                dom->guest_domid)) != 0 ) {
        grub_printf("pin_table(%lx) returned %d\n", xc_dom_p2m_host(dom,
                    dom->pgtables_seg.pfn), rc);
        errnum = ERR_BOOT_FAILURE;
        goto out_remap;
    }

    /* We populate the Mini-OS page table here so that boot.S can just call
     * update_va_mapping to project itself there.  */
    need_pgt(_boot_target);
    DEBUG("day0 pages %lx\n", allocated);
    DEBUG("boot target page %lx\n", _boot_target);
    DEBUG("boot page %p\n", &_boot_page);
    DEBUG("boot page mfn %lx\n", boot_page_mfn);
    _boot_page_entry = PFN_PHYS(boot_page_mfn) | L1_PROT;
    DEBUG("boot page entry %llx\n", _boot_page_entry);
    _boot_oldpdmfn = virt_to_mfn(start_info.pt_base);
    DEBUG("boot old pd mfn %lx\n", _boot_oldpdmfn);
    DEBUG("boot pd virt %lx\n", dom->pgtables_seg.vstart);
    _boot_pdmfn = dom->p2m_host[PHYS_PFN(dom->pgtables_seg.vstart - dom->parms.virt_base)];
    DEBUG("boot pd mfn %lx\n", _boot_pdmfn);
    _boot_stack = _boot_target + PAGE_SIZE;
    DEBUG("boot stack %lx\n", _boot_stack);
    _boot_start_info = dom->parms.virt_base + PFN_PHYS(dom->start_info_pfn);
    DEBUG("boot start info %lx\n", _boot_start_info);
    _boot_start = dom->parms.virt_entry;
    DEBUG("boot start %lx\n", _boot_start);

    /* Keep only useful entries */
    for (nr_m2p_updates = pfn = 0; pfn < start_info.nr_pages; pfn++)
        if (dom->p2m_host[pfn] != pfn_to_mfn(pfn))
            nr_m2p_updates++;

    m2p_updates = malloc(sizeof(*m2p_updates) * nr_m2p_updates);
    for (i = pfn = 0; pfn < start_info.nr_pages; pfn++)
        if (dom->p2m_host[pfn] != pfn_to_mfn(pfn)) {
            m2p_updates[i].ptr = PFN_PHYS(dom->p2m_host[pfn]) | MMU_MACHPHYS_UPDATE;
            m2p_updates[i].val = pfn;
            i++;
        }

    for (i = 0; i < blk_nb; i++)
        shutdown_blkfront(blk_dev[i]);
    if (net_dev)
        shutdown_netfront(net_dev);
    if (kbd_dev)
        shutdown_kbdfront(kbd_dev);
    stop_kernel();

    /* Update M2P */
    if ((rc = HYPERVISOR_mmu_update(m2p_updates, nr_m2p_updates, NULL, DOMID_SELF)) < 0) {
        xprintk("Could not update M2P\n");
        ASSERT(0);
    }

    xprintk("go!\n");

    /* Jump to trampoline boot page */
    _boot();

    ASSERT(0);

out_remap:
    for (pfn = 0; pfn < allocated; pfn++)
        do_map_frames(pages[pfn], &pages_mfns[pfn], 1, 0, 0, DOMID_SELF, 0, L1_PROT);
out:
    xc_dom_release(dom);
    for (pfn = 0; pfn < allocated; pfn++)
        free_page((void*)pages[pfn]);
    free(pages);
    free(pages_mfns);
    pages = NULL;
    pages_mfns = NULL;
    allocated = 0;
    xc_interface_close(xc_handle);
}
Example No. 9
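/*
 * Build a vcpu_guest_context for the given CPU's idle thread and hand it to
 * the hypervisor via VCPUOP_initialise, decoding the most common error codes.
 */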
static void cpu_initialize_context(unsigned int cpu)
{
    vcpu_guest_context_t ctxt;
    struct thread *idle_thread;

    init_cpu_pda(cpu);

    idle_thread = per_cpu(cpu, idle_thread);
    memset(&ctxt, 0, sizeof(ctxt));
    ctxt.flags = VGCF_IN_KERNEL;
    ctxt.user_regs.ds = __KERNEL_DS;
    ctxt.user_regs.es = 0;
    ctxt.user_regs.fs = 0;
    ctxt.user_regs.gs = 0;
    ctxt.user_regs.ss = __KERNEL_SS;
    ctxt.user_regs.eip = idle_thread->ip;
    ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000;	/* IOPL_RING1 */
    memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
    ctxt.ldt_ents = 0;
    ctxt.gdt_ents = 0;
#ifdef __i386__
    ctxt.user_regs.cs = __KERNEL_CS;
    ctxt.user_regs.esp = idle_thread->sp;
    ctxt.kernel_ss = __KERNEL_SS;
    ctxt.kernel_sp = ctxt.user_regs.esp;
    ctxt.event_callback_cs = __KERNEL_CS;
    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
    ctxt.failsafe_callback_cs = __KERNEL_CS;
    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));
#else /* __x86_64__ */
    ctxt.user_regs.cs = __KERNEL_CS;
    ctxt.user_regs.esp = idle_thread->sp;
    ctxt.kernel_ss = __KERNEL_SS;
    ctxt.kernel_sp = ctxt.user_regs.esp;
    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
    ctxt.syscall_callback_eip = 0;
    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));
    ctxt.gs_base_kernel = (unsigned long)&percpu[cpu];
#endif
    int err = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
    if (err) {
	char *str;

	switch (err) {
	    case -EINVAL:
		/*
		 * This interface squashes multiple error sources
		 * to one error code.  In particular, an X_EINVAL
		 * code can mean:
		 *
		 * -	the vcpu id is out of range
		 * -	cs or ss are in ring 0
		 * -	cr3 is wrong
		 * -	an entry in the new gdt is above the
		 *	reserved entry
		 * -	a frame underneath the new gdt is bad
		 */
		str = "something is wrong :(";
		break;
	    case -ENOENT:
		str = "no such cpu";
		break;
	    case -ENOMEM:
		str = "no mem to copy ctxt";
		break;
	    case -EFAULT:
		str = "bad address";
		break;
	    case -EEXIST:
		/*
		 * Hmm.  This error is returned if the vcpu has already
		 * been initialized once before in the lifetime of this
		 * domain.  This is a logic error in the kernel.
		 */
		str = "already initialized";
		break;
	    default:
		str = "<unexpected>";
		break;
	}

	xprintk("vcpu%d: failed to init: error %d: %s",
		cpu, -err, str);
    }
}