Example #1
struct vmm_netport *vmm_netport_alloc(char *name, u32 queue_size)
{
	u32 i;
	struct dlist *l;
	struct vmm_netport *port;

	port = vmm_zalloc(sizeof(struct vmm_netport));
	if (!port) {
		vmm_printf("%s Failed to allocate net port\n", __func__);
		return NULL;
	}

	INIT_LIST_HEAD(&port->head);
	strncpy(port->name, name, VMM_NETPORT_MAX_NAME_SIZE);
	port->queue_size = (queue_size < VMM_NETPORT_MAX_QUEUE_SIZE) ?
				queue_size : VMM_NETPORT_MAX_QUEUE_SIZE;
	/* The snippet never allocates xfer_pool before walking it below;
	 * assuming it is a dynamically allocated array of
	 * struct vmm_netport_xfer, size it here. */
	port->xfer_pool = vmm_zalloc(sizeof(struct vmm_netport_xfer) *
				     port->queue_size);
	if (!port->xfer_pool) {
		vmm_free(port);
		return NULL;
	}

	port->free_count = port->queue_size;
	INIT_SPIN_LOCK(&port->free_list_lock);
	INIT_LIST_HEAD(&port->free_list);

	for(i = 0; i < port->queue_size; i++) {
		l = &((port->xfer_pool + i)->head);
		list_add_tail(l, &port->free_list);
	}

	return port;
}
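The free list built above is drained by the transfer allocation path. A minimal sketch of that consumer, assuming Xvisor-style list helpers; this is illustrative, not the verbatim implementation:

/* Sketch: pop one xfer entry from port->free_list under free_list_lock. */
struct vmm_netport_xfer *vmm_netport_alloc_xfer(struct vmm_netport *port)
{
	irq_flags_t flags;
	struct vmm_netport_xfer *xfer = NULL;

	vmm_spin_lock_irqsave(&port->free_list_lock, flags);
	if (!list_empty(&port->free_list)) {
		xfer = list_entry(port->free_list.next,
				  struct vmm_netport_xfer, head);
		list_del(&xfer->head);
		port->free_count--;
	}
	vmm_spin_unlock_irqrestore(&port->free_list_lock, flags);

	return xfer;
}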
Example #2
void INIT_EVENT( EVENT_HNDL* pEvent )
{
	pEvent->SetFlag = 0;
	pEvent->WaitCnt = 0;
	init_waitqueue_head( &pEvent->WaitQue);
	INIT_SPIN_LOCK( &pEvent->FlagLock );
}
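For context, a minimal sketch of the signalling side this initializer prepares for, in the same Linux-waitqueue style; SET_EVENT and its body are assumptions, not code from this driver:

/* Sketch: set the flag under FlagLock, then wake any waiters. */
void SET_EVENT( EVENT_HNDL* pEvent )
{
	unsigned long flags;

	spin_lock_irqsave( &pEvent->FlagLock, flags );
	pEvent->SetFlag = 1;
	spin_unlock_irqrestore( &pEvent->FlagLock, flags );

	if ( pEvent->WaitCnt )
		wake_up_interruptible( &pEvent->WaitQue );
}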
Example #3
static struct vmm_blockdev *__blockdev_alloc(bool alloc_rq)
{
	struct vmm_blockdev *bdev;

	bdev = vmm_zalloc(sizeof(struct vmm_blockdev));
	if (!bdev) {
		return NULL;
	}

	INIT_LIST_HEAD(&bdev->head);
	INIT_MUTEX(&bdev->child_lock);
	bdev->child_count = 0;
	INIT_LIST_HEAD(&bdev->child_list);

	if (alloc_rq) {
		bdev->rq = vmm_zalloc(sizeof(struct vmm_request_queue));
		if (!bdev->rq) {
			vmm_free(bdev);
			return NULL;
		}

		INIT_SPIN_LOCK(&bdev->rq->lock);
	} else {
		bdev->rq = NULL;
	}

	return bdev;
}
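A matching teardown helper keeps the two allocations symmetric. A sketch under the assumption that no other references to bdev remain; __blockdev_free() is a hypothetical name:

/* Sketch: undo __blockdev_alloc() in reverse order. */
static void __blockdev_free(struct vmm_blockdev *bdev)
{
	if (!bdev)
		return;

	if (bdev->rq)
		vmm_free(bdev->rq);
	vmm_free(bdev);
}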
Example #4
int __cpuinit vmm_scheduler_init(void)
{
	int rc;
	char vcpu_name[VMM_FIELD_NAME_SIZE];
	u32 cpu = vmm_smp_processor_id();
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Reset the scheduler control structure */
	memset(schedp, 0, sizeof(struct vmm_scheduler_ctrl));

	/* Create ready queue (Per Host CPU) */
	schedp->rq = vmm_schedalgo_rq_create();
	if (!schedp->rq) {
		return VMM_EFAIL;
	}
	INIT_SPIN_LOCK(&schedp->rq_lock);

	/* Initialize current VCPU. (Per Host CPU) */
	schedp->current_vcpu = NULL;

	/* Initialize IRQ state (Per Host CPU) */
	schedp->irq_context = FALSE;
	schedp->irq_regs = NULL;

	/* Initialize yield on exit (Per Host CPU) */
	schedp->yield_on_irq_exit = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	INIT_TIMER_EVENT(&schedp->ev, &vmm_scheduler_timer_event, schedp);

	/* Create idle orphan vcpu with default time slice. (Per Host CPU) */
	vmm_snprintf(vcpu_name, sizeof(vcpu_name), "idle/%d", cpu);
	schedp->idle_vcpu = vmm_manager_vcpu_orphan_create(vcpu_name,
						(virtual_addr_t)&idle_orphan,
						IDLE_VCPU_STACK_SZ,
						IDLE_VCPU_PRIORITY, 
						IDLE_VCPU_TIMESLICE);
	if (!schedp->idle_vcpu) {
		return VMM_EFAIL;
	}

	/* The idle vcpu needs to stay on this cpu */
	if ((rc = vmm_manager_vcpu_set_affinity(schedp->idle_vcpu,
						vmm_cpumask_of(cpu)))) {
		return rc;
	}

	/* Kick idle orphan vcpu */
	if ((rc = vmm_manager_vcpu_kick(schedp->idle_vcpu))) {
		return rc;
	}

	/* Start scheduler timer event */
	vmm_timer_event_start(&schedp->ev, 0);

	/* Mark this CPU online */
	vmm_set_cpu_online(cpu, TRUE);

	return VMM_OK;
}
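The rq_lock initialized above guards all ready-queue manipulation. A minimal sketch of that pattern; rq_enqueue() is a hypothetical wrapper and vmm_schedalgo_rq_enqueue() is assumed to be the underlying queue operation:

/* Sketch: serialize ready-queue insertion with rq_lock. */
static int rq_enqueue(struct vmm_scheduler_ctrl *schedp,
		      struct vmm_vcpu *vcpu)
{
	int rc;
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&schedp->rq_lock, flags);
	rc = vmm_schedalgo_rq_enqueue(schedp->rq, vcpu);
	vmm_spin_unlock_irqrestore(&schedp->rq_lock, flags);

	return rc;
}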
Example #5
struct vmm_ringbuf *vmm_ringbuf_alloc(u32 key_size, u32 key_count)
{
	struct vmm_ringbuf *rb;

	rb = vmm_malloc(sizeof(struct vmm_ringbuf));
	if (!rb) {
		return NULL;
	}

	INIT_SPIN_LOCK(&rb->lock);
	rb->keys = vmm_malloc(key_size * key_count);
	if (!rb->keys) {
		goto rb_init_fail;
	}
	rb->key_size = key_size;
	rb->key_count = key_count;
	rb->read_pos = 0;
	rb->write_pos = 0;
	rb->avail_count = 0;

	return rb;

rb_init_fail:
	vmm_free(rb);
	return NULL;
}
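For context, a sketch of the enqueue side that rb->lock serializes; this is a simplified assumption (the real API may also take an overwrite flag), not the verbatim implementation:

/* Sketch: copy one key into the ring under rb->lock. */
bool vmm_ringbuf_enqueue(struct vmm_ringbuf *rb, void *srckey)
{
	bool ret = FALSE;
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&rb->lock, flags);
	if (rb->avail_count < rb->key_count) {
		memcpy((u8 *)rb->keys + rb->write_pos * rb->key_size,
		       srckey, rb->key_size);
		rb->write_pos = (rb->write_pos + 1) % rb->key_count;
		rb->avail_count++;
		ret = TRUE;
	}
	vmm_spin_unlock_irqrestore(&rb->lock, flags);

	return ret;
}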
Example #6
static int arm11mpcore_emulator_probe(struct vmm_guest *guest,
				      struct vmm_emudev *edev,
				      const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct arm11mpcore_priv_state *s;
	u32 parent_irq, timer_irq[2];

	s = vmm_zalloc(sizeof(struct arm11mpcore_priv_state));
	if (!s) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_done;
	}

	s->num_cpu = guest->vcpu_count;

	rc = vmm_devtree_read_u32(edev->node, "parent_irq", &parent_irq);
	if (rc) {
		goto arm11mp_probe_failed;
	}

	rc = vmm_devtree_read_u32_array(edev->node, "timer_irq",
					timer_irq, array_size(timer_irq));
	if (rc) {
		goto arm11mp_probe_failed;
	}

	/* Allocate and init MPT state */
	if (!(s->mpt = mptimer_state_alloc(guest, edev, s->num_cpu, 1000000,
				 	   timer_irq[0], timer_irq[1]))) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_failed;
	}

	/* Allocate and init GIC state */
	if (!(s->gic = gic_state_alloc(edev->node->name, guest, 
				       GIC_TYPE_ARM11MPCORE, s->num_cpu,
				       FALSE, 0, 96, parent_irq))) {
		rc = VMM_ENOMEM;
		goto arm11mp_gic_alloc_failed;
	}

	s->guest = guest;
	INIT_SPIN_LOCK(&s->lock);

	edev->priv = s;

	goto arm11mp_probe_done;

arm11mp_gic_alloc_failed:
	mptimer_state_free(s->mpt);

arm11mp_probe_failed:
	vmm_free(s);

arm11mp_probe_done:
	return rc;
}
Example #7
static int vsdaemon_telnet_setup(struct vsdaemon *vsd, int argc, char **argv)
{
	int rc = VMM_OK;
	u32 port;
	struct vsdaemon_telnet *tnet;

	if (argc < 1) {
		return VMM_EINVALID;
	}

	port = strtoul(argv[0], NULL, 0);
	if (!vsdaemon_valid_port(port)) {
		return VMM_EINVALID;
	}

	tnet = vmm_zalloc(sizeof(*tnet));
	if (!tnet) {
		return VMM_ENOMEM;
	}

	tnet->port = port;

	tnet->sk = netstack_socket_alloc(NETSTACK_SOCKET_TCP);
	if (!tnet->sk) {
		rc = VMM_ENOMEM;
		goto fail1;
	}

	rc = netstack_socket_bind(tnet->sk, NULL, tnet->port); 
	if (rc) {
		goto fail2;
	}

	rc = netstack_socket_listen(tnet->sk);
	if (rc) {
		goto fail3;
	}

	tnet->active_sk = NULL;

	tnet->tx_buf_head = tnet->tx_buf_tail = tnet->tx_buf_count = 0;
	INIT_SPIN_LOCK(&tnet->tx_buf_lock);

	vsdaemon_transport_set_data(vsd, tnet);

	return VMM_OK;

fail3:
	netstack_socket_close(tnet->sk);
fail2:
	netstack_socket_free(tnet->sk);
fail1:
	vmm_free(tnet);
	return rc;
}
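A sketch of how tx_buf_lock would serialize producers on the transmit ring initialized above; the tx_buf member and VSDAEMON_TELNET_TXBUF_SIZE macro are assumed names for illustration:

/* Sketch: queue one byte on the telnet transmit ring. */
static void vsdaemon_telnet_queue_tx(struct vsdaemon_telnet *tnet, u8 ch)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&tnet->tx_buf_lock, flags);
	if (tnet->tx_buf_count < VSDAEMON_TELNET_TXBUF_SIZE) {
		tnet->tx_buf[tnet->tx_buf_tail] = ch;
		tnet->tx_buf_tail =
			(tnet->tx_buf_tail + 1) % VSDAEMON_TELNET_TXBUF_SIZE;
		tnet->tx_buf_count++;
	}
	vmm_spin_unlock_irqrestore(&tnet->tx_buf_lock, flags);
}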
Example #8
int __init vmm_profiler_init(void)
{
	pctrl.stat =
	    vmm_malloc(sizeof(struct vmm_profiler_stat) * kallsyms_num_syms);

	if (pctrl.stat == NULL) {
		return VMM_EFAIL;
	}

	vmm_memset(pctrl.stat, 0, sizeof(struct vmm_profiler_stat) *
		   kallsyms_num_syms);

	INIT_SPIN_LOCK(&pctrl.lock);

	return VMM_OK;
}
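A sketch of the kind of update pctrl.lock protects; the counter member name is assumed, and the real profiler hooks function entry/exit rather than taking an index directly:

/* Sketch: bump a per-symbol counter under pctrl.lock. */
static void profiler_hit(u32 sym_index)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&pctrl.lock, flags);
	pctrl.stat[sym_index].count++;	/* 'count' member is assumed */
	vmm_spin_unlock_irqrestore(&pctrl.lock, flags);
}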
Example #9
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	u64 stack_start;
	extern struct cpuinfo_x86 cpu_info;

	if (!vcpu->is_normal) {
		/* For orphan vcpu */
		stack_start = vcpu->stack_va + vcpu->stack_sz - sizeof(u64);
		vcpu->regs.rip = vcpu->start_pc;
		vcpu->regs.rsp = stack_start;
		vcpu->regs.cs = VMM_CODE_SEG_SEL;
		vcpu->regs.ss = VMM_DATA_SEG_SEL;
		vcpu->regs.rflags = (X86_EFLAGS_IF | X86_EFLAGS_PF | X86_EFLAGS_CF);
	} else {
		if (!vcpu->reset_count) {
			vcpu->arch_priv = vmm_zalloc(sizeof(struct x86_vcpu_priv));

			if (!vcpu->arch_priv)
				return VMM_EFAIL;

			INIT_SPIN_LOCK(&x86_vcpu_priv(vcpu)->lock);

			init_cpu_capabilities(vcpu);

			x86_vcpu_priv(vcpu)->hw_context = vmm_zalloc(sizeof(struct vcpu_hw_context));
			if (!x86_vcpu_priv(vcpu)->hw_context) {
				/* Missing in the snippet: bail out cleanly
				 * if the second allocation fails. */
				vmm_free(vcpu->arch_priv);
				vcpu->arch_priv = NULL;
				return VMM_EFAIL;
			}
			x86_vcpu_priv(vcpu)->hw_context->assoc_vcpu = vcpu;

			x86_vcpu_priv(vcpu)->hw_context->vcpu_emergency_shutdown = arch_vcpu_emergency_shutdown;
			cpu_init_vcpu_hw_context(&cpu_info, x86_vcpu_priv(vcpu)->hw_context);

			/*
			 * This vcpu has to run VMM code before and after guest mode
			 * switch. Prepare for the same.
			 */
			stack_start = vcpu->stack_va + vcpu->stack_sz - sizeof(u64);
			vcpu->regs.rip = (u64)arch_guest_vcpu_trampoline;
			vcpu->regs.rsp = stack_start;
			vcpu->regs.cs = VMM_CODE_SEG_SEL;
			vcpu->regs.ss = VMM_DATA_SEG_SEL;
			vcpu->regs.rdi = (u64)vcpu; /* this VCPU as parameter */
			vcpu->regs.rflags = (X86_EFLAGS_IF | X86_EFLAGS_PF | X86_EFLAGS_CF);
		}
	}

	return VMM_OK;
}
Example #10
static int virtio_blk_connect(struct virtio_device *dev, 
			      struct virtio_emulator *emu)
{
	int rc;
	char *attr;
	struct virtio_blk_dev *bdev;

	bdev = vmm_zalloc(sizeof(struct virtio_blk_dev));
	if (!bdev) {
		vmm_printf("Failed to allocate virtio block device....\n");
		return VMM_ENOMEM;
	}
	bdev->vdev = dev;

	bdev->blk_client.notifier_call = &virtio_blk_notification;
	bdev->blk_client.priority = 0;
	rc = vmm_blockdev_register_client(&bdev->blk_client);
	if (rc) {
		vmm_free(bdev);
		return rc;
	}

	INIT_SPIN_LOCK(&bdev->blk_lock);

	attr = vmm_devtree_attrval(dev->edev->node, "blkdev");
	if (attr) {
		if (strlcpy(bdev->blk_name, attr, sizeof(bdev->blk_name)) >=
		    sizeof(bdev->blk_name)) {
			/* Missing in the snippet: unregister the client
			 * registered above before bailing out. */
			vmm_blockdev_unregister_client(&bdev->blk_client);
			vmm_free(bdev);
			return VMM_EOVERFLOW;
		}
		bdev->blk = vmm_blockdev_find(bdev->blk_name);
	} else {
		bdev->blk_name[0] = 0;
		bdev->blk = NULL;
	}

	bdev->config.capacity = (bdev->blk) ? bdev->blk->num_blocks : 0;
	bdev->config.seg_max = VIRTIO_BLK_DISK_SEG_MAX;
	bdev->config.blk_size =
		(bdev->blk) ? bdev->blk->block_size : VIRTIO_BLK_SECTOR_SIZE;

	dev->emu_data = bdev;

	return VMM_OK;
}
Example #11
/**
 * @brief Initializes the software timer structure variables.
 */
void init_sw_timer(void)
{
    /* TODO :Later, this function should be made to read values
     * from Device Tree */
    int iter;

    timer_cpu_info.expires_next.tval64 = TIMEVAL_MAX;
    INIT_LIST_HEAD(&timer_cpu_info.pending_routines);
    timer_cpu_info.num_events = 0;

    for(iter = 0; iter < MAX_NUM_OF_TIMERS; iter++)
    {
        timer_cpu_info.clock_info[iter].cpu_info = &timer_cpu_info;
        timer_cpu_info.clock_info[iter].clock_id = iter;
        INIT_LIST_HEAD(&timer_cpu_info.clock_info[iter].active);
        INIT_SPIN_LOCK(&timer_cpu_info.clock_info[iter].spin_lock);
        timer_cpu_info.clock_info[iter].clock_period.tval64 = 
                                                        get_clock_period();
        timer_cpu_info.clock_info[iter].timer_period.tval64 = 
                                                        get_timer_period();
    }

    /* TODO :Later this should be made to obtain from Device Tree */
    timer_cpu_info.free_run_clock_id = 0;
    timer_cpu_info.tick_timer_id = 1;

#define SW_FREE_RUNNING_CNTR    timer_cpu_info.free_run_clock_id
#define SW_TICKTIMER            timer_cpu_info.tick_timer_id


    sw_timer_info.sw_timestamp.tval64 = 0;
    sw_timer_info.abs_cycles_count = 0;
    sw_timer_info.cycles_count_new = 0;
    sw_timer_info.cycles_count_old = 0;
    sw_timer_info.timer_period.tval64 = get_timer_period();
    sw_timer_info.clock_period.tval64 = get_clock_period();

}
Example #12
File: ps2.c Project: PyroOS/Pyro
/* The listing starts mid-declaration; the enclosing typedef line is
 * reconstructed from the closing "} PS2_Port_s;". */
typedef struct
{
	int			nIrq;
	char		pBuffer[ PS2_BUF_SIZE ];
	atomic_t	nOutPos;
	atomic_t	nInPos;
	atomic_t	nBytesReceived;
	atomic_t	nOpenCount;
	int			nDevHandle;
	int			nIrqHandle;
} PS2_Port_s;

static PS2_Port_s g_sKbdPort;
static PS2_Port_s g_sAuxPort;
static uint8 g_nKbdLedStatus;
static int g_nDevNum = 0;

SpinLock_s g_sLock = INIT_SPIN_LOCK( "ps2_lock" );

#define PS2_TIMEOUT 10000

/* Wait until data is available in the output (from device) buffer */
/* Needs to be called with g_sLock held */
static status_t ps2_wait_read()
{
	
	int i = 0;
	while( ( ~inb( PS2_STATUS_REG ) & PS2_STS_OBF ) && ( i < PS2_TIMEOUT ) )
	{
		udelay( 50 );
		i++;
	}
	if( i == PS2_TIMEOUT )
Example #13
struct usb_device *usb_alloc_device(struct usb_device *parent,
				    struct usb_hcd *hcd, unsigned port)
{
	int i;
	irq_flags_t flags;
	struct usb_device *dev;

	/* Sanity checks */
	if (parent) {
		if (USB_MAXCHILDREN <= port) {
			return NULL;
		}
		vmm_spin_lock_irqsave(&parent->children_lock, flags);
		if (parent->children[port]) {
			vmm_spin_unlock_irqrestore(&parent->children_lock,
						   flags);
			return NULL;
		}
		vmm_spin_unlock_irqrestore(&parent->children_lock, flags);
	}

	/* Alloc new device */
	dev = vmm_zalloc(sizeof(*dev));
	if (!dev) {
		return NULL;
	}

	/* Initialize devdrv context */
	vmm_devdrv_initialize_device(&dev->dev);
	dev->dev.autoprobe_disabled = TRUE;
	dev->dev.parent = (parent) ? &parent->dev : NULL;
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;

	/* Increment reference count of HCD */
	usb_ref_hcd(hcd);

	/* Root hubs aren't true devices, so don't allocate HCD resources */
	if (hcd->driver->alloc_dev && parent &&
		!hcd->driver->alloc_dev(hcd, dev)) {
		usb_dref_hcd(hcd);
		vmm_free(dev);
		return NULL;
	}

	/* Update device state */
	dev->state = USB_STATE_NOTATTACHED;

	/* Update device name, devpath, route, and level */
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';
		dev->route = 0;
		dev->level = 0;
		vmm_snprintf(dev->dev.name, sizeof(dev->dev.name),
			     "usb%d", hcd->bus_num);
	} else {
		if (parent->level == 0) {
			/* Root hub port is not counted in route string
			 * because it is always zero.
			 */
			vmm_snprintf(dev->devpath, sizeof(dev->devpath),
				     "%d", port);
		} else {
			vmm_snprintf(dev->devpath, sizeof(dev->devpath),
				     "%s.%d", parent->devpath, port);
		}
		/* Route string assumes hubs have less than 16 ports */
		if (port < 15) {
			dev->route = parent->route +
				(port << (parent->level * 4));
		} else {
			dev->route = parent->route +
				(15 << (parent->level * 4));
		}
		dev->level = parent->level + 1;
		vmm_snprintf(dev->dev.name, sizeof(dev->dev.name),
			     "usb%d-%s", hcd->bus_num, dev->devpath);
		/* FIXME: hub driver sets up TT records */
		/* Update parent device */
		vmm_spin_lock_irqsave(&parent->children_lock, flags);
		parent->children[port] = dev;
		vmm_spin_unlock_irqrestore(&parent->children_lock, flags);
	}

	/* Update rest of the device fields */
	dev->portnum = port;
	dev->hcd = hcd;
	dev->maxchild = 0;
	INIT_SPIN_LOCK(&dev->children_lock);
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		dev->children[i] = NULL;
	}

	/* Assign device number based on HCD device bitmap
	 * Note: Device number starts from 1.
	 * Note: Device number 0 is default device.
	 */
	vmm_spin_lock_irqsave(&hcd->devicemap_lock, flags);
	dev->devnum = 0;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		if (!test_bit(i, hcd->devicemap)) {
			__set_bit(i, hcd->devicemap);
			dev->devnum = i + 1;
			break;
		}
	}
	i = dev->devnum;
	vmm_spin_unlock_irqrestore(&hcd->devicemap_lock, flags);
	if (i == 0) {
		usb_dref_hcd(hcd);
		vmm_free(dev);
		return NULL;
	}

	return dev;
}
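A sketch of the matching cleanup that releases a device number back to the HCD bitmap under the same devicemap_lock; the helper name is hypothetical:

/* Sketch: free the devnum bit taken by usb_alloc_device(). */
static void usb_release_devnum(struct usb_hcd *hcd, struct usb_device *dev)
{
	irq_flags_t flags;

	if (dev->devnum <= 0)
		return;

	vmm_spin_lock_irqsave(&hcd->devicemap_lock, flags);
	__clear_bit(dev->devnum - 1, hcd->devicemap);
	vmm_spin_unlock_irqrestore(&hcd->devicemap_lock, flags);
	dev->devnum = -1;
}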
Example #14
int uip_netport_init(void)
{
	struct vmm_netswitch *nsw;
	struct uip_port_state *s = &uip_port_state;
	struct uip_fw_netif *netif;
	uip_ipaddr_t ipaddr;
	char tname[64];

	uip_buf = vmm_malloc(UIP_BUFSIZE + 2);
	if(!uip_buf) {
		vmm_panic("%s: uip_buf alloc failed\n", __func__);
	}

	INIT_SPIN_LOCK(&s->lock);
	INIT_LIST_HEAD(&s->rxbuf);
	INIT_COMPLETION(&s->rx_possible);

	/* Get the first netswitch */
	nsw = vmm_netswitch_get(0);
	if(!nsw) {
		vmm_panic("No netswitch found\n");
	}
	/* Create a port-name */
	vmm_sprintf(tname, "%s-uip", nsw->name); 
	/* Allocate a netport for this netswitch */
	s->port = vmm_netport_alloc(tname);
	if(!s->port) {
		vmm_printf("UIP->netport alloc failed\n");
		return VMM_EFAIL;
	}
	/* Allocate a uip_fw_netif */ 
	netif = vmm_malloc(sizeof(struct uip_fw_netif));
	if(!netif) {
		vmm_printf("UIP->netif alloc failed\n");
		/* Release the netport allocated above; assuming
		 * vmm_netport_free() is the matching release API. */
		vmm_netport_free(s->port);
		return VMM_EFAIL;
	}
	/* Register the netport */
	s->port->mtu = UIP_BUFSIZE;
	s->port->link_changed = uip_set_link;
	s->port->can_receive = uip_can_receive;
	s->port->switch2port_xfer = uip_switch2port_xfer;
	s->port->priv = s;
	s->netif = netif;

	vmm_netport_register(s->port);
	/* Attach with the netswitch */
	vmm_netswitch_port_add(nsw, s->port);
	/* Notify our ethernet address */
	uip_setethaddr(((struct uip_eth_addr *)(s->port->macaddr)));
	/* Generate an IP address */
	uip_ipaddr(ipaddr, 192,168,0,1);
	uip_fw_setipaddr(netif, ipaddr);
	uip_ipaddr(ipaddr, 255,255,255,0);
	uip_fw_setnetmask(netif, ipaddr);
	/* Register the netif with uip stack */
	netif->output = &uip_netport_output;
	netif->priv = s;
	uip_fw_register(netif);
	/* Set this interface as default one */
	uip_fw_default(netif);
	return 0;
}
Example #15
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 cpuid = 0;
	const char *attr;
	irq_flags_t flags;
	u32 phys_timer_irq, virt_timer_irq;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp = vcpu->stack_va + vcpu->stack_sz - 8;
	if (!vcpu->is_normal) {
		arm_regs(vcpu)->pstate = PSR_MODE64_EL2h;
		arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
		return VMM_OK;
	}

	/* Following initialization for normal VCPUs only */
	rc = vmm_devtree_read_string(vcpu->node, 
			VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr);
	if (rc) {
		goto fail;
	}
	if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a15") == 0) {
		cpuid = ARM_CPUID_CORTEXA15;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a7") == 0) {
		cpuid = ARM_CPUID_CORTEXA7;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv8,generic") == 0) {
		cpuid = ARM_CPUID_ARMV8;
	} else {
		rc = VMM_EINVALID;
		goto fail;
	}
	if (arm_regs(vcpu)->pstate == PSR_MODE32) {
		/* Check if the host supports A32 mode @ EL1 */
		if (!cpu_supports_el1_a32()) {
			vmm_printf("Host does not support AArch32 mode\n");
			rc = VMM_ENOTAVAIL;
			goto fail;
		}
		arm_regs(vcpu)->pstate |= PSR_ZERO_MASK;
		arm_regs(vcpu)->pstate |= PSR_MODE32_SUPERVISOR;
	} else {
		arm_regs(vcpu)->pstate |= PSR_MODE64_DEBUG_DISABLED;
		arm_regs(vcpu)->pstate |= PSR_MODE64_EL1h;
	}
	arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_IRQ_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_FIQ_DISABLED;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto fail;
		}
		/* Setup CPUID value expected by VCPU in MIDR register
		 * as-per HW specifications.
		 */
		arm_priv(vcpu)->cpuid = cpuid;
		/* Initialize VCPU features */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA7:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA15:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_ARMV8:
			arm_set_feature(vcpu, ARM_FEATURE_V8);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			break;
		default:
			break;
		}
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
		/* Initialize Hypervisor Configuration */
		INIT_SPIN_LOCK(&arm_priv(vcpu)->hcr_lock);
		arm_priv(vcpu)->hcr =  (HCR_TSW_MASK |
					HCR_TACR_MASK |
					HCR_TIDCP_MASK |
					HCR_TSC_MASK |
					HCR_TWE_MASK |
					HCR_TWI_MASK |
					HCR_AMO_MASK |
					HCR_IMO_MASK |
					HCR_FMO_MASK |
					HCR_SWIO_MASK |
					HCR_VM_MASK);
		if (!(arm_regs(vcpu)->pstate & PSR_MODE32)) {
			arm_priv(vcpu)->hcr |= HCR_RW_MASK;
		}
		/* Initialize Coprocessor Trap Register */
		arm_priv(vcpu)->cptr = CPTR_TTA_MASK;
		arm_priv(vcpu)->cptr |= CPTR_TFP_MASK;
		/* Initialize Hypervisor System Trap Register */
		arm_priv(vcpu)->hstr = 0;
		/* Cleanup VGIC context first time */
		arm_vgic_cleanup(vcpu);
	}

	/* Clear virtual exception bits in HCR */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr &= ~(HCR_VSE_MASK | 
				 HCR_VI_MASK | 
				 HCR_VF_MASK);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	/* Set last host CPU to invalid value */
	arm_priv(vcpu)->last_hcpu = 0xFFFFFFFF;

	/* Initialize sysregs context */
	rc = cpu_vcpu_sysregs_init(vcpu, cpuid);
	if (rc) {
		goto fail_sysregs_init;
	}

	/* Initialize VFP context */
	rc = cpu_vcpu_vfp_init(vcpu);
	if (rc) {
		goto fail_vfp_init;
	}

	/* Initialize generic timer context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_phys_irq",
					 &phys_timer_irq)) {
			phys_timer_irq = 0;
		}
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_virt_irq",
					 &virt_timer_irq)) {
			virt_timer_irq = 0;
		}
		rc = generic_timer_vcpu_context_init(vcpu,
						&arm_gentimer_context(vcpu),
						phys_timer_irq,
						virt_timer_irq);
		if (rc) {
			goto fail_gentimer_init;
		}
	}

	return VMM_OK;

fail_gentimer_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_vfp_deinit(vcpu);
	}
fail_vfp_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_sysregs_deinit(vcpu);
	}
fail_sysregs_init:
	if (!vcpu->reset_count) {
		vmm_free(vcpu->arch_priv);
		vcpu->arch_priv = NULL;
	}
fail:
	return rc;
}
Example #16
int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 ite, irq_count;
	struct vmm_timer_event *ev;

	/* Sanity Checks */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* For Orphan VCPU just return */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Get irq count */
	irq_count = arch_vcpu_irq_count(vcpu);

	/* Only first time */
	if (!vcpu->reset_count) {
		/* Clear the memory of irq */
		memset(&vcpu->irqs, 0, sizeof(struct vmm_vcpu_irqs));

		/* Allocate memory for flags */
		vcpu->irqs.irq =
		    vmm_zalloc(sizeof(struct vmm_vcpu_irq) * irq_count);
		if (!vcpu->irqs.irq) {
			return VMM_ENOMEM;
		}

		/* Create wfi_timeout event */
		ev = vmm_zalloc(sizeof(struct vmm_timer_event));
		if (!ev) {
			vmm_free(vcpu->irqs.irq);
			vcpu->irqs.irq = NULL;
			return VMM_ENOMEM;
		}
		vcpu->irqs.wfi.priv = ev;

		/* Initialize wfi lock */
		INIT_SPIN_LOCK(&vcpu->irqs.wfi.lock);

		/* Initialize wfi timeout event */
		INIT_TIMER_EVENT(ev, vcpu_irq_wfi_timeout, vcpu);
	}

	/* Save irq count */
	vcpu->irqs.irq_count = irq_count;

	/* Set execute pending to zero */
	arch_atomic_write(&vcpu->irqs.execute_pending, 0);

	/* Set default assert & deassert counts */
	arch_atomic64_write(&vcpu->irqs.assert_count, 0);
	arch_atomic64_write(&vcpu->irqs.execute_count, 0);
	arch_atomic64_write(&vcpu->irqs.deassert_count, 0);

	/* Reset irq processing data structures for VCPU */
	for (ite = 0; ite < irq_count; ite++) {
		vcpu->irqs.irq[ite].reason = 0;
		arch_atomic_write(&vcpu->irqs.irq[ite].assert, DEASSERTED);
	}

	/* Setup wait for irq context */
	vcpu->irqs.wfi.state = FALSE;
	rc = vmm_timer_event_stop(vcpu->irqs.wfi.priv);
	if (rc != VMM_OK) {
		vmm_free(vcpu->irqs.irq);
		vcpu->irqs.irq = NULL;
		vmm_free(vcpu->irqs.wfi.priv);
		vcpu->irqs.wfi.priv = NULL;
	}

	return rc;
}
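For context, a sketch of the wfi timeout handler wired up by INIT_TIMER_EVENT() above; the resume helper shown is an assumption about the surrounding API:

/* Sketch: wake the VCPU blocked in WFI when the timeout fires. */
static void vcpu_irq_wfi_timeout(struct vmm_timer_event *ev)
{
	struct vmm_vcpu *vcpu = ev->priv;

	vmm_vcpu_irq_wait_resume(vcpu);	/* assumed resume helper */
}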
Example #17
#include <pyro/kernel.h>
#include <pyro/spinlock.h>


#include <macros.h>


enum
{
	PCI_METHOD_1 = 0x01,
	PCI_METHOD_2 = 0x02,
	PCI_METHOD_BIOS = 0x04
};

uint32 g_nPCIMethod;
SpinLock_s g_sPCILock = INIT_SPIN_LOCK( "simple_pci_lock" );

/** 
 * \par Description: Checks whether a PCI bus is present and selects the access method
 * \par Note:
 * \par Warning:
 * \param
 * \return
 * \sa
 * \author	Kurt Skauen ([email protected])
 *****************************************************************************/

void simple_pci_init( void )
{
	g_nPCIMethod = 0;
	struct RMREGS rm;
Example #18
static int gpex_emulator_probe(struct vmm_guest *guest,
			       struct vmm_emudev *edev,
			       const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK, i;
	char name[64];
	struct gpex_state *s;
	struct pci_class *class;

	s = vmm_zalloc(sizeof(struct gpex_state));
	if (!s) {
		GPEX_LOG(LVL_ERR, "Failed to allocate gpex state.\n");
		rc = VMM_EFAIL;
		goto _failed;
	}

	s->node = edev->node;
	s->guest = guest;
	s->controller = vmm_zalloc(sizeof(struct pci_host_controller));
	if (!s->controller) {
		GPEX_LOG(LVL_ERR, "Failed to allocate pci host controller "
					"for gpex.\n");
		/* Without setting rc here, the function would return
		 * VMM_OK on an allocation failure. */
		rc = VMM_ENOMEM;
		goto _failed;
	}
	INIT_MUTEX(&s->lock);
	INIT_LIST_HEAD(&s->controller->head);
	INIT_LIST_HEAD(&s->controller->attached_buses);
	INIT_SPIN_LOCK(&s->controller->lock);

	/* initialize class */
	class = PCI_CONTROLLER_TO_CLASS(s->controller);

	INIT_SPIN_LOCK(&class->lock);
	class->conf_header.vendor_id = PCI_VENDOR_ID_REDHAT;
	class->conf_header.device_id = PCI_DEVICE_ID_REDHAT_PCIE_HOST;
	class->config_read = gpex_config_read;
	class->config_write = gpex_config_write;

	rc = vmm_devtree_read_u32(edev->node, "nr_buses",
				  &s->controller->nr_buses);
	if (rc) {
		GPEX_LOG(LVL_ERR, "Failed to read nr_buses from guest DTS.\n");
		goto _failed;
	}

	GPEX_LOG(LVL_VERBOSE, "%s: %d buses on this controller.\n",
		   __func__, s->controller->nr_buses);

	for (i = 0; i < s->controller->nr_buses; i++) {
		if ((rc = pci_emu_attach_new_pci_bus(s->controller, i))
		    != VMM_OK) {
			GPEX_LOG(LVL_ERR, "Failed to attach PCI bus %d\n",
				   i+1);
			goto _failed;
		}
	}

	strlcpy(name, guest->name, sizeof(name));
	strlcat(name, "/", sizeof(name));
	if (strlcat(name, edev->node->name, sizeof(name)) >= sizeof(name)) {
		rc = VMM_EOVERFLOW;
		goto _failed;
	}

	edev->priv = s;

	vmm_mutex_lock(&s->lock);

	if ((rc = pci_emu_register_controller(s->node, s->guest,
				s->controller)) != VMM_OK) {
			GPEX_LOG(LVL_ERR,
				   "Failed to attach PCI controller.\n");
			goto _controller_failed;
	}

	vmm_mutex_unlock(&s->lock);

	GPEX_LOG(LVL_VERBOSE, "Success.\n");

	goto _done;

_controller_failed:
	vmm_mutex_unlock(&s->lock);

_failed:
	if (s && s->controller) vmm_free(s->controller);
	if (s) vmm_free(s);

_done:
	return rc;
}
Example #19
File: serial.c Project: PyroOS/Pyro
#include <posix/errno.h>
#include <posix/ioctls.h>
#include <posix/fcntl.h>
#include <posix/termios.h>

#include <pyro/kernel.h>
#include <pyro/device.h>
#include <pyro/semaphore.h>
#include <pyro/spinlock.h>
#include <pyro/irq.h>

#include "serial_reg.h"

#include <macros.h>

static SpinLock_s g_sSPinLock = INIT_SPIN_LOCK( "ser_slock" );

#define RECV_BUF_SIZE 4096

static int g_nIRQHandle;

typedef struct
{
    struct termios sp_sTermios;
    int	 	 sp_nPortBase;
    int	 	 sp_nBaudRate;
    sem_id 	 sp_hRecvMutex;
    sem_id 	 sp_hRecvWaitQueue;
    int	 	 sp_nRecvInPos;
    int	 	 sp_nRecvOutPos;
    int	 	 sp_nRecvSize;	// Number of bytes in sp_anReceiveBuffer