Example #1
static int waitqueue2_do_test(struct vmm_chardev *cdev)
{
	u64 i, timeout, etimeout, tstamp;
	int rc, failures = 0;

	/* Try waitqueue sleep with timeout a few times */
	for (i = 1; i <= 10; i++) {
		/* Save current timestamp */
		tstamp = vmm_timer_timestamp();

		/* Sleep on waitqueue with timeout */
		etimeout = i * SLEEP_MSECS * 1000000ULL;
		timeout = etimeout;
		rc = vmm_waitqueue_sleep_timeout(&wq0, &timeout);
		if (rc != VMM_ETIMEDOUT) {
			vmm_cprintf(cdev, "error: did not timeout\n");
			failures++;
		}

		/* Check elapsed time */
		tstamp = vmm_timer_timestamp() - tstamp;
		if (tstamp < etimeout) {
			vmm_cprintf(cdev, "error: time elapsed %"PRIu64
				    " nanosecs instead of %"PRIu64" nanosecs",
				    tstamp, etimeout);
			failures++;
		}
	}

	return (failures) ? VMM_EFAIL : 0;
}
Example #2
static int mutex9_do_test(struct vmm_chardev *cdev)
{
	u64 i, timeout, etimeout, tstamp;
	int rc, failures = 0;

	/* Initialise the shared_data to zero */
	shared_data = 0;

	/* Start worker */
	vmm_threads_start(workers[0]);

	/*
	 * The worker thread has now been started and should take ownership
	 * of the mutex. We wait a while and check that shared_data has been
	 * modified, which proves to us that the thread has taken the mutex.
	 */
	vmm_msleep(SLEEP_MSECS*10);

	/* Check shared data. It should be one. */
	if (shared_data != 1) {
		vmm_cprintf(cdev, "error: shared data unmodified\n");
		failures++;
	}

	/* Try mutex lock with timeout a few times */
	for (i = 1; i <= 10; i++) {
		/* Save current timestamp */
		tstamp = vmm_timer_timestamp();

		/* Lock mutex with timeout */
		etimeout = i * SLEEP_MSECS * 1000000ULL;
		timeout = etimeout;
		rc = vmm_mutex_lock_timeout(&mutex1, &timeout);
		if (rc != VMM_ETIMEDOUT) {
			vmm_cprintf(cdev, "error: did not timeout\n");
			failures++;
		}

		/* Check elapsed time */
		tstamp = vmm_timer_timestamp() - tstamp;
		if (tstamp < etimeout) {
			vmm_cprintf(cdev, "error: time elapsed %"PRIu64
				    " nanosecs instead of %"PRIu64" nanosecs",
				    tstamp, etimeout);
			failures++;
		}
	}

	/* Stop worker thread. */
	vmm_threads_stop(workers[0]);

	return (failures) ? VMM_EFAIL : 0;
}
Example #3
static u32 pl031_get_count(struct pl031_state *s)
{
	/* This assumes vmm_timer_timestamp() returns the time since
	 * the machine was created.
	 */
	return s->tick_offset +
	       (u32)udiv64(vmm_timer_timestamp() - s->tick_tstamp, 1000000000);
}
Example #4
void usb_set_device_state(struct usb_device *udev,
			  enum usb_device_state new_state)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&device_state_lock, flags);
	if (udev->state == USB_STATE_NOTATTACHED) {
		;	/* do nothing */
	} else if (new_state != USB_STATE_NOTATTACHED) {
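		/*
		 * Track active (non-suspended) time: leaving SUSPENDED
		 * records -now and entering SUSPENDED records +now, so each
		 * completed active interval adds its length to
		 * active_duration.
		 */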
		if (udev->state == USB_STATE_SUSPENDED &&
			new_state != USB_STATE_SUSPENDED)
			udev->active_duration -= vmm_timer_timestamp();
		else if (new_state == USB_STATE_SUSPENDED &&
				udev->state != USB_STATE_SUSPENDED)
			udev->active_duration += vmm_timer_timestamp();
		udev->state = new_state;
	} else {
		recursively_mark_NOTATTACHED(udev);
	}
	vmm_spin_unlock_irqrestore(&device_state_lock, flags);
}
Example #5
/* Must be called with sp805->lock held */
static u32 _sp805_reg_value(struct sp805_state *sp805)
{
	u64 load = vmm_timer_timestamp();

	if (!_sp805_enabled(sp805)) {
		/* Interrupt disabled: counter is disabled */
		return sp805->freezed_value;
	}

	if (likely(load > sp805->timestamp)) {
		load = load - sp805->timestamp;
	} else {
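		/* Current time is behind the stored timestamp (wrapped);
		 * compute the difference the other way around.
		 */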
		load = sp805->timestamp + ~load + 1;
	}

	return udiv64(load, 1000);
}
Example #6
/* Must be called with sp805->lock held */
static int _sp805_counter_reload(struct sp805_state *sp805)
{
	int rc = VMM_OK;
	u64 reload = (sp805->load + 1) * 1000;

	if (!_sp805_enabled(sp805)) {
		sp805_debug(sp805, "Disabled, event not started.\n");
		return VMM_OK;
	}

	sp805->timestamp = vmm_timer_timestamp();
	vmm_timer_event_stop(&sp805->event);
	rc = vmm_timer_event_start(&sp805->event, reload);
	sp805_debug(sp805, "Counter started: IRQ in %d ms (%d)\n",
		    udiv32(sp805->load + 1, 1000), rc);

	return rc;
}
Example #7
static u8_t ping_recv(void *arg, struct raw_pcb *pcb, 
			struct pbuf *p, ip_addr_t *addr)
{
	struct ip_hdr *iphdr;
	struct icmp_echo_hdr *iecho;
	LWIP_UNUSED_ARG(arg);
	LWIP_UNUSED_ARG(pcb);
	LWIP_UNUSED_ARG(addr);

	LWIP_ASSERT("p != NULL", p != NULL);

	if ((p->tot_len >= (PBUF_IP_HLEN + sizeof(struct icmp_echo_hdr)))) {
		iphdr = (struct ip_hdr *)p->payload;
		iecho = (struct icmp_echo_hdr *)(p->payload + (IPH_HL(iphdr) * 4));
		if ((lns.ping_reply != NULL) &&
		    (iecho->id == PING_ID) && 
		    (iecho->seqno == htons(lns.ping_seq_num))) {
			lns.ping_recv_tstamp = vmm_timer_timestamp();

			lns.ping_reply->ripaddr[0] = ip4_addr1(&lns.ping_addr);
			lns.ping_reply->ripaddr[1] = ip4_addr2(&lns.ping_addr);
			lns.ping_reply->ripaddr[2] = ip4_addr3(&lns.ping_addr);
			lns.ping_reply->ripaddr[3] = ip4_addr4(&lns.ping_addr);
			lns.ping_reply->ttl = IPH_TTL(iphdr);
			lns.ping_reply->len = p->tot_len - (IPH_HL(iphdr) * 4);
			lns.ping_reply->seqno = lns.ping_seq_num;

			vmm_completion_complete(&lns.ping_done);

			/* Free the pbuf */
			pbuf_free(p);

			/* Eat the packet. lwIP should not process it. */
			return 1;
		}
	}

	/* Don't eat the packet. Let lwIP process it. */
	return 0;
}
Example #8
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
	int i;
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&udev->children_lock, flags);
	for (i = 0; i < udev->maxchild; ++i) {
		if (!udev->children[i]) {
			continue;
		}
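		/* Drop the lock across the recursive call and re-acquire
		 * it before looking at the next child.
		 */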
		vmm_spin_unlock_irqrestore(&udev->children_lock, flags);
		recursively_mark_NOTATTACHED(udev->children[i]);
		vmm_spin_lock_irqsave(&udev->children_lock, flags);
	}
	vmm_spin_unlock_irqrestore(&udev->children_lock, flags);

	if (udev->state == USB_STATE_SUSPENDED) {
		udev->active_duration -= vmm_timer_timestamp();
	}

	udev->state = USB_STATE_NOTATTACHED;
}
Example #9
int vmm_scheduler_state_change(struct vmm_vcpu *vcpu, u32 new_state)
{
	u64 tstamp;
	int rc = VMM_OK;
	irq_flags_t flags;
	bool preempt = FALSE;
	u32 chcpu = vmm_smp_processor_id(), vhcpu;
	struct vmm_scheduler_ctrl *schedp;
	u32 current_state;

	if (!vcpu) {
		return VMM_EFAIL;
	}

	vmm_write_lock_irqsave_lite(&vcpu->sched_lock, flags);

	vhcpu = vcpu->hcpu;
	schedp = &per_cpu(sched, vhcpu);

	current_state = arch_atomic_read(&vcpu->state);

	switch(new_state) {
	case VMM_VCPU_STATE_UNKNOWN:
		/* Existing VCPU being destroyed */
		rc = vmm_schedalgo_vcpu_cleanup(vcpu);
		break;
	case VMM_VCPU_STATE_RESET:
		if (current_state == VMM_VCPU_STATE_UNKNOWN) {
			/* New VCPU */
			rc = vmm_schedalgo_vcpu_setup(vcpu);
		} else if (current_state != VMM_VCPU_STATE_RESET) {
			/* Existing VCPU */
			/* Make sure VCPU is not in a ready queue */
			if ((schedp->current_vcpu != vcpu) &&
			    (current_state == VMM_VCPU_STATE_READY)) {
				if ((rc = rq_detach(schedp, vcpu))) {
					break;
				}
			}
			/* Make sure current VCPU is preempted */
			if ((schedp->current_vcpu == vcpu) &&
			    (current_state == VMM_VCPU_STATE_RUNNING)) {
				preempt = TRUE;
			}
			vcpu->reset_count++;
			if ((rc = arch_vcpu_init(vcpu))) {
				break;
			}
			if ((rc = vmm_vcpu_irq_init(vcpu))) {
				break;
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_READY:
		if ((current_state == VMM_VCPU_STATE_RESET) ||
		    (current_state == VMM_VCPU_STATE_PAUSED)) {
			/* Enqueue VCPU to ready queue */
			rc = rq_enqueue(schedp, vcpu);
			if (!rc && (schedp->current_vcpu != vcpu)) {
				preempt = rq_prempt_needed(schedp);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if ((current_state == VMM_VCPU_STATE_READY) ||
		    (current_state == VMM_VCPU_STATE_RUNNING)) {
			/* Expire timer event if current VCPU 
			 * is paused or halted 
			 */
			if (schedp->current_vcpu == vcpu) {
				preempt = TRUE;
			} else if (current_state == VMM_VCPU_STATE_READY) {
				/* Make sure VCPU is not in a ready queue */
				rc = rq_detach(schedp, vcpu);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	}

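	/* On a successful state change, charge the time spent in the
	 * previous state to its per-state counter and stamp the new state.
	 */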
	if (rc == VMM_OK) {
		tstamp = vmm_timer_timestamp();
		switch (current_state) {
		case VMM_VCPU_STATE_READY:
			vcpu->state_ready_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_RUNNING:
			vcpu->state_running_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_PAUSED:
			vcpu->state_paused_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_HALTED:
			vcpu->state_halted_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		default:
			break; 
		}
		if (new_state == VMM_VCPU_STATE_RESET) {
			vcpu->state_ready_nsecs = 0;
			vcpu->state_running_nsecs = 0;
			vcpu->state_paused_nsecs = 0;
			vcpu->state_halted_nsecs = 0;
			vcpu->reset_tstamp = tstamp;
		}
		arch_atomic_write(&vcpu->state, new_state);
		vcpu->state_tstamp = tstamp;
	}

	vmm_write_unlock_irqrestore_lite(&vcpu->sched_lock, flags);

	if (preempt && schedp->current_vcpu) {
		if (chcpu == vhcpu) {
			if (schedp->current_vcpu->is_normal) {
				schedp->yield_on_irq_exit = TRUE;
			} else if (schedp->irq_context) {
				vmm_scheduler_preempt_orphan(schedp->irq_regs);
			} else {
				arch_vcpu_preempt_orphan();
			}
		} else {
			vmm_smp_ipi_async_call(vmm_cpumask_of(vhcpu),
						scheduler_ipi_resched,
						NULL, NULL, NULL);
		}
	}

	return rc;
}
Example #10
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev, 
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL; 
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);

		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

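	/* If the current VCPU's context is worth saving, account its
	 * running time, put it back on the ready queue if it was RUNNING,
	 * and remember it so arch_vcpu_switch() saves its registers.
	 */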
	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs += 
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", 
			  __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}
Example #11
static int semaphore5_do_test(struct vmm_chardev *cdev)
{
	int i, rc, failures = 0;
	u64 timeout, etimeout, tstamp;

	/* Clear shared data */
	for (i = 0; i < NUM_THREADS; i++) {
		shared_data[i] = 0;
	}

	/* s1 semaphore should be available */
	if (vmm_semaphore_avail(&s1) != 3) {
		vmm_cprintf(cdev, "error: initial semaphore not available\n");
		failures++;
	}

	/* Start worker0 */
	vmm_threads_start(workers[0]);

	/* Wait for worker0 to acquire s1 semaphore */
	vmm_msleep(SLEEP_MSECS * 10);

	/* Check worker0 shared data */
	if (shared_data[0] != 1) {
		vmm_cprintf(cdev, "error: worker0 shared data not updated\n");
		failures++;
	}

	/* s1 semaphore should not be available */
	if (vmm_semaphore_avail(&s1) != 0) {
		vmm_cprintf(cdev, "error: semaphore available\n");
		failures++;
	}

	/* Try semaphore down with timeout a few times */
	for (i = 1; i <= 10; i++) {
		/* Save current timestamp */
		tstamp = vmm_timer_timestamp();

		/* Down s1 semaphore with some timeout */
		etimeout = i * SLEEP_MSECS * 1000000ULL;
		timeout = etimeout;
		rc = vmm_semaphore_down_timeout(&s1, &timeout);
		if (rc != VMM_ETIMEDOUT) {
			vmm_cprintf(cdev,
				"error: semaphore down did not timeout\n");
			failures++;
		}

		/* Check elapsed time */
		tstamp = vmm_timer_timestamp() - tstamp;
		if (tstamp < etimeout) {
			vmm_cprintf(cdev, "error: time elapsed %"PRIu64
				    " nanosecs instead of %"PRIu64" nanosecs",
				    tstamp, etimeout);
			failures++;
		}
	}

	/* Release s1 acquired by worker0 */
	for (i = 0; i < 3; i++) {
		rc = vmm_semaphore_up(&s1);
		if (rc) {
			vmm_cprintf(cdev, "error: semaphore not released\n");
			failures++;
		}
	}

	/* Release s1 which is already released; this should fail */
	for (i = 0; i < 3; i++) {
		rc = vmm_semaphore_up(&s1);
		if (rc == VMM_OK) {
			vmm_cprintf(cdev, "error: semaphore released\n");
			failures++;
		}
	}

	/* s1 semaphore should be available */
	if (vmm_semaphore_avail(&s1) != 3) {
		vmm_cprintf(cdev, "error: semaphore not available\n");
		failures++;
	}

	return (failures) ? VMM_EFAIL : 0;
}
Example #12
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno, 
			struct netstack_echo_reply *reply)
{
	int i, rc;
	u64 timeout = PING_DELAY_NS;
	struct pbuf *p;
	struct icmp_echo_hdr *iecho;
	size_t len = sizeof(struct icmp_echo_hdr) + size;

	LWIP_ASSERT("ping_size <= 0xffff", len <= 0xffff);

	/* Lock ping context for atomicity */
	vmm_mutex_lock(&lns.ping_lock);

	/* Alloc ping pbuf */
	p = pbuf_alloc(PBUF_IP, (u16_t)len, PBUF_RAM);
	if (!p) {
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_ENOMEM;
	}
	if ((p->len != p->tot_len) || (p->next != NULL)) {
		pbuf_free(p);
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_EFAIL;
	}

	/* Prepare ECHO request */
	iecho = (struct icmp_echo_hdr *)p->payload;
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id     = PING_ID;
	iecho->seqno  = htons(seqno);
	for (i = 0; i < size; i++) {
		((char*)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	iecho->chksum = inet_chksum(iecho, len);

	/* Prepare target address */
	IP4_ADDR(&lns.ping_addr, ripaddr[0],ripaddr[1],ripaddr[2],ripaddr[3]);

	/* Save ping info */
	lns.ping_seq_num = seqno;
	lns.ping_reply = reply;
	lns.ping_recv_tstamp = 0;
	lns.ping_send_tstamp = vmm_timer_timestamp();
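	/* Pre-set the receive timestamp to the timeout deadline; ping_recv()
	 * overwrites it when a reply arrives, so the RTT computed below
	 * defaults to the full timeout if no reply shows up.
	 */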
	lns.ping_recv_tstamp = lns.ping_send_tstamp + PING_DELAY_NS;

	/* Send ping packet */
	raw_sendto(lns.ping_pcb, p, &lns.ping_addr);

	/* Wait for ping to complete with timeout */
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	rc = vmm_completion_wait_timeout(&lns.ping_done, &timeout);
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	lns.ping_reply->rtt = udiv64(timeout, 1000);

	/* Free ping pbuf */
	pbuf_free(p);

	/* Clear ping reply pointer */
	lns.ping_reply = NULL;

	/* Unlock ping context */
	vmm_mutex_unlock(&lns.ping_lock);

	return rc;
}
Example #13
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno, 
			struct netstack_echo_reply *reply)
{
	u64 ts;
	int s, i, err;
	char buf[64];
	size_t fromlen, off, len = sizeof(struct icmp_echo_hdr) + size;
	ip_addr_t to_addr, from_addr;
	struct sockaddr_in sock;
	struct ip_hdr *iphdr;
	struct icmp_echo_hdr *iecho;

	LWIP_ASSERT("ping_size is too big\n", len <= 0xffff);

	/* Prepare target address */
	IP4_ADDR(&to_addr, ripaddr[0],ripaddr[1],ripaddr[2],ripaddr[3]);

	/* Open RAW socket */
	if ((s = lwip_socket(AF_INET, SOCK_RAW, IP_PROTO_ICMP)) < 0) {
		vmm_printf("%s: failed to open ICMP socket\n", __func__);
		return VMM_EFAIL;
	}

	/* Set socket option */
	i = PING_RCV_TIMEO;
	lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &i, sizeof(i));

	/* Prepare socket address */
	sock.sin_len = sizeof(sock);
	sock.sin_family = AF_INET;
	inet_addr_from_ipaddr(&sock.sin_addr, &to_addr);

	/* Prepare ECHO request */
	iecho = (struct icmp_echo_hdr *)vmm_zalloc(len);
	if (!iecho) {
		lwip_close(s);
		return VMM_ENOMEM;
	}
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id     = PING_ID;
	iecho->seqno  = htons(seqno);
	for (i = 0; i < size; i++) {
		((char*)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	iecho->chksum = inet_chksum(iecho, len);

	/* Send ECHO request */
	err = lwip_sendto(s, iecho, len, 0, 
				(struct sockaddr*)&sock, sizeof(sock));
	vmm_free(iecho);
	if (err <= 0) {
		lwip_close(s);
		return VMM_EFAIL;
	}

	/* Get reference timestamp */
	ts = vmm_timer_timestamp();

	/* Wait for ECHO reply */
	err = VMM_EFAIL;
	off = lwip_recvfrom(s, buf, sizeof(buf), 0, 
			    (struct sockaddr*)&sock, (socklen_t*)&fromlen);
	if (off >= (sizeof(struct ip_hdr) + sizeof(struct icmp_echo_hdr))) {
		inet_addr_to_ipaddr(&from_addr, &sock.sin_addr);
		iphdr = (struct ip_hdr *)buf;
		iecho = (struct icmp_echo_hdr *)(buf + (IPH_HL(iphdr) * 4));
		if ((iecho->id == PING_ID) && 
		    (iecho->seqno == htons(seqno))) {
			reply->ripaddr[0] = ip4_addr1(&from_addr);
			reply->ripaddr[1] = ip4_addr2(&from_addr);
			reply->ripaddr[2] = ip4_addr3(&from_addr);
			reply->ripaddr[3] = ip4_addr4(&from_addr);
			reply->ttl = IPH_TTL(iphdr);
			reply->len = len;
			reply->seqno = seqno;
			reply->rtt = 
				udiv64(vmm_timer_timestamp() - ts, 1000);
			err = VMM_OK;
		}
	}
	while (off < len) {
		off = lwip_recvfrom(s, buf, sizeof(buf), 0, 
			(struct sockaddr*)&sock, (socklen_t*)&fromlen);
	}

	/* Close RAW socket */
	lwip_close(s);

	return err;
}
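All of the examples above use vmm_timer_timestamp() the same way: read it once, do some work, read it again, and treat the difference as elapsed nanoseconds. Below is a minimal sketch of that pattern; it assumes only that vmm_timer_timestamp() is a monotonic nanosecond counter (as the examples imply), and the helper name measure_op_ns is made up for illustration.

#include <vmm_timer.h>

/* Hypothetical helper: run op() once and return how long it took,
 * in nanoseconds. Assumes vmm_timer_timestamp() is monotonic.
 */
static u64 measure_op_ns(void (*op)(void))
{
	u64 start = vmm_timer_timestamp();

	op();

	return vmm_timer_timestamp() - start;
}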