static int semaphore1_do_test(struct vmm_chardev *cdev)
{
	int rc, failures = 0;

	/* Initialize semaphores */
	INIT_SEMAPHORE(&s1, 1, 1);
	INIT_SEMAPHORE(&s2, 1, 0);

	/* s1 semaphore should be available */
	if (!vmm_semaphore_avail(&s1)) {
		failures++;
	}

	/* s2 semaphore should not be available */
	if (vmm_semaphore_avail(&s2)) {
		failures++;
	}

	/* Acquire s1 semaphore */
	rc = vmm_semaphore_down(&s1);
	if (rc) {
		return rc;
	}

	/* Start workers */
	vmm_threads_start(workers[0]);

	/* Wait for worker0 to block on s1 semaphore */
	vmm_msleep(SLEEP_MSECS * 10);

	/* s2 semaphore should not be available */
	if (vmm_semaphore_avail(&s2)) {
		failures++;
	}

	/* Release s1 semaphore */
	rc = vmm_semaphore_up(&s1);
	if (rc) {
		return rc;
	}

	/* Wait for worker0 to wake up and release s2 semaphore */
	vmm_msleep(SLEEP_MSECS * 10);

	/* s2 semaphore should be available */
	if (!vmm_semaphore_avail(&s2)) {
		failures++;
	}

	/* Stop workers */
	vmm_threads_stop(workers[0]);

	return (failures) ? VMM_EFAIL : 0;
}
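/*
 * A minimal sketch of the worker thread that semaphore1_do_test expects:
 * it blocks on s1 (held by the test at startup) and, once it gets through,
 * releases s2 so the test can observe progress. The name
 * semaphore1_worker_main and the exact structure are assumptions based on
 * the test's comments, not the actual worker implementation.
 */
static int semaphore1_worker_main(void *data)
{
	int rc;

	/* Block until the test releases s1 */
	rc = vmm_semaphore_down(&s1);
	if (rc) {
		return rc;
	}

	/* Signal progress by making s2 available */
	return vmm_semaphore_up(&s2);
}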
static int mutex9_do_test(struct vmm_chardev *cdev)
{
	u64 i, timeout, etimeout, tstamp;
	int rc, failures = 0;

	/* Initialise the shared_data to zero */
	shared_data = 0;

	/* Start worker */
	vmm_threads_start(workers[0]);

	/*
	 * The worker thread has now been started and should take ownership
	 * of the mutex. We wait a while and check that shared_data has been
	 * modified, which proves to us that the thread has taken the mutex.
	 */
	vmm_msleep(SLEEP_MSECS*10);

	/* Check shared data. It should be one. */
	if (shared_data != 1) {
		vmm_cprintf(cdev, "error: shared data unmodified\n");
		failures++;
	}

	/* Try mutex lock with timeout a few times */
	for (i = 1; i <= 10; i++) {
		/* Save current timestamp */
		tstamp = vmm_timer_timestamp();

		/* Lock mutex with timeout */
		etimeout = i * SLEEP_MSECS * 1000000ULL;
		timeout = etimeout;
		rc = vmm_mutex_lock_timeout(&mutex1, &timeout);
		if (rc != VMM_ETIMEDOUT) {
			vmm_cprintf(cdev, "error: did not timeout\n");
			failures++;
		}

		/* Check elapsed time */
		tstamp = vmm_timer_timestamp() - tstamp;
		if (tstamp < etimeout) {
			vmm_cprintf(cdev, "error: time elapsed %"PRIu64
				    " nanosecs instead of %"PRIu64" nanosecs\n",
				    tstamp, etimeout);
			failures++;
		}
	}

	/* Stop worker thread. */
	vmm_threads_stop(workers[0]);

	return (failures) ? VMM_EFAIL : 0;
}
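/*
 * A rough sketch of the worker assumed by mutex9_do_test: it grabs mutex1,
 * bumps shared_data, and then keeps holding the lock so that every
 * vmm_mutex_lock_timeout() call in the test times out. The name and the
 * idle loop are assumptions, not the actual test worker.
 */
static int mutex9_worker_main(void *data)
{
	/* Take ownership of mutex1 */
	vmm_mutex_lock(&mutex1);

	/* Tell the test we own the mutex */
	shared_data = 1;

	/* Hold the mutex until the test stops this thread */
	while (1) {
		vmm_msleep(SLEEP_MSECS);
	}

	return 0;
}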
static int mutex4_do_test(struct vmm_chardev *cdev)
{
	int done_count = 0;

	/* Initialize work done completion */
	INIT_COMPLETION(&work_done);

	/* Acquire mutex1 */
	vmm_mutex_lock(&mutex1);

	/* Start workers */
	vmm_threads_start(workers[0]);
	vmm_threads_start(workers[1]);
	vmm_threads_start(workers[2]);
	vmm_threads_start(workers[3]);
	vmm_msleep(SLEEP_MSECS*40);

	/* Release mutex1 */
	vmm_mutex_unlock(&mutex1);

	/* Wait for workers to complete */
	do {
		if (done_count == NUM_THREADS) {
			break;
		}
		vmm_completion_wait(&work_done);
		done_count++;
	} while (1);

	/* Stop workers */
	vmm_threads_stop(workers[3]);
	vmm_threads_stop(workers[2]);
	vmm_threads_stop(workers[1]);
	vmm_threads_stop(workers[0]);

	return 0;
}
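/*
 * A minimal sketch of what each mutex4 worker presumably does: block on
 * mutex1 (held by the test at startup), release it again, and signal the
 * work_done completion that the test waits on NUM_THREADS times. The
 * function name and the vmm_completion_complete() call are assumptions
 * based on the test's use of INIT_COMPLETION()/vmm_completion_wait().
 */
static int mutex4_worker_main(void *data)
{
	/* Block until the test releases mutex1 */
	vmm_mutex_lock(&mutex1);
	vmm_mutex_unlock(&mutex1);

	/* Report completion to the test */
	vmm_completion_complete(&work_done);

	return 0;
}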
static int __init daemon_mterm_init(void)
{
	u8 mterm_priority;
	u32 mterm_time_slice;
	struct vmm_devtree_node *node;
	const char *attrval;

	/* Reset the control structure */
	vmm_memset(&mtctrl, 0, sizeof(mtctrl));

	/* Retrieve mterm priority and time slice from device tree */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMINFO_NODE_NAME);
	if (!node) {
		return VMM_EFAIL;
	}
	attrval = vmm_devtree_attrval(node, "mterm_priority");
	if (attrval) {
		mterm_priority = *((u32 *) attrval);
	} else {
		mterm_priority = VMM_THREAD_DEF_PRIORITY;
	}
	attrval = vmm_devtree_attrval(node, "mterm_time_slice");
	if (attrval) {
		mterm_time_slice = *((u32 *) attrval);
	} else {
		mterm_time_slice = VMM_THREAD_DEF_TIME_SLICE;
	}

	/* Create mterm thread */
	mtctrl.thread = vmm_threads_create("mterm", &mterm_main, NULL,
					   mterm_priority, mterm_time_slice);
	if (!mtctrl.thread) {
		vmm_panic("Creation of system critical thread failed.\n");
	}

	/* Start the mterm thread */
	vmm_threads_start(mtctrl.thread);

	return VMM_OK;
}
static int kern4_do_test(struct vmm_chardev *cdev)
{
	int i, w, failures = 0;

	/* Start workers */
	for (w = 0; w < NUM_THREADS; w++) {
		vmm_threads_start(workers[w]);
	}

	/* Wait for workers to sleep */
	vmm_msleep(SLEEP_MSECS*NUM_THREADS);

	/* Do this a few times */
	for (i = 0; i < 10; i++) {
		/* Try wakeup API */
		for (w = 0; w < NUM_THREADS; w++) {
			/* Reset shared_data to zero */
			shared_data[w] = 0;

			/* Wakeup worker */
			vmm_threads_wakeup(workers[w]);

			/* Wait for worker to update shared data */
			vmm_msleep(SLEEP_MSECS*NUM_THREADS);

			/* Check shared data for worker. It should be one. */
			if (shared_data[w] != 1) {
				vmm_cprintf(cdev, "error: i=%d w=%d wakeup "
					    "shared data unmodified\n", i, w);
				failures++;
			}
		}
	}

	/*
	 * We don't stop workers here. Instead, we let them block and
	 * destroy them later.
	 */
	vmm_msleep(SLEEP_MSECS*NUM_THREADS);

	return (failures) ? VMM_EFAIL : 0;
}
static int mutex7_do_test(struct vmm_chardev *cdev)
{
	int rc, failures = 0;

	/* Initialise the shared_data to zero */
	shared_data = 0;

	/* Attempt to release the mutex when not owned by any thread */
	rc = vmm_mutex_unlock(&mutex1);
	if (rc == VMM_OK) {
		vmm_cprintf(cdev, "error: unlocking mutex worked\n");
		failures++;
	}

	/* Start worker */
	vmm_threads_start(workers[0]);

	/*
	 * The worker thread has now been started and should take ownership
	 * of the mutex. We wait a while and check that shared_data has been
	 * modified, which proves to us that the thread has taken the mutex.
	 */
	vmm_msleep(SLEEP_MSECS*10);

	/* Attempt to release the mutex when owned by worker thread */
	rc = vmm_mutex_unlock(&mutex1);
	if (rc == VMM_OK) {
		vmm_cprintf(cdev, "error: unlocking mutex worked\n");
		failures++;
	}

	/* Stop worker thread */
	vmm_threads_stop(workers[0]);

	return (failures) ? VMM_EFAIL : 0;
}
static int semaphore5_do_test(struct vmm_chardev *cdev)
{
	int i, rc, failures = 0;
	u64 timeout, etimeout, tstamp;

	/* Clear shared data */
	for (i = 0; i < NUM_THREADS; i++) {
		shared_data[i] = 0;
	}

	/* s1 semaphore should be fully available */
	if (vmm_semaphore_avail(&s1) != 3) {
		vmm_cprintf(cdev, "error: initial semaphore not available\n");
		failures++;
	}

	/* Start worker0 */
	vmm_threads_start(workers[0]);

	/* Wait for worker0 to acquire s1 semaphore */
	vmm_msleep(SLEEP_MSECS * 10);

	/* Check worker0 shared data */
	if (shared_data[0] != 1) {
		vmm_cprintf(cdev, "error: worker0 shared data not updated\n");
		failures++;
	}

	/* s1 semaphore should not be available */
	if (vmm_semaphore_avail(&s1) != 0) {
		vmm_cprintf(cdev, "error: semaphore available\n");
		failures++;
	}

	/* Try semaphore down with timeout a few times */
	for (i = 1; i <= 10; i++) {
		/* Save current timestamp */
		tstamp = vmm_timer_timestamp();

		/* Down s1 semaphore with some timeout */
		etimeout = i * SLEEP_MSECS * 1000000ULL;
		timeout = etimeout;
		rc = vmm_semaphore_down_timeout(&s1, &timeout);
		if (rc != VMM_ETIMEDOUT) {
			vmm_cprintf(cdev,
				    "error: semaphore down did not timeout\n");
			failures++;
		}

		/* Check elapsed time */
		tstamp = vmm_timer_timestamp() - tstamp;
		if (tstamp < etimeout) {
			vmm_cprintf(cdev, "error: time elapsed %"PRIu64
				    " nanosecs instead of %"PRIu64" nanosecs\n",
				    tstamp, etimeout);
			failures++;
		}
	}

	/* Release s1 counts acquired by worker0 */
	for (i = 0; i < 3; i++) {
		rc = vmm_semaphore_up(&s1);
		if (rc) {
			vmm_cprintf(cdev, "error: semaphore not released\n");
			failures++;
		}
	}

	/* Release s1 which is already fully released; this should fail */
	for (i = 0; i < 3; i++) {
		rc = vmm_semaphore_up(&s1);
		if (rc == VMM_OK) {
			vmm_cprintf(cdev, "error: semaphore released\n");
			failures++;
		}
	}

	/* s1 semaphore should be fully available again */
	if (vmm_semaphore_avail(&s1) != 3) {
		vmm_cprintf(cdev, "error: semaphore not available\n");
		failures++;
	}

	return (failures) ? VMM_EFAIL : 0;
}
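/*
 * A sketch of worker0 as implied by semaphore5_do_test: it drains all
 * three counts of s1, marks shared_data[0], and then parks itself so the
 * test can exercise vmm_semaphore_down_timeout() and release the counts
 * on the worker's behalf. The name and structure are assumptions based on
 * the test's comments, not the actual worker implementation.
 */
static int semaphore5_worker_main(void *data)
{
	int i, rc;

	/* Acquire all three counts of s1 */
	for (i = 0; i < 3; i++) {
		rc = vmm_semaphore_down(&s1);
		if (rc) {
			return rc;
		}
	}

	/* Tell the test we hold the semaphore */
	shared_data[0] = 1;

	/* Park until the test stops this thread */
	while (1) {
		vmm_msleep(SLEEP_MSECS);
	}

	return 0;
}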