int master_fun(int argc, char *argv[])
{
  msg_vm_t vm;
  unsigned int i;
  xbt_dynar_t worker_pms = MSG_process_get_data(MSG_process_self());
  int nb_workers = xbt_dynar_length(worker_pms);
  xbt_dynar_t vms = xbt_dynar_new(sizeof(msg_vm_t), NULL);

  /* Launch VMs and worker processes. One VM per PM, and one worker process per VM. */
  XBT_INFO("# Launch %d VMs", nb_workers);
  for (i = 0; i < nb_workers; i++) {
    char *vm_name = bprintf("VM%02d", i);
    char *pr_name = bprintf("WRK%02d", i);
    msg_host_t pm = xbt_dynar_get_as(worker_pms, i, msg_host_t);

    XBT_INFO("create %s on PM(%s)", vm_name, MSG_host_get_name(pm));
    msg_vm_t vm = MSG_vm_create_core(pm, vm_name);

    s_vm_params_t params;
    memset(&params, 0, sizeof(params));
    params.ramsize = 1L * 1024 * 1024 * 1024; // 1Gbytes
    MSG_host_set_params(vm, &params);

    MSG_vm_start(vm);
    xbt_dynar_push(vms, &vm);

    XBT_INFO("put a process (%s) on %s", pr_name, vm_name);
    MSG_process_create(pr_name, worker_fun, NULL, vm);

    xbt_free(vm_name);
    xbt_free(pr_name);
  }

  /* Send a bunch of work to everyone */
  XBT_INFO("# Send a task to each of the %d worker processes", nb_workers);
  send_tasks(nb_workers);

  XBT_INFO("# Suspend all VMs");
  xbt_dynar_foreach(vms, i, vm) {
    const char *vm_name = MSG_host_get_name(vm);
    XBT_INFO("suspend %s", vm_name);
    MSG_vm_suspend(vm);
  }

  XBT_INFO("# Wait a while");
  MSG_process_sleep(2);

  XBT_INFO("# Resume all VMs");
  xbt_dynar_foreach(vms, i, vm) {
    MSG_vm_resume(vm);
  }

  /* Let the workers finish their tasks, then clean everything up. */
  MSG_process_sleep(1000);
  XBT_INFO("# Shutdown and destroy all VMs");
  xbt_dynar_foreach(vms, i, vm) {
    MSG_vm_shutdown(vm);
    MSG_vm_destroy(vm);
  }
  xbt_dynar_free(&vms);
  return 0;
}
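/* `worker_fun()` and `send_tasks()` are referenced above but not defined in
 * this listing. The sketch below is a plausible minimal implementation, not
 * the original: the mailbox naming scheme (MBOX:WRK%02d) and the flop/byte
 * amounts are assumptions made for illustration. */
static void send_tasks(int nb_workers)
{
  for (int i = 0; i < nb_workers; i++) {
    char *tname = bprintf("Task%02d", i);
    char *mbox  = bprintf("MBOX:WRK%02d", i);                /* one mailbox per worker (assumed) */
    msg_task_t task = MSG_task_create(tname, 1e9, 1e6, NULL); /* illustrative amounts */
    MSG_task_send(task, mbox);
    xbt_free(tname);
    xbt_free(mbox);
  }
}

static int worker_fun(int argc, char *argv[])
{
  /* Each worker listens on the mailbox derived from its own process name. */
  char *mbox = bprintf("MBOX:%s", MSG_process_get_name(MSG_process_self()));
  msg_task_t task = NULL;
  MSG_task_receive(&task, mbox); /* blocks until the master sends work */
  MSG_task_execute(task);        /* runs on the VM this process was created on */
  MSG_task_destroy(task);
  xbt_free(mbox);
  return 0;
}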
static void test_dynamic_change(void)
{
  msg_host_t pm0 = MSG_host_by_name("Fafard");
  msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
  msg_vm_t vm1 = MSG_vm_create_core(pm0, "VM1");
  MSG_vm_start(vm0);
  MSG_vm_start(vm1);

  msg_task_t task0 = MSG_task_create("Task0", DOUBLE_MAX, 0, NULL);
  msg_task_t task1 = MSG_task_create("Task1", DOUBLE_MAX, 0, NULL);
  MSG_process_create("worker0", worker_busy_loop_main, &task0, vm0);
  MSG_process_create("worker1", worker_busy_loop_main, &task1, vm1);

  double task0_remain_prev = MSG_task_get_flops_amount(task0);
  double task1_remain_prev = MSG_task_get_flops_amount(task1);

  const double cpu_speed = MSG_host_get_speed(pm0);
  for (int i = 0; i < 10; i++) {
    double new_bound = (cpu_speed / 10) * i;
    XBT_INFO("set bound of VM1 to %f", new_bound);
    MSG_vm_set_bound(vm1, new_bound);
    MSG_process_sleep(100);

    double task0_remain_now = MSG_task_get_flops_amount(task0);
    double task1_remain_now = MSG_task_get_flops_amount(task1);

    /* Flops consumed over the last 100-second window */
    double task0_flops = task0_remain_prev - task0_remain_now;
    double task1_flops = task1_remain_prev - task1_remain_now;

    XBT_INFO("Task0@VM0: %f flops/s", task0_flops / 100);
    XBT_INFO("Task1@VM1: %f flops/s", task1_flops / 100);

    task0_remain_prev = task0_remain_now;
    task1_remain_prev = task1_remain_now;
  }

  MSG_process_sleep(2000); // let the tasks end

  MSG_vm_destroy(vm0);
  MSG_vm_destroy(vm1);
}
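/* `worker_busy_loop_main()` is not shown above. Given that the master passes
 * `&task0` / `&task1` as process data, a minimal sketch consistent with that
 * call site could be: */
static int worker_busy_loop_main(int argc, char *argv[])
{
  /* Retrieve the task pointer handed over at MSG_process_create() time. */
  msg_task_t *task = MSG_process_get_data(MSG_process_self());
  MSG_task_execute(*task); /* busy-loops until the (huge) flop amount is consumed */
  return 0;
}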
static int master_main(int argc, char* argv[])
{
  msg_host_t pm0 = MSG_host_by_name("Fafard");
  msg_host_t pm1 = MSG_host_by_name("Tremblay");
  msg_host_t pm2 = MSG_host_by_name("Bourassa");

  msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
  MSG_vm_set_ramsize(vm0, 1e9); // 1Gbytes
  MSG_vm_start(vm0);

  XBT_INFO("Test: Migrate a VM with %zu Mbytes RAM", MSG_vm_get_ramsize(vm0) / 1000 / 1000);
  vm_migrate(vm0, pm1);

  MSG_vm_destroy(vm0);

  vm0 = MSG_vm_create_core(pm0, "VM0");
  MSG_vm_set_ramsize(vm0, 1e8); // 100Mbytes
  MSG_vm_start(vm0);

  XBT_INFO("Test: Migrate a VM with %zu Mbytes RAM", MSG_vm_get_ramsize(vm0) / 1000 / 1000);
  vm_migrate(vm0, pm1);

  MSG_vm_destroy(vm0);

  vm0 = MSG_vm_create_core(pm0, "VM0");
  msg_vm_t vm1 = MSG_vm_create_core(pm0, "VM1");
  MSG_vm_set_ramsize(vm0, 1e9); // 1Gbytes
  MSG_vm_set_ramsize(vm1, 1e9); // 1Gbytes
  MSG_vm_start(vm0);
  MSG_vm_start(vm1);

  XBT_INFO("Test: Migrate two VMs at once from PM0 to PM1");
  vm_migrate_async(vm0, pm1);
  vm_migrate_async(vm1, pm1);
  MSG_process_sleep(10000);

  MSG_vm_destroy(vm0);
  MSG_vm_destroy(vm1);

  vm0 = MSG_vm_create_core(pm0, "VM0");
  vm1 = MSG_vm_create_core(pm0, "VM1");
  MSG_vm_set_ramsize(vm0, 1e9); // 1Gbytes
  MSG_vm_set_ramsize(vm1, 1e9); // 1Gbytes
  MSG_vm_start(vm0);
  MSG_vm_start(vm1);

  XBT_INFO("Test: Migrate two VMs at once to different PMs");
  vm_migrate_async(vm0, pm1);
  vm_migrate_async(vm1, pm2);
  MSG_process_sleep(10000);

  MSG_vm_destroy(vm0);
  MSG_vm_destroy(vm1);
  return 0;
}
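/* `vm_migrate()` and `vm_migrate_async()` are not defined in this listing.
 * A minimal sketch: the synchronous version wraps MSG_vm_migrate() with
 * timing, and the asynchronous version spawns a process to do the same so
 * that two migrations can overlap. The argument struct and the choice to run
 * the migration process on the destination PM are assumptions. */
static void vm_migrate(msg_vm_t vm, msg_host_t dst_pm)
{
  double start = MSG_get_clock();
  MSG_vm_migrate(vm, dst_pm); /* blocks until the live migration completes */
  XBT_INFO("%s migrated to %s in %g s", MSG_host_get_name(vm),
           MSG_host_get_name(dst_pm), MSG_get_clock() - start);
}

typedef struct migration_args {
  msg_vm_t vm;
  msg_host_t dst_pm;
} migration_args_t;

static int migration_worker_main(int argc, char *argv[])
{
  migration_args_t *args = MSG_process_get_data(MSG_process_self());
  vm_migrate(args->vm, args->dst_pm);
  xbt_free(args);
  return 0;
}

static void vm_migrate_async(msg_vm_t vm, msg_host_t dst_pm)
{
  migration_args_t *args = xbt_new(migration_args_t, 1);
  args->vm     = vm;
  args->dst_pm = dst_pm;
  MSG_process_create("mig_wrk", migration_worker_main, args, dst_pm);
}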
static int dvfs(int argc, char *argv[])
{
  msg_host_t host1 = MSG_host_by_name("MyHost1");
  msg_host_t host2 = MSG_host_by_name("MyHost2");
  msg_host_t host3 = MSG_host_by_name("MyHost3");

  /* Host 1 */
  XBT_INFO("Creating and starting two VMs");
  /* args: ncpus, ramsize, net capacity, disk path, disk size, migration netspeed, dirty-page intensity */
  msg_vm_t vm_host1 = MSG_vm_create(host1, "vm_host1", 4, 2048, 100, NULL, 1024 * 20, 10, 50);
  MSG_vm_start(vm_host1);
  msg_vm_t vm_host3 = MSG_vm_create(host3, "vm_host3", 4, 2048, 100, NULL, 1024 * 20, 10, 50);
  MSG_vm_start(vm_host3);

  XBT_INFO("Create two tasks on Host1: one inside a VM, the other directly on the host");
  MSG_process_create("p11", worker_func, NULL, vm_host1);
  MSG_process_create("p12", worker_func, NULL, host1);

  XBT_INFO("Create two tasks on Host2: both directly on the host");
  MSG_process_create("p21", worker_func, NULL, host2);
  MSG_process_create("p22", worker_func, NULL, host2);

  XBT_INFO("Create two tasks on Host3: both inside a VM");
  MSG_process_create("p31", worker_func, NULL, vm_host3);
  MSG_process_create("p32", worker_func, NULL, vm_host3);

  XBT_INFO("Wait 5 seconds. The tasks are still running (they run for 3 seconds, but 2 tasks are co-located, "
           "so they run for 6 seconds)");
  MSG_process_sleep(5);
  XBT_INFO("Wait another 5 seconds. The tasks stop at some point in between");
  MSG_process_sleep(5);

  MSG_vm_shutdown(vm_host1);
  MSG_vm_shutdown(vm_host3);
  MSG_vm_destroy(vm_host1);
  MSG_vm_destroy(vm_host3);
  return 0;
}
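/* `worker_func()` is not shown. The log messages above say each task runs for
 * 3 seconds when alone on a core, so a minimal sketch sizes the task at three
 * times the host's nominal speed; the exact sizing is an assumption. */
static int worker_func(int argc, char *argv[])
{
  msg_host_t host = MSG_host_self(); /* the VM or PM this process runs on */
  msg_task_t task = MSG_task_create("Task", 3 * MSG_host_get_speed(host), 0, NULL);
  MSG_task_execute(task);
  MSG_task_destroy(task);
  XBT_INFO("task done");
  return 0;
}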
static int master_main(int argc, char *argv[])
{
  xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
  msg_host_t pm0 = MSG_host_by_name("Fafard");

  msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
  MSG_vm_start(vm0);
  launch_computation_worker(vm0);

  /* Poll the progress of the worker's task (atask) for the first 100 seconds. */
  while (MSG_get_clock() < 100) {
    if (atask != NULL)
      XBT_INFO("aTask remaining duration: %g", MSG_task_get_flops_amount(atask));
    MSG_process_sleep(1);
  }

  MSG_process_sleep(10000);
  MSG_vm_destroy(vm0);
  xbt_dynar_free(&hosts_dynar);
  return 1;
}
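/* `atask` and `launch_computation_worker()` are not defined in this listing.
 * A minimal sketch consistent with the polling loop above: the worker stores
 * its task in a global so that the master can watch the remaining flops. The
 * flop amount is an assumption. */
static msg_task_t atask = NULL;

static int computation_fun(int argc, char *argv[])
{
  atask = MSG_task_create("aTask", 1e10, 0, NULL); /* illustrative amount */
  double start = MSG_get_clock();
  MSG_task_execute(atask);
  XBT_INFO("aTask took %g s", MSG_get_clock() - start);
  MSG_task_destroy(atask);
  atask = NULL;
  return 0;
}

static void launch_computation_worker(msg_host_t host)
{
  MSG_process_create("compute", computation_fun, NULL, host);
}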
static int master_main(int argc, char *argv[]) { msg_host_t pm0 = MSG_host_by_name("Fafard"); msg_host_t pm1 = MSG_host_by_name("Tremblay"); msg_host_t pm2 = MSG_host_by_name("Bourassa"); msg_vm_t vm0, vm1; s_vm_params_t params; memset(¶ms, 0, sizeof(params)); vm0 = MSG_vm_create_core(pm0, "VM0"); params.ramsize = 1L * 1000 * 1000 * 1000; // 1Gbytes MSG_host_set_params(vm0, ¶ms); MSG_vm_start(vm0); XBT_INFO("Test: Migrate a VM with %llu Mbytes RAM", params.ramsize / 1000 / 1000); vm_migrate(vm0, pm1); MSG_vm_destroy(vm0); vm0 = MSG_vm_create_core(pm0, "VM0"); params.ramsize = 1L * 1000 * 1000 * 100; // 100Mbytes MSG_host_set_params(vm0, ¶ms); MSG_vm_start(vm0); XBT_INFO("Test: Migrate a VM with %llu Mbytes RAM", params.ramsize / 1000 / 1000); vm_migrate(vm0, pm1); MSG_vm_destroy(vm0); vm0 = MSG_vm_create_core(pm0, "VM0"); vm1 = MSG_vm_create_core(pm0, "VM1"); params.ramsize = 1L * 1000 * 1000 * 1000; // 1Gbytes MSG_host_set_params(vm0, ¶ms); MSG_host_set_params(vm1, ¶ms); MSG_vm_start(vm0); MSG_vm_start(vm1); XBT_INFO("Test: Migrate two VMs at once from PM0 to PM1"); vm_migrate_async(vm0, pm1); vm_migrate_async(vm1, pm1); MSG_process_sleep(10000); MSG_vm_destroy(vm0); MSG_vm_destroy(vm1); vm0 = MSG_vm_create_core(pm0, "VM0"); vm1 = MSG_vm_create_core(pm0, "VM1"); params.ramsize = 1L * 1000 * 1000 * 1000; // 1Gbytes MSG_host_set_params(vm0, ¶ms); MSG_host_set_params(vm1, ¶ms); MSG_vm_start(vm0); MSG_vm_start(vm1); XBT_INFO("Test: Migrate two VMs at once to different PMs"); vm_migrate_async(vm0, pm1); vm_migrate_async(vm1, pm2); MSG_process_sleep(10000); MSG_vm_destroy(vm0); MSG_vm_destroy(vm1); return 0; }
static int master_main(int argc, char *argv[]) { msg_host_t pm0 = MSG_host_by_name("Fafard"); msg_host_t pm1 = MSG_host_by_name("Fafard"); XBT_INFO("# 1. Put a single task on a PM. "); test_one_task(pm0); XBT_INFO(" "); XBT_INFO("# 2. Put two tasks on a PM."); test_two_tasks(pm0, pm0); XBT_INFO(" "); msg_host_t vm0 = MSG_vm_create_core(pm0, "VM0"); MSG_vm_start(vm0); XBT_INFO("# 3. Put a single task on a VM. "); test_one_task(vm0); XBT_INFO(" "); XBT_INFO("# 4. Put two tasks on a VM."); test_two_tasks(vm0, vm0); XBT_INFO(" "); MSG_vm_destroy(vm0); vm0 = MSG_vm_create_core(pm0, "VM0"); MSG_vm_start(vm0); XBT_INFO("# 6. Put a task on a PM and a task on a VM."); test_two_tasks(pm0, vm0); XBT_INFO(" "); MSG_vm_destroy(vm0); vm0 = MSG_vm_create_core(pm0, "VM0"); double cpu_speed = MSG_host_get_speed(pm0); MSG_vm_set_bound(vm0, cpu_speed / 10); MSG_vm_start(vm0); XBT_INFO("# 7. Put a single task on the VM capped by 10%%."); test_one_task(vm0); XBT_INFO(" "); XBT_INFO("# 8. Put two tasks on the VM capped by 10%%."); test_two_tasks(vm0, vm0); XBT_INFO(" "); XBT_INFO("# 9. Put a task on a PM and a task on the VM capped by 10%%."); test_two_tasks(pm0, vm0); XBT_INFO(" "); MSG_vm_destroy(vm0); vm0 = MSG_vm_create_core(pm0, "VM0"); s_vm_params_t params; memset(¶ms, 0, sizeof(params)); params.ramsize = 1L * 1000 * 1000 * 1000; // 1Gbytes MSG_host_set_params(vm0, ¶ms); MSG_vm_start(vm0); cpu_speed = MSG_host_get_speed(pm0); MSG_vm_start(vm0); XBT_INFO("# 10. Test migration"); const double computation_amount = cpu_speed * 10; XBT_INFO("# 10. (a) Put a task on a VM without any bound."); launch_worker(vm0, "worker0", computation_amount, 0, 0); MSG_process_sleep(1000); XBT_INFO(" "); XBT_INFO("# 10. (b) set 10%% bound to the VM, and then put a task on the VM."); MSG_vm_set_bound(vm0, cpu_speed / 10); launch_worker(vm0, "worker0", computation_amount, 0, 0); MSG_process_sleep(1000); XBT_INFO(" "); XBT_INFO("# 10. (c) migrate"); MSG_vm_migrate(vm0, pm1); XBT_INFO(" "); XBT_INFO("# 10. (d) Put a task again on the VM."); launch_worker(vm0, "worker0", computation_amount, 0, 0); MSG_process_sleep(1000); XBT_INFO(" "); MSG_vm_destroy(vm0); XBT_INFO("# 11. Change a bound dynamically."); test_dynamic_change(); return 0; }
JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_start(JNIEnv *env, jobject jvm)
{
  /* Resolve the native VM behind the Java object, then start it. */
  msg_vm_t vm = jvm_get_native(env, jvm);
  MSG_vm_start(vm);
}