extern "C" void dispatch_after_lambda(void) { MU_BEGIN_TEST(dispatch_after_lambda); dispatch_async(dispatch_get_main_queue(), [=]{ dispatch_time_t time_a_min = dispatch_time(0, 5.5*NSEC_PER_SEC); dispatch_time_t time_a = dispatch_time(0, 6.0*NSEC_PER_SEC); dispatch_time_t time_a_max = dispatch_time(0, 6.5*NSEC_PER_SEC); dispatch_time_t time_a_start = dispatch_time(0,0); dispatch_after(time_a, dispatch_get_current_queue(), [=]{ dispatch_time_t now_a = dispatch_time(0, 0); MU_MESSAGE("must finish between 5.5s and 6.5s: %f",(now_a-time_a_start)/(float)NSEC_PER_SEC); MU_ASSERT_TRUE(0<=(now_a - time_a_min)); MU_ASSERT_TRUE(0<=(time_a_max - now_a)); dispatch_time_t time_b_min = dispatch_time(0, 1.5*NSEC_PER_SEC); dispatch_time_t time_b = dispatch_time(0, 2*NSEC_PER_SEC); dispatch_time_t time_b_max = dispatch_time(0, 2.5*NSEC_PER_SEC); dispatch_time_t time_b_start = dispatch_time(0,0); dispatch_after(time_b, dispatch_get_current_queue(), [=]{ dispatch_time_t now_b = dispatch_time(0, 0); MU_MESSAGE("must finish between 1.5s and 2.5s: %f",(now_b-time_b_start)/(float)NSEC_PER_SEC); MU_ASSERT_TRUE(0<=(now_b - time_b_min)); MU_ASSERT_TRUE(0<=(time_b_max - now_b)); #if 1 // FIXME: Nesting three lambdas seems to be broken... dispatch_time_t time_c_min = dispatch_time(0, 0*NSEC_PER_SEC); dispatch_time_t time_c = dispatch_time(0, 0*NSEC_PER_SEC); dispatch_time_t time_c_max = dispatch_time(0, .5*NSEC_PER_SEC); dispatch_time_t time_c_start = dispatch_time(0, 0); dispatch_after(time_c, dispatch_get_current_queue(), [=]{ dispatch_time_t now_c = dispatch_time(0, 0); MU_MESSAGE("must finish between 0s and .5s: %f",(now_c-time_c_start)/(float)NSEC_PER_SEC); MU_ASSERT_TRUE(0<=(now_c - time_c_min)); MU_ASSERT_TRUE(0<=(time_c_max - now_c)); dispatch_async_f(dispatch_get_current_queue(), NULL, done); }); #else dispatch_async_f(dispatch_get_current_queue(), NULL, done); #endif }); }); }); dispatch_main(); MU_FAIL("Should never reach this"); MU_END_TEST; }
static void do_test(void) { size_t i; char buf[1000]; count_down = COUNT; start = dispatch_time(0,0); for (i = 0; i < COUNT; i++) { #ifdef WIN32 _snprintf(buf, sizeof(buf), "com.example.starfish-node#%ld", i); #else snprintf(buf, sizeof(buf), "com.example.starfish-node#%ld", (long int)i); #endif queues[i] = dispatch_queue_create(buf, NULL); dispatch_suspend(queues[i]); } for (i = 0; i < COUNT; i++) { dispatch_async_f(queues[i], queues[i], start_node); } for (i = 0; i < COUNT; i++) { dispatch_resume(queues[i]); } }
int mdns_register(char *name, char *type, int port, char **txt) { struct mdns_group_entry *ge; AvahiStringList *txt_sl; int i; DPRINTF(E_DBG, L_MDNS, "Adding mDNS service %s/%s\n", name, type); ge = (struct mdns_group_entry *)malloc(sizeof(struct mdns_group_entry)); if (!ge) return -1; ge->name = strdup(name); ge->type = strdup(type); ge->port = port; txt_sl = NULL; for (i = 0; txt[i]; i++) { txt_sl = avahi_string_list_add(txt_sl, txt[i]); DPRINTF(E_DBG, L_MDNS, "Added key %s\n", txt[i]); } ge->txt = txt_sl; dispatch_async_f(mdns_sq, ge, mdns_register_task); return 0; }
/* Bounce a pong back to the originating queue; the reply queue itself is
 * passed along as the pong's context. */
static void
ping(void *context)
{
	dispatch_queue_t origin_q = (dispatch_queue_t)context;

	dispatch_async_f(origin_q, origin_q, pong);
}
int main(void) { test_start("Dispatch After"); dispatch_async_f(dispatch_get_main_queue(), NULL, func_outer); dispatch_main(); }
/* Entry point for the public-API smoke test: the main queue must exist,
 * and `work` is scheduled on it before entering the dispatch main loop. */
int main(void)
{
	dispatch_queue_t main_q = dispatch_get_main_queue();

	test_start("Dispatch (Public) API");
	test_ptr_notnull("dispatch_get_main_queue", main_q);

	dispatch_async_f(main_q, NULL, work);
	dispatch_main(); /* does not return; the test ends elsewhere */
}
static void dispatch_0(void* context) { time_delta_t* previous = (time_delta_t*)context; dispatch_time_t now_c = dispatch_time(0, 0); MU_MESSAGE("must finish between 0.0s and 0.5s: %f",(now_c-previous->start)/(float)NSEC_PER_SEC); MU_ASSERT_TRUE(0<=(now_c - previous->min)); MU_ASSERT_TRUE(0<=(previous->max - now_c)); dispatch_async_f(dispatch_get_current_queue(), NULL, done); }
/* Kick off the dispatch_after test: schedule dispatch_start on the main
 * queue and hand control to the dispatch runtime. */
void dispatch_after_function()
{
	MU_BEGIN_TEST(dispatch_after);

	dispatch_async_f(dispatch_get_main_queue(), NULL, dispatch_start);
	dispatch_main();

	/* dispatch_main() is not expected to return. */
	MU_FAIL("Should never reach this");
	MU_END_TEST;
}
/* Timing check for the innermost (zero-delay) dispatch_after hop; the
 * bracket [time_c_min, time_c_max] is set up by an earlier stage. */
void func_c(void* context)
{
	dispatch_time_t arrival = dispatch_time(0, 0);

	UNREFERENCED_PARAMETER(context);

	test_time_less_than("can't finish faster than 0s", 0, arrival - time_c_min);
	test_time_less_than("must finish faster than .5s", 0, time_c_max - arrival);

	/* Signal test completion. */
	dispatch_async_f(dispatch_get_current_queue(), NULL, done);
}
static void pong(void *context) { dispatch_queue_t this_q = (dispatch_queue_t)context; size_t replies = (size_t)dispatch_get_context(this_q); dispatch_set_context(this_q, (void *)--replies); if (!replies) { //MU_MESSAGE("collect from: %s\n", dispatch_queue_get_label(dispatch_get_current_queue())); dispatch_async_f(dispatch_get_main_queue(), NULL, collect); } }
/* Start one node: record COUNT expected replies in this queue's context
 * slot, then ping every queue with this queue as the reply target. */
static void
start_node(void *context)
{
	dispatch_queue_t self_q = (dispatch_queue_t)context;
	size_t n;

	dispatch_set_context(self_q, (void *)COUNT);
	for (n = 0; n < COUNT; n++) {
		dispatch_async_f(queues[n], self_q, ping);
	}
}
// Queue `garbage` for deferred freeing via `cleanup` on a dedicated
// dispatch queue, creating that queue lazily on first use.
PRIVATE void objc_collect_garbage_data(void(*cleanup)(void*), void *garbage)
{
	if (0 == garbage_queue) {
		// Double-checked creation: re-test under the runtime lock so
		// only one thread creates the queue.
		// NOTE(review): the unlocked first read is the classic
		// double-checked-locking hazard on weakly-ordered memory
		// models — confirm the garbage_queue store is published
		// safely (or that all platforms here tolerate it).
		LOCK_RUNTIME_FOR_SCOPE();
		if (0 == garbage_queue) {
			garbage_queue = dispatch_queue_create("ObjC deferred free queue", 0);
		}
	}
	dispatch_async_f(garbage_queue, garbage, cleanup);
}
/* Enqueue BLOCKS cpubusy work items on `queue`, all sharing `context`.
 * When USE_SET_TARGET_QUEUE is set, the caller's reference on the queue
 * is dropped here after submission. */
void
submit_work(dispatch_queue_t queue, void *context)
{
	int block_idx;

	for (block_idx = 0; block_idx < BLOCKS; ++block_idx) {
		dispatch_async_f(queue, context, cpubusy);
	}

#if USE_SET_TARGET_QUEUE
	dispatch_release(as_do(queue));
#endif
}
// Final teardown of a dispatch object. The target queue and the user
// finalizer/context are snapshotted BEFORE _os_object_dealloc() destroys
// the object's memory — the order here is load-bearing.
static void
_dispatch_dealloc(dispatch_object_t dou)
{
	dispatch_queue_t tq = dou._do->do_targetq;
	dispatch_function_t func = dou._do->do_finalizer;
	void *ctxt = dou._do->do_ctxt;

	_os_object_dealloc(dou._os_obj);

	// The finalizer runs (asynchronously on the target queue) only when
	// both a function and a context were set on the object.
	if (func && ctxt) {
		dispatch_async_f(tq, ctxt, func);
	}
	// Drop the reference the object held on its target queue; the async
	// above has already been enqueued, so tq stays alive long enough.
	_dispatch_release(tq);
}
// Wake everything waiting on a dispatch group (backed by a semaphore):
// first atomically detach the notify list and the waiter count, then
// signal all synchronous waiters, then enqueue every queued notify
// callback. Always returns 0.
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters: one platform-semaphore signal per waiter
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks: walk the detached list, dispatching
		// each callback and releasing its queue reference.
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// A producer published the tail but not yet the link;
				// spin until the next pointer becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the reference associated with the (now empty) notify list.
		_dispatch_release(dsema);
	}
	return 0;
}
/* Ruby binding: dispatch the given block asynchronously on this queue.
 * An optional first argument names a dispatch group to associate the
 * block with. Always returns nil. */
static VALUE
rb_queue_dispatch_async(VALUE self, SEL sel, int argc, VALUE *argv)
{
	rb_vm_block_t *prepared = get_prepared_block();
	VALUE group;

	rb_scan_args(argc, argv, "01", &group);

	if (group == Qnil) {
		/* No group: plain async submission. */
		dispatch_async_f(RQueue(self)->queue, (void *)prepared,
				rb_block_dispatcher);
	}
	else {
		Check_Group(group);
		dispatch_group_async_f(RGroup(group)->group, RQueue(self)->queue,
				(void *)prepared, rb_block_dispatcher);
	}
	return Qnil;
}
static void cascade(void* context) { uintptr_t idx, *idxptr = (uintptr_t*)context; if (done) return; idx = *idxptr + 1; if (idx < QUEUES) { *idxptr = idx; dispatch_async_f(queues[idx], context, cascade); } if (dispatch_atomic_dec(&iterations) == 0) { done = 1; histogram(); MU_PASS("Please check histogram to be sure"); } }
// Execute the queued interpreter commands for this query stream.
// With USE_QS_QUEUE each stream has its own dispatch queue: run inline
// when already on that queue, otherwise submit asynchronously to it
// (presumably inline execution avoids re-entrancy on the same serial
// queue — TODO confirm). Without USE_QS_QUEUE, just run directly.
void exec_quip(SINGLE_QSP_ARG_DECL)
{
#ifdef USE_QS_QUEUE // This is iOS only!
	//dispatch_async_f(QS_QUEUE(THIS_QSP),qsp,exec_qs_cmds);
	if( QS_QUEUE(THIS_QSP) == dispatch_get_current_queue() ){
		exec_qs_cmds(THIS_QSP);
	} else {
		dispatch_async_f(QS_QUEUE(THIS_QSP),THIS_QSP,exec_qs_cmds);
	}
#else /* ! USE_QS_QUEUE */
	exec_qs_cmds(THIS_QSP);
#endif /* ! USE_QS_QUEUE */
}
void dispatch_api() { dispatch_queue_t q = NULL; MU_BEGIN_TEST(dispatch_api); q = dispatch_get_main_queue(); MU_DESC_ASSERT_NOT_NULL_HEX("dispatch_get_main_queue",q); dispatch_async_f(q, NULL, pass); q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0); MU_DESC_ASSERT_NOT_NULL_HEX("dispatch_get_global_queue", q); q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW,0); MU_DESC_ASSERT_NOT_NULL_HEX("dispatch_get_global_queue", q); q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH,0); MU_DESC_ASSERT_NOT_NULL_HEX("dispatch_get_global_queue", q); dispatch_main(); MU_END_TEST; }
// Tasklet-aware wrapper around dispatch_async_f(). When the tasklet is
// blocked and targets the instance's main queue, the closure is parked in
// dispatch_what_array for later replay instead of being dispatched now;
// otherwise it is forwarded through rwsched_dispatch_intercept, which
// receives the heap-allocated `what` descriptor as its context.
void rwsched_dispatch_async_f(rwsched_tasklet_ptr_t sched_tasklet,
                              rwsched_dispatch_queue_t queue,
                              void *context,
                              dispatch_function_t handler)
{
  // Validate input parameters
  RW_CF_TYPE_VALIDATE(sched_tasklet, rwsched_tasklet_ptr_t);
  rwsched_instance_ptr_t instance = sched_tasklet->instance;
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);
  RW_ASSERT_TYPE(queue, rwsched_dispatch_queue_t);

  // If libdispatch is enabled for the entire instance, then call the libdispatch routine
  if (instance->use_libdispatch_only) {
    RW_ASSERT(queue->header.libdispatch_object._dq);
    if (queue == instance->main_rwqueue && sched_tasklet->blocking_mode.blocked) {
      //RW_CRASH();
      // Blocked on the main queue: record the closure for deferred replay.
      // NOTE(review): the descriptor is never freed (see original
      // "always leaked!" remark) — confirm whether the replay path is
      // expected to release it.
      rwsched_dispatch_what_ptr_t what = (rwsched_dispatch_what_ptr_t) RW_MALLOC0_TYPE(sizeof(*what), rwsched_dispatch_what_ptr_t); /* always leaked! */
      what->type = RWSCHED_DISPATCH_ASYNC;
      what->closure.handler = handler;
      what->closure.context = context;
      what->queue = queue;
      g_array_append_val(sched_tasklet->dispatch_what_array, what);
    } else {
      // Normal path: wrap the closure, take a tasklet reference for the
      // in-flight dispatch, and route through the intercept trampoline.
      rwsched_dispatch_what_ptr_t what = (rwsched_dispatch_what_ptr_t) RW_MALLOC0_TYPE(sizeof(*what), rwsched_dispatch_what_ptr_t);
      what->type = RWSCHED_DISPATCH_ASYNC;
      what->closure.handler = handler;
      what->closure.context = context;
      what->queue = queue;
      rwsched_tasklet_ref(sched_tasklet);
      what->tasklet_info = sched_tasklet;
      dispatch_async_f(queue->header.libdispatch_object._dq, (void*)what, rwsched_dispatch_intercept);
    }
    return;
  }

  // Not yet implemented
  RW_CRASH();
}
// Dispose of a dispatch_data buffer according to its destructor:
// the well-known sentinels (FREE / NONE / VM_DEALLOCATE) are handled
// inline; any other value is treated as a user destructor block and run
// (then released) asynchronously on the given queue.
static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
#if HAVE_MACH
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
#endif
	} else {
		// Custom destructor block: default to the default-priority
		// global queue when no queue was supplied.
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		// _dispatch_call_block_and_release invokes the block and
		// balances its retain.
		dispatch_async_f(queue, destructor,
				_dispatch_call_block_and_release);
	}
}
// Test: dispatch_after() timing at three nested delays (~6s, ~2s, 0s),
// each hop asserting its completion time against a min/max bracket.
// NOTE(review): this definition appears truncated in this chunk — the
// closing of the outer dispatch_async block and of test_after() itself
// is missing; confirm against the full file before editing.
void test_after(void)
{
	__block dispatch_time_t time_a_min, time_a, time_a_max;
	__block dispatch_time_t time_b_min, time_b, time_b_max;
	__block dispatch_time_t time_c_min, time_c, time_c_max;

	dispatch_test_start("Dispatch After");

	dispatch_async(dispatch_get_main_queue(), ^{
		// First hop: ~6s delay, accepted between 5.5s and 6.5s.
		time_a_min = dispatch_time(0, 5.5*NSEC_PER_SEC);
		time_a = dispatch_time(0, 6*NSEC_PER_SEC);
		time_a_max = dispatch_time(0, 6.5*NSEC_PER_SEC);
		dispatch_after(time_a, dispatch_get_main_queue(), ^{
			dispatch_time_t now_a = dispatch_time(0, 0);
			test_long_less_than("can't finish faster than 5.5s", 0, now_a - time_a_min);
			test_long_less_than("must finish faster than 6.5s", 0, time_a_max - now_a);

			// Second hop: ~2s delay, accepted between 1.5s and 2.5s.
			time_b_min = dispatch_time(0, 1.5*NSEC_PER_SEC);
			time_b = dispatch_time(0, 2*NSEC_PER_SEC);
			time_b_max = dispatch_time(0, 2.5*NSEC_PER_SEC);
			dispatch_after(time_b, dispatch_get_main_queue(), ^{
				dispatch_time_t now_b = dispatch_time(0, 0);
				test_long_less_than("can't finish faster than 1.5s", 0, now_b - time_b_min);
				test_long_less_than("must finish faster than 2.5s", 0, time_b_max - now_b);

				// Third hop: zero delay, accepted within 0.5s.
				time_c_min = dispatch_time(0, 0*NSEC_PER_SEC);
				time_c = dispatch_time(0, 0*NSEC_PER_SEC);
				time_c_max = dispatch_time(0, .5*NSEC_PER_SEC);
				dispatch_after(time_c, dispatch_get_main_queue(), ^{
					dispatch_time_t now_c = dispatch_time(0, 0);
					test_long_less_than("can't finish faster than 0s", 0, now_c - time_c_min);
					test_long_less_than("must finish faster than .5s", 0, time_c_max - now_c);

					// All hops passed: signal completion.
					dispatch_async_f(dispatch_get_main_queue(), NULL, done);
				});
			});
		});
// iOS emulator thread entry point. Initializes RetroArch from the wrapped
// argument struct, then (with HAVE_RGUI) runs the load-game / in-game /
// menu state machine until shutdown; notifies the GUI thread of exit via
// ios_rarch_exited — context (void*)1 signals a failure message, 0 a
// normal exit.
void* rarch_main_ios(void* args)
{
   struct rarch_main_wrap* argdata = (struct rarch_main_wrap*)args;
   int init_ret = rarch_main_init_wrap(argdata);
   ios_free_main_wrap(argdata);

   if (init_ret)
   {
      // Init failed: tell the GUI thread ((void*)1 => failure message).
      rarch_main_clear_state();
      dispatch_async_f(dispatch_get_main_queue(), (void*)1, ios_rarch_exited);
      return 0;
   }

#ifdef HAVE_RGUI
   char* system_directory = ios_get_rarch_system_directory();
   strlcpy(g_extern.savestate_dir, system_directory, sizeof(g_extern.savestate_dir));
   strlcpy(g_extern.savefile_dir, system_directory, sizeof(g_extern.savefile_dir));

   menu_init();
   g_extern.lifecycle_mode_state |= 1ULL << MODE_GAME;

   // If we started a ROM directly from command line,
   // push it to ROM history.
   if (!g_extern.libretro_dummy)
      menu_rom_history_push_current();

   // Main state machine: LOAD_GAME -> GAME -> MENU, until shutdown.
   for (;;)
   {
      if (g_extern.system.shutdown)
         break;
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_LOAD_GAME))
      {
         load_menu_game_prepare();

         // If ROM load fails, we exit RetroArch. On console it might make more sense to go back to menu though ...
         if (load_menu_game())
            g_extern.lifecycle_mode_state |= (1ULL << MODE_GAME);
         else
         {
#ifdef RARCH_CONSOLE
            g_extern.lifecycle_mode_state |= (1ULL << MODE_MENU);
#else
            // This needs to be here to tell the GUI thread that the emulator loop has stopped,
            // the (void*)1 makes it display the 'Failed to load game' message.
            dispatch_async_f(dispatch_get_main_queue(), (void*)1, ios_rarch_exited);
            return 1;
#endif
         }

         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_LOAD_GAME);
      }
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_GAME))
      {
         // Run frames (or idle while paused) until the core stops.
         while ((g_extern.is_paused && !g_extern.is_oneshot) ? rarch_main_idle_iterate() : rarch_main_iterate())
            process_events();

         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_GAME);
      }
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_MENU))
      {
         g_extern.lifecycle_mode_state |= 1ULL << MODE_MENU_PREINIT;
         while (!g_extern.system.shutdown && menu_iterate())
            process_events();
         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_MENU);
      }
      else
         break;
   }

   g_extern.system.shutdown = false;
   menu_free();

   if (g_extern.config_save_on_exit && *g_extern.config_path)
      config_save_file(g_extern.config_path);

   if (g_extern.main_is_init)
      rarch_main_deinit();

   free(system_directory);
#else
   // No RGUI: just run the core to completion.
   while ((g_extern.is_paused && !g_extern.is_oneshot) ? rarch_main_idle_iterate() : rarch_main_iterate());
   rarch_main_deinit();
#endif

   rarch_deinit_msg_queue();

#ifdef PERF_TEST
   rarch_perf_log();
#endif

   rarch_main_clear_state();
   // Normal exit notification (context 0 => no failure message).
   dispatch_async_f(dispatch_get_main_queue(), 0, ios_rarch_exited);
   return 0;
}
// Queue a WorkItem for asynchronous execution on the underlying dispatch
// queue. Ownership of the item transfers to executeWorkItem via leakPtr().
void WorkQueue::scheduleWork(PassOwnPtr<WorkItem> item)
{
    WorkItem* transferredItem = item.leakPtr();
    dispatch_async_f(m_dispatchQueue, transferredItem, executeWorkItem);
}
// Run `proc(param)` asynchronously on the default-priority global
// concurrent dispatch queue.
void apple_scheduler::schedule( TaskProc_t proc, void* param)
{
	dispatch_async_f(
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
			param, proc);
}
// Submit an operation for asynchronous execution on the wrapped native
// dispatch queue; _xdispatch_run_operation is the trampoline that invokes it.
// NOTE(review): ownership of `op` appears to pass to the trampoline —
// confirm that _xdispatch_run_operation releases it.
void queue::async( operation *op ) {
	dispatch_async_f( d->native, op, _xdispatch_run_operation );
}
// RetroArch frontend entry point: initialize the frontend driver and the
// core, run the LOAD_GAME / GAME / MENU state machine (HAVE_MENU builds)
// until shutdown, then tear everything down.
// NOTE(review): the leading #endif closes a conditional signature opened
// above this chunk; the __APPLE__ branch below references `args`, which
// must come from that alternate signature — confirm against the full file.
// NOTE(review): the RARCH_ERR message string was split across lines by
// extraction; rejoined here on one line.
int main(int argc, char *argv[])
#endif
{
   frontend_ctx = (frontend_ctx_driver_t*)frontend_ctx_init_first();

   if (frontend_ctx && frontend_ctx->init)
      frontend_ctx->init();

   rarch_main_clear_state();

#ifndef __APPLE__
   rarch_get_environment(argc, argv);
#endif

#if !defined(RARCH_CONSOLE)
#if defined(__APPLE__)
   struct rarch_main_wrap* argdata = (struct rarch_main_wrap*)args;
   int init_ret = rarch_main_init_wrap(argdata);
   apple_free_main_wrap(argdata);

   if (init_ret)
   {
      // Init failed: tell the GUI thread ((void*)1 => failure message).
      rarch_main_clear_state();
      dispatch_async_f(dispatch_get_main_queue(), (void*)1, apple_rarch_exited);
      return 0;
   }
#else
   rarch_init_msg_queue();
   int init_ret;
   if ((init_ret = rarch_main_init(argc, argv)))
      return init_ret;
#endif
#endif

#ifdef HAVE_MENU
   menu_init();

#ifndef __APPLE__
   if (frontend_ctx && frontend_ctx->process_args)
      frontend_ctx->process_args(argc, argv);
#endif

#ifdef RARCH_CONSOLE
   g_extern.lifecycle_mode_state |= 1ULL << MODE_LOAD_GAME;
#else
   g_extern.lifecycle_mode_state |= 1ULL << MODE_GAME;
#endif

#ifndef RARCH_CONSOLE
   // If we started a ROM directly from command line,
   // push it to ROM history.
   if (!g_extern.libretro_dummy)
      menu_rom_history_push_current();
#endif

   // Main state machine: LOAD_GAME -> GAME -> MENU, until shutdown.
   for (;;)
   {
      if (g_extern.system.shutdown)
         break;
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_LOAD_GAME))
      {
         load_menu_game_prepare();

         // If ROM load fails, we exit RetroArch. On console it might make more sense to go back to menu though ...
         if (load_menu_game())
            g_extern.lifecycle_mode_state |= (1ULL << MODE_GAME);
         else
         {
#if defined(RARCH_CONSOLE) || defined(__QNX__)
            g_extern.lifecycle_mode_state |= (1ULL << MODE_MENU);
#else
            if (frontend_ctx && frontend_ctx->shutdown)
               frontend_ctx->shutdown(true);
            return 1;
#endif
         }

         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_LOAD_GAME);
      }
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_GAME))
      {
#ifdef RARCH_CONSOLE
         driver.input->poll(NULL);
#endif
         if (driver.video_poke->set_aspect_ratio)
            driver.video_poke->set_aspect_ratio(driver.video_data, g_settings.video.aspect_ratio_idx);

         // Run frames (or idle while paused) until the core stops or the
         // GAME mode bit is cleared externally.
         while ((g_extern.is_paused && !g_extern.is_oneshot) ? rarch_main_idle_iterate() : rarch_main_iterate())
         {
            if (frontend_ctx && frontend_ctx->process_events)
               frontend_ctx->process_events();

            if (!(g_extern.lifecycle_mode_state & (1ULL << MODE_GAME)))
               break;
         }
         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_GAME);
      }
      else if (g_extern.lifecycle_mode_state & (1ULL << MODE_MENU))
      {
         g_extern.lifecycle_mode_state |= 1ULL << MODE_MENU_PREINIT;
         // Menu should always run with vsync on.
         video_set_nonblock_state_func(false);

         if (driver.audio_data)
            audio_stop_func();

         while (!g_extern.system.shutdown && menu_iterate())
         {
            if (frontend_ctx && frontend_ctx->process_events)
               frontend_ctx->process_events();

            if (!(g_extern.lifecycle_mode_state & (1ULL << MODE_MENU)))
               break;
         }

         // Restore the pre-menu blocking state and resume audio.
         driver_set_nonblock_state(driver.nonblock_state);

         if (driver.audio_data && !audio_start_func())
         {
            RARCH_ERR("Failed to resume audio driver. Will continue without audio.\n");
            g_extern.audio_active = false;
         }

         g_extern.lifecycle_mode_state &= ~(1ULL << MODE_MENU);
      }
      else
         break;
   }

   g_extern.system.shutdown = false;

   menu_free();

   if (g_extern.config_save_on_exit && *g_extern.config_path)
      config_save_file(g_extern.config_path);

#ifdef GEKKO
   /* Per-core input config saving */
   config_save_keybinds(g_extern.input_config_path);
#endif

#ifdef RARCH_CONSOLE
   global_uninit_drivers();
#endif
#else
   // No menu: just run the core to completion.
   while ((g_extern.is_paused && !g_extern.is_oneshot) ? rarch_main_idle_iterate() : rarch_main_iterate());
#endif

   rarch_main_deinit();
   rarch_deinit_msg_queue();

#ifdef PERF_TEST
   rarch_perf_log();
#endif

#if defined(HAVE_LOGGER)
   logger_shutdown();
#elif defined(HAVE_FILE_LOGGER)
   if (g_extern.log_file)
      fclose(g_extern.log_file);
   g_extern.log_file = NULL;
#endif

   if (frontend_ctx && frontend_ctx->deinit)
      frontend_ctx->deinit();

   if (g_extern.lifecycle_mode_state & (1ULL << MODE_EXITSPAWN) && frontend_ctx && frontend_ctx->exitspawn)
      frontend_ctx->exitspawn();

   rarch_main_clear_state();

   if (frontend_ctx && frontend_ctx->shutdown)
      frontend_ctx->shutdown(false);

   return 0;
}
void WorkQueue::dispatch(const Function<void()>& function) { dispatch_async_f(m_dispatchQueue, new Function<void()>(function), executeFunction); }
WEAK void halide_spawn_thread(void *user_context, void (*f)(void *), void *closure) { dispatch_async_f(dispatch_get_global_queue(0, 0), closure, f); }