/*
 * Model-checking harness for a logging / rendering / file-storage workload
 * built on Grand Central Dispatch.
 *
 * NONDETERMINISTIC and storage_system_busy are declared outside this view —
 * presumably verifier-controlled nondeterminism and a shared flag; TODO confirm.
 * The labels (logData:, storeFile:) mark abstracted operations for the checker.
 */
int main(){
    /* Serial queue: all log writes are serialised through it. */
    dispatch_queue_t logQueue = dispatch_queue_create("logging file access queue", DISPATCH_QUEUE_SERIAL);
    /* Concurrent queue: rendering and storage jobs may overlap. */
    dispatch_queue_t workQueue = dispatch_queue_create("concurrent work queue", DISPATCH_QUEUE_CONCURRENT);

    /* Abstracted log write, always dispatched onto logQueue. */
    dispatch_block_t logRequest = ^{
        logData: /* write request meta data on the log */;
    };

    /* Rendering job: loops nondeterministically, logging each iteration. */
    dispatch_block_t handleRendering = ^{
        while(NONDETERMINISTIC){
            /* do some heavy rendering computations */
            dispatch_async(logQueue, logRequest);
        }
    };

    /* Storage job: stores a file, logs it, then clears the busy flag.
     * The flag write is NOT synchronized here — that is what the harness
     * is designed to expose. */
    dispatch_block_t handleFileStorage = ^{
        storeFile: /* store file */
        dispatch_async(logQueue, logRequest);
        storage_system_busy = 0;
    };

    /* Driver loop: nondeterministically submits storage or rendering work. */
    while(1){
        if(storage_system_busy == 0 && NONDETERMINISTIC){
            assert(storage_system_busy == 0); // check for possible race conditions
            storage_system_busy = 1;
            dispatch_async(workQueue, handleFileStorage);
        }
        else if(NONDETERMINISTIC){
            dispatch_async(workQueue, handleRendering);
        }
    }
    return 0;
}
/*
 * Response handler for the "add torrent by URL" dialog.
 *
 * On accept: reads the URL from the entry, warns if it is a magnet link and
 * DHT is disabled, then dispatches a torrent-add request to the daemon.
 * The dialog is destroyed on every response.
 */
static void trg_torrent_add_url_response_cb(TrgTorrentAddUrlDialog * dlg,
                                            gint res_id, gpointer data)
{
    TrgTorrentAddUrlDialogPrivate *priv =
        TRG_TORRENT_ADD_URL_DIALOG_GET_PRIVATE(dlg);

    if (res_id == GTK_RESPONSE_ACCEPT) {
        const gchar *url = gtk_entry_get_text(GTK_ENTRY(priv->urlEntry));
        gboolean startNow =
            gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(priv->startCheck));
        JsonNode *request;

        /* Magnet links require DHT support; warn before submitting. */
        if (g_str_has_prefix(url, "magnet:") && !has_dht_support(dlg))
            show_dht_not_enabled_warning(dlg);

        request = torrent_add_url(url, startNow);
        dispatch_async(priv->client, request,
                       on_generic_interactive_action, data);
    }

    gtk_widget_destroy(GTK_WIDGET(dlg));
}
/*
 * Verification harness: a serial queue must execute blocks in FIFO order.
 * Each block asserts the value written by its predecessor, then advances
 * the shared counter (last_managed_block, declared outside this view).
 */
int main(){
    dispatch_queue_t serialQueue =
        dispatch_queue_create("queue", DISPATCH_QUEUE_SERIAL);

    /* First in line: expects the initial state. */
    dispatch_block_t firstBlock = ^{
        assert(last_managed_block == 0);
        last_managed_block = 1;
    };
    /* Second in line: must see the first block's write. */
    dispatch_block_t secondBlock = ^{
        assert(last_managed_block == 1);
        last_managed_block = 2;
    };
    /* Last in line: must see the second block's write. */
    dispatch_block_t thirdBlock = ^{
        assert(last_managed_block == 2);
    };

    dispatch_async(serialQueue, firstBlock);
    dispatch_async(serialQueue, secondBlock);
    dispatch_async(serialQueue, thirdBlock);
    return 0;
}
/*
 * Hand a request off to the shared thread pool for asynchronous processing.
 * The block captures `request` and invokes its process handler on a pool
 * thread. Always reports success (1); dispatch_async itself cannot fail here.
 */
int request_enqueue(REQUEST *request) {
    dispatch_block_t block;

    block = ^{
        /* NOTE(review): `fun` is not declared in this function — presumably a
         * file-scope action/handler captured by the block; TODO confirm. */
        request->process(request, fun);
    };
    dispatch_async(thread_pool.queue, block);
    return 1;
}
/*
 * Hand a request off to the shared thread pool for asynchronous processing.
 * Unlike the variant above, this dispatches through radius_handle_request()
 * rather than the request's own process pointer. Always reports success (1).
 */
int request_enqueue(REQUEST *request) {
    dispatch_block_t block;

    block = ^{
        /* NOTE(review): `fun` is not declared in this function — presumably a
         * file-scope handler captured by the block; TODO confirm. */
        radius_handle_request(request, fun);
    };
    dispatch_async(thread_pool.queue, block);
    return 1;
}
/*
 * Test: dispatch_after() with C++ lambdas, nested three deep.
 * Each level schedules a timer (6s, then 2s, then 0s), measures the elapsed
 * time against a min/max window, and asserts the callback fired inside it.
 * The innermost level posts done() to end the test; dispatch_main() never
 * returns on success, so reaching MU_FAIL indicates a broken run loop.
 */
extern "C" void dispatch_after_lambda(void) {
    MU_BEGIN_TEST(dispatch_after_lambda);

    dispatch_async(dispatch_get_main_queue(), [=]{
        /* Level A: fire at ~6s, accept 5.5s..6.5s. */
        dispatch_time_t time_a_min = dispatch_time(0, 5.5*NSEC_PER_SEC);
        dispatch_time_t time_a = dispatch_time(0, 6.0*NSEC_PER_SEC);
        dispatch_time_t time_a_max = dispatch_time(0, 6.5*NSEC_PER_SEC);
        dispatch_time_t time_a_start = dispatch_time(0,0);
        dispatch_after(time_a, dispatch_get_current_queue(), [=]{
            dispatch_time_t now_a = dispatch_time(0, 0);
            MU_MESSAGE("must finish between 5.5s and 6.5s: %f",(now_a-time_a_start)/(float)NSEC_PER_SEC);
            MU_ASSERT_TRUE(0<=(now_a - time_a_min));
            MU_ASSERT_TRUE(0<=(time_a_max - now_a));

            /* Level B: fire at ~2s, accept 1.5s..2.5s. */
            dispatch_time_t time_b_min = dispatch_time(0, 1.5*NSEC_PER_SEC);
            dispatch_time_t time_b = dispatch_time(0, 2*NSEC_PER_SEC);
            dispatch_time_t time_b_max = dispatch_time(0, 2.5*NSEC_PER_SEC);
            dispatch_time_t time_b_start = dispatch_time(0,0);
            dispatch_after(time_b, dispatch_get_current_queue(), [=]{
                dispatch_time_t now_b = dispatch_time(0, 0);
                MU_MESSAGE("must finish between 1.5s and 2.5s: %f",(now_b-time_b_start)/(float)NSEC_PER_SEC);
                MU_ASSERT_TRUE(0<=(now_b - time_b_min));
                MU_ASSERT_TRUE(0<=(time_b_max - now_b));
#if 1 // FIXME: Nesting three lambdas seems to be broken...
                /* Level C: fire immediately, accept 0s..0.5s. */
                dispatch_time_t time_c_min = dispatch_time(0, 0*NSEC_PER_SEC);
                dispatch_time_t time_c = dispatch_time(0, 0*NSEC_PER_SEC);
                dispatch_time_t time_c_max = dispatch_time(0, .5*NSEC_PER_SEC);
                dispatch_time_t time_c_start = dispatch_time(0, 0);
                dispatch_after(time_c, dispatch_get_current_queue(), [=]{
                    dispatch_time_t now_c = dispatch_time(0, 0);
                    MU_MESSAGE("must finish between 0s and .5s: %f",(now_c-time_c_start)/(float)NSEC_PER_SEC);
                    MU_ASSERT_TRUE(0<=(now_c - time_c_min));
                    MU_ASSERT_TRUE(0<=(time_c_max - now_c));
                    dispatch_async_f(dispatch_get_current_queue(), NULL, done);
                });
#else
                dispatch_async_f(dispatch_get_current_queue(), NULL, done);
#endif
            });
        });
    });

    dispatch_main();
    MU_FAIL("Should never reach this");
    MU_END_TEST;
}
/*
 * Cell-edited callback for the tracker announce-URL column.
 * Updates the list store with the new text, then builds and dispatches a
 * torrent-set request: "trackerAdd" when the row is the placeholder add row
 * (icon == GTK_STOCK_ADD), otherwise "trackerReplace" with [id, new URL].
 */
static void trg_tracker_announce_edited(GtkCellRendererText * renderer,
                                        gchar * path, gchar * new_text,
                                        gpointer user_data)
{
    TrgTrackersTreeViewPrivate *priv =
        TRG_TRACKERS_TREE_VIEW_GET_PRIVATE(user_data);
    GtkTreeModel *model = gtk_tree_view_get_model(GTK_TREE_VIEW(user_data));
    gint64 torrentId =
        trg_trackers_model_get_torrent_id(TRG_TRACKERS_MODEL(model));
    JsonArray *torrentIds = json_array_new();
    JsonArray *trackerModifiers = json_array_new();
    gint64 trackerId;
    JsonNode *req;
    JsonObject *args;
    GtkTreeIter iter;
    gchar *icon;

    /* Reflect the edit in the model immediately, then read back the row's
     * tracker id and icon (icon identifies the "add new tracker" row). */
    gtk_tree_model_get_iter_from_string(model, &iter, path);
    gtk_list_store_set(GTK_LIST_STORE(model), &iter, TRACKERCOL_ANNOUNCE,
                       new_text, -1);
    gtk_tree_model_get(model, &iter, TRACKERCOL_ID, &trackerId,
                       TRACKERCOL_ICON, &icon, -1);

    json_array_add_int_element(torrentIds, torrentId);
    req = torrent_set(torrentIds);
    args = node_get_arguments(req);

    if (!g_strcmp0(icon, GTK_STOCK_ADD)) {
        /* Placeholder row: add a brand-new tracker URL. */
        json_array_add_string_element(trackerModifiers, new_text);
        json_object_set_array_member(args, "trackerAdd", trackerModifiers);
    } else {
        /* Existing row: replace tracker `trackerId` with the new URL. */
        json_array_add_int_element(trackerModifiers, trackerId);
        json_array_add_string_element(trackerModifiers, new_text);
        json_object_set_array_member(args, "trackerReplace", trackerModifiers);
    }

    g_free(icon); /* gtk_tree_model_get returned an owned copy */
    dispatch_async(priv->client, req, on_trackers_update, user_data);
}
/*
 * Response handler for the "move torrent data" dialog.
 *
 * On accept: builds a set-location request from the chosen destination and
 * the move-data checkbox, remembers the destination for next time, and
 * dispatches the request (priv->ids ownership passes to the request).
 * On any other response: releases priv->ids. The dialog is always destroyed.
 */
static void trg_torrent_move_response_cb(GtkDialog * dlg, gint res_id,
                                         gpointer data)
{
    TrgTorrentMoveDialogPrivate *priv =
        TRG_TORRENT_MOVE_DIALOG_GET_PRIVATE(dlg);

    if (res_id != GTK_RESPONSE_ACCEPT) {
        /* Cancelled/closed: drop our reference on the target torrent ids. */
        json_array_unref(priv->ids);
    } else {
        gchar *dir =
            trg_destination_combo_get_dir(TRG_DESTINATION_COMBO
                                          (priv->location_combo));
        gboolean moveData =
            gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(priv->move_check));
        JsonNode *request = torrent_set_location(priv->ids, dir, moveData);

        g_free(dir); /* torrent_set_location copies the path */
        trg_destination_combo_save_selection(TRG_DESTINATION_COMBO
                                             (priv->location_combo));
        dispatch_async(priv->client, request,
                       on_generic_interactive_action_response, data);
    }

    gtk_widget_destroy(GTK_WIDGET(dlg));
}
/*
 * Asynchronous (task-based) for_each over a segmented iterator range.
 * Splits [first, last) at partition boundaries, dispatches `algo` to the
 * locality owning each segment, and composes the final iterator from the
 * last segment's result once all futures are ready.
 *
 * NOTE(review): the template<...> header declaring Algo/ExPolicy/SegIter/
 * F/Proj is outside this view — presumably immediately above this function.
 */
static typename util::detail::algorithm_result<ExPolicy, SegIter>::type
segmented_for_each(Algo && algo, ExPolicy const& policy,
    SegIter first, SegIter last, F && f, Proj && proj,
    boost::mpl::false_)
{
    typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
    typedef typename traits::segment_iterator segment_iterator;
    typedef typename traits::local_iterator local_iterator_type;
    typedef util::detail::algorithm_result<ExPolicy, SegIter> result;

    /* Force sequential execution per segment for input iterators. */
    typedef typename std::iterator_traits<SegIter>::iterator_category
        iterator_category;
    typedef typename boost::mpl::bool_<boost::is_same<
            iterator_category, std::input_iterator_tag
        >::value> forced_seq;

    segment_iterator sit = traits::segment(first);
    segment_iterator send = traits::segment(last);

    std::vector<future<local_iterator_type> > segments;
    segments.reserve(std::distance(sit, send));

    if (sit == send) {
        // all elements are on the same partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, f, proj
            ));
        }
    }
    else {
        // handle the remaining part of the first partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::end(sit);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, f, proj
            ));
        }

        // handle all of the full partitions
        for (++sit; sit != send; ++sit) {
            beg = traits::begin(sit);
            end = traits::end(sit);
            if (beg != end) {
                segments.push_back(dispatch_async(traits::get_id(sit),
                    algo, policy, forced_seq(), beg, end, f, proj
                ));
            }
        }

        // handle the beginning of the last partition
        beg = traits::begin(sit);
        end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, f, proj
            ));
        }
    }

    /* Wait for every per-segment task, rethrow remote exceptions, and
     * build the returned SegIter from the last segment's local result. */
    return result::get(
        dataflow(
            [=](std::vector<hpx::future<local_iterator_type> > && r)
                -> SegIter
            {
                // handle any remote exceptions, will throw on error
                std::list<boost::exception_ptr> errors;
                parallel::util::detail::handle_remote_exceptions<
                    ExPolicy
                >::call(r, errors);
                return traits::compose(send, r.back().get());
            },
            std::move(segments)));
}
/*
 * Asynchronous min/max element search over a segmented iterator range.
 * Each segment's local result is immediately composed back into a global
 * SegIter (capturing the segment iterator), then the per-segment winners
 * are reduced sequentially by Algo::sequential_minmax_element_ind.
 *
 * NOTE(review): the template<...> header declaring Algo/ExPolicy/SegIter/
 * F/Proj is outside this view — presumably immediately above this function.
 */
static typename util::detail::algorithm_result<ExPolicy, SegIter>::type
segmented_minormax(Algo && algo, ExPolicy const& policy,
    SegIter first, SegIter last, F && f, Proj && proj,
    std::false_type)
{
    typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
    typedef typename traits::segment_iterator segment_iterator;
    typedef typename traits::local_iterator local_iterator_type;
    /* Non-forward iterators force sequential per-segment execution. */
    typedef std::integral_constant<bool,
            !hpx::traits::is_forward_iterator<SegIter>::value
        > forced_seq;
    typedef util::detail::algorithm_result<ExPolicy, SegIter> result;

    segment_iterator sit = traits::segment(first);
    segment_iterator send = traits::segment(last);

    std::vector<future<SegIter> > segments;
    segments.reserve(std::distance(sit, send));

    if (sit == send) {
        // all elements are on the same partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::local(last);
        if (beg != end) {
            segments.push_back(
                hpx::make_future<SegIter>(
                    dispatch_async(traits::get_id(sit), algo,
                        policy, forced_seq(), beg, end, f, proj),
                    [send](local_iterator_type const& out) -> SegIter
                    {
                        return traits::compose(send, out);
                    }));
        }
    }
    else {
        // handle the remaining part of the first partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::end(sit);
        if (beg != end) {
            segments.push_back(
                hpx::make_future<SegIter>(
                    dispatch_async(traits::get_id(sit), algo,
                        policy, forced_seq(), beg, end, f, proj),
                    [sit](local_iterator_type const& out) -> SegIter
                    {
                        return traits::compose(sit, out);
                    }));
        }

        // handle all of the full partitions
        for (++sit; sit != send; ++sit) {
            beg = traits::begin(sit);
            end = traits::end(sit);
            if (beg != end) {
                /* Each lambda captures the CURRENT sit by value, so the
                 * composition uses the right segment per task. */
                segments.push_back(
                    hpx::make_future<SegIter>(
                        dispatch_async(traits::get_id(sit), algo,
                            policy, forced_seq(), beg, end, f, proj),
                        [sit](local_iterator_type const& out) -> SegIter
                        {
                            return traits::compose(sit, out);
                        }));
            }
        }

        // handle the beginning of the last partition
        beg = traits::begin(sit);
        end = traits::local(last);
        if (beg != end) {
            segments.push_back(
                hpx::make_future<SegIter>(
                    dispatch_async(traits::get_id(sit), algo,
                        policy, forced_seq(), beg, end, f, proj),
                    [sit](local_iterator_type const& out) -> SegIter
                    {
                        return traits::compose(sit, out);
                    }));
        }
    }

    /* Gather the per-segment winners, rethrow remote exceptions, then run
     * the final sequential reduction over the (small) candidate list. */
    return result::get(
        dataflow(
            [=](std::vector<hpx::future<SegIter> > && r) -> SegIter
            {
                // handle any remote exceptions, will throw on error
                std::list<boost::exception_ptr> errors;
                parallel::util::detail::handle_remote_exceptions<
                    ExPolicy
                >::call(r, errors);

                std::vector<SegIter> res =
                    hpx::util::unwrapped(std::move(r));
                return Algo::sequential_minmax_element_ind(
                    policy, res.begin(), res.size(), f, proj);
            },
            std::move(segments)));
}
/*
 * Asynchronous transform_reduce over a segmented iterator range.
 * Each segment computes a partial value of type T remotely; the partials
 * are then folded together with red_op, seeded with init.
 *
 * NOTE(review): the template<...> header declaring Algo/ExPolicy/SegIter/
 * T/Reduce/Convert is outside this view — presumably immediately above.
 */
static typename util::detail::algorithm_result<ExPolicy, T>::type
segmented_transform_reduce(Algo && algo, ExPolicy const& policy,
    SegIter first, SegIter last, T && init, Reduce && red_op,
    Convert && conv_op, std::false_type)
{
    typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
    typedef typename traits::segment_iterator segment_iterator;
    typedef typename traits::local_iterator local_iterator_type;
    typedef util::detail::algorithm_result<ExPolicy, T> result;
    /* Non-forward iterators force sequential per-segment execution. */
    typedef std::integral_constant<bool,
            !hpx::traits::is_forward_iterator<SegIter>::value
        > forced_seq;

    segment_iterator sit = traits::segment(first);
    segment_iterator send = traits::segment(last);

    std::vector<shared_future<T> > segments;
    segments.reserve(std::distance(sit, send));

    if (sit == send) {
        // all elements are on the same partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::local(last);
        if (beg != end) {
            segments.push_back(
                dispatch_async(traits::get_id(sit), algo,
                    policy, forced_seq(), beg, end,
                    init, red_op, conv_op)
            );
        }
    }
    else {
        // handle the remaining part of the first partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::end(sit);
        if (beg != end) {
            segments.push_back(
                dispatch_async(traits::get_id(sit), algo,
                    policy, forced_seq(), beg, end,
                    init, red_op, conv_op)
            );
        }

        // handle all of the full partitions
        for (++sit; sit != send; ++sit) {
            beg = traits::begin(sit);
            end = traits::end(sit);
            if (beg != end) {
                segments.push_back(
                    dispatch_async(traits::get_id(sit), algo,
                        policy, forced_seq(), beg, end,
                        init, red_op, conv_op)
                );
            }
        }

        // handle the beginning of the last partition
        beg = traits::begin(sit);
        end = traits::local(last);
        if (beg != end) {
            segments.push_back(
                dispatch_async(traits::get_id(sit), algo,
                    policy, forced_seq(), beg, end,
                    init, red_op, conv_op)
            );
        }
    }

    /* Fold the per-segment partial results once all futures are ready.
     * NOTE: init is re-used as the fold seed here — each segment task also
     * received init above; presumably T's identity for red_op, TODO confirm. */
    return result::get(
        dataflow(
            [=](std::vector<shared_future<T> > && r) -> T
            {
                // handle any remote exceptions, will throw on error
                std::list<boost::exception_ptr> errors;
                parallel::util::detail::handle_remote_exceptions<
                    ExPolicy
                >::call(r, errors);

                // VS2015RC bails out if red_op is capture by ref
                return std::accumulate(
                    r.begin(), r.end(), init,
                    [=](T const& val, shared_future<T>& curr)
                    {
                        return red_op(val, curr.get());
                    });
            },
            std::move(segments)));
}
/*
 * Asynchronous count of `value` over a segmented iterator range.
 * Each segment is counted remotely; the per-segment counts are summed with
 * std::accumulate once every future is ready.
 *
 * NOTE(review): the template<...> header declaring Algo/ExPolicy/SegIter/T
 * is outside this view — presumably immediately above this function.
 */
static typename util::detail::algorithm_result<
    ExPolicy, typename std::iterator_traits<SegIter>::difference_type
>::type
segmented_count(Algo && algo, ExPolicy const& policy,
    SegIter first, SegIter last, T const& value, std::false_type)
{
    typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
    typedef typename traits::segment_iterator segment_iterator;
    typedef typename traits::local_iterator local_iterator_type;
    /* Non-forward iterators force sequential per-segment execution. */
    typedef std::integral_constant<bool,
            !hpx::traits::is_forward_iterator<SegIter>::value
        > forced_seq;
    typedef typename std::iterator_traits<SegIter>::difference_type
        value_type;
    typedef util::detail::algorithm_result<ExPolicy, value_type> result;

    segment_iterator sit = traits::segment(first);
    segment_iterator send = traits::segment(last);

    std::vector<shared_future<value_type> > segments;
    segments.reserve(std::distance(sit, send));

    if (sit == send) {
        // all elements are on the same partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, value));
        }
    }
    else {
        // handle the remaining part of the first partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::end(sit);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, value));
        }

        // handle all of the full partitions
        for (++sit; sit != send; ++sit) {
            beg = traits::begin(sit);
            end = traits::end(sit);
            if (beg != end) {
                segments.push_back(dispatch_async(traits::get_id(sit),
                    algo, policy, forced_seq(), beg, end, value));
            }
        }

        // handle the beginning of the last partition
        beg = traits::begin(sit);
        end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, value));
        }
    }

    /* Sum the per-segment counts; unwrapped() extracts the values and
     * rethrows any remote exception held by a future. */
    return result::get(
        dataflow(
            hpx::util::unwrapped([=](std::vector<value_type> && r)
            {
                return std::accumulate(r.begin(), r.end(), value_type());
            }),
            segments));
}
/*
 * Ask the daemon for a fresh session object.
 * Builds a session-get request and dispatches it asynchronously; `callback`
 * is invoked with `data` when the response arrives. Returns whatever
 * dispatch_async() reports for the submission.
 */
gboolean trg_client_update_session(TrgClient * tc, GSourceFunc callback,
                                   gpointer data)
{
    JsonNode *request = session_get();
    return dispatch_async(tc, request, callback, data);
}
/*
 * Asynchronous copy/move between two segmented iterator ranges.
 * Walks the input and output segment iterators in lock-step, dispatching
 * the transfer of each segment to the locality that owns it; the final
 * (input, output) iterator pair comes from the last segment's result.
 *
 * NOTE(review): the template<...> header declaring Algo/ExPolicy/SegIter/
 * SegOutIter is outside this view — presumably immediately above.
 */
static typename util::detail::algorithm_result<
    ExPolicy, std::pair<SegIter, SegOutIter>
>::type
segmented_transfer(Algo && algo, ExPolicy const& policy, std::false_type,
    SegIter first, SegIter last, SegOutIter dest)
{
    typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
    typedef typename traits::segment_iterator segment_iterator;
    typedef typename traits::local_iterator local_iterator_type;

    typedef hpx::traits::segmented_iterator_traits<SegOutIter>
        output_traits;
    typedef typename output_traits::segment_iterator
        segment_output_iterator;
    typedef typename output_traits::local_iterator
        local_output_iterator_type;

    typedef std::pair<
            local_iterator_type, local_output_iterator_type
        > local_iterator_pair;

    /* Non-forward input iterators force sequential per-segment execution. */
    typedef std::integral_constant<bool,
            !hpx::traits::is_forward_iterator<SegIter>::value
        > forced_seq;

    segment_iterator sit = traits::segment(first);
    segment_iterator send = traits::segment(last);
    segment_output_iterator sdest = traits::segment(dest);

    std::vector<shared_future<local_iterator_pair> > segments;
    segments.reserve(std::distance(sit, send));

    if (sit == send) {
        // all elements are on the same partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end,
                traits::local(dest)));
        }
    }
    else {
        // handle the remaining part of the first partition
        local_iterator_type beg = traits::local(first);
        local_iterator_type end = traits::end(sit);
        local_output_iterator_type out = traits::local(dest);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end, out));
        }

        // handle all of the full partitions
        /* Input and output segment iterators advance together; the (void)
         * casts silence unused-result warnings on the comma expressions. */
        for ((void) ++sit, ++sdest; sit != send; (void) ++sit, ++sdest) {
            beg = traits::begin(sit);
            end = traits::end(sit);
            out = traits::begin(sdest);
            if (beg != end) {
                segments.push_back(dispatch_async(traits::get_id(sit),
                    algo, policy, forced_seq(), beg, end, out));
            }
        }

        // handle the beginning of the last partition
        beg = traits::begin(sit);
        end = traits::local(last);
        if (beg != end) {
            segments.push_back(dispatch_async(traits::get_id(sit),
                algo, policy, forced_seq(), beg, end,
                traits::begin(sdest)));
        }
    }
    HPX_ASSERT(!segments.empty());

    /* Compose the global end-iterators from the final segment's result.
     * NOTE(review): p.first (an INPUT local iterator) is composed with
     * output_traits/sdest — looks like it should use traits::compose(send,
     * p.first); verify against the upstream HPX sources before changing. */
    return util::detail::algorithm_result<
            ExPolicy, std::pair<SegIter, SegOutIter>
        >::get(
            lcos::local::dataflow(
                [=](std::vector<shared_future<local_iterator_pair> > && r)
                    -> std::pair<SegIter, SegOutIter>
                {
                    // handle any remote exceptions, will throw on error
                    std::list<boost::exception_ptr> errors;
                    parallel::util::detail::handle_remote_exceptions<
                        ExPolicy
                    >::call(r, errors);

                    local_iterator_pair p = r.back().get();
                    return std::make_pair(
                        output_traits::compose(sdest, p.first),
                        output_traits::compose(sdest, p.second)
                    );
                },
                std::move(segments)));
}
static void vlogger(int severity, int domain, const char *fmt, va_list args) { va_list ap; char *msg; time_t t; int ret; dispatch_block_t logblock; if (!((1 << domain) & logdomains) || (severity > threshold)) return; t = time(NULL); va_copy(ap, args); ret = vsnprintf(NULL, 0, fmt, ap); va_end(ap); if (ret <= 0) return; msg = (char *)malloc(ret + 1); if (!msg) return; va_copy(ap, args); ret = vsnprintf(msg, ret + 1, fmt, ap); va_end(ap); if (ret < 0) { free(msg); return; } logblock = ^{ char stamp[32]; int ret; if (!logfile && !console) { free(msg); return; } if (logfile) { ret = strftime(stamp, sizeof(stamp), "%Y-%m-%d %H:%M:%S", localtime(&t)); if (ret == 0) stamp[0] = '\0'; fprintf(logfile, "[%s] %8s: %s", stamp, labels[domain], msg); fflush(logfile); } if (console) fprintf(stderr, "%8s: %s", labels[domain], msg); free(msg); }; if (logsync) dispatch_sync(logger_sq, logblock); else dispatch_async(logger_sq, logblock); }