void* DiscoveryClient::PeriodicRenew(void* arg) { DiscoveryClient* d = static_cast<DiscoveryClient*>(arg); int consecutive_renew_error = 0; int64_t init_sleep_s = FLAGS_discovery_renew_interval_s / 2 + butil::fast_rand_less_than(FLAGS_discovery_renew_interval_s / 2); if (bthread_usleep(init_sleep_s * 1000000) != 0) { if (errno == ESTOP) { return NULL; } } while (!bthread_stopped(bthread_self())) { if (consecutive_renew_error == FLAGS_discovery_reregister_threshold) { LOG(WARNING) << "Re-register since discovery renew error threshold reached"; // Do register until succeed or Cancel is called while (!bthread_stopped(bthread_self())) { if (d->DoRegister() == 0) { break; } bthread_usleep(FLAGS_discovery_renew_interval_s * 1000000); } consecutive_renew_error = 0; } if (d->DoRenew() != 0) { consecutive_renew_error++; continue; } consecutive_renew_error = 0; bthread_usleep(FLAGS_discovery_renew_interval_s * 1000000); } return NULL; }
void* check_sleep(void* pthread_task) { EXPECT_TRUE(bthread_self() != 0); // Create a no-signal task that other worker will not steal. The task will be // run if current bthread does context switch. bthread_attr_t attr = BTHREAD_ATTR_NORMAL | BTHREAD_NOSIGNAL; bthread_t th1; pthread_t run = 0; const pthread_t pid = pthread_self(); EXPECT_EQ(0, bthread_start_urgent(&th1, &attr, mark_run, &run)); if (pthread_task) { bthread_usleep(100000L); // due to NOSIGNAL, mark_run did not run. // FIXME: actually runs. someone is still stealing. // EXPECT_EQ((pthread_t)0, run); // bthread_usleep = usleep for BTHREAD_ATTR_PTHREAD EXPECT_EQ(pid, pthread_self()); // schedule mark_run bthread_flush(); } else { // start_urgent should jump to the new thread first, then back to // current thread. EXPECT_EQ(pid, run); // should run in the same pthread } EXPECT_EQ(0, bthread_join(th1, NULL)); if (pthread_task) { EXPECT_EQ(pid, pthread_self()); EXPECT_NE((pthread_t)0, run); // the mark_run should run. } return NULL; }
// Terminates the calling green thread, publishing `ret` as its exit value.
// If this was the last active thread the whole process exits.
//
// NOTE(review): `t->finished = 1` is written outside any lock; a concurrent
// joiner that observes finished==1 and frees `t` would race with the
// `t->detached` read below — confirm the join path synchronizes on
// something stronger before relying on this ordering.
void bthread_exit(void* ret) {
    // Per-caller queue node for the MCS lock; must stay alive until unlock.
    struct mcs_lock_qnode local_qn = {0};
    // Lazily initialize the library on first use.
    bthread_once(&init_once,&_bthread_init);
    bthread_t t = bthread_self();
    mcs_lock_lock(&work_queue_lock, &local_qn);
    threads_active--;
    // Last thread out terminates the process. The lock is deliberately not
    // released on this path since exit() never returns.
    if(threads_active == 0)
        exit(0);
    mcs_lock_unlock(&work_queue_lock, &local_qn);
    if(t) {
        // Publish the return value, then mark completion for joiners.
        t->arg = ret;
        t->finished = 1;
        // Detached threads are never joined, so reclaim the descriptor now.
        if(t->detached)
            free(t);
    }
    // Does not return: hand the vcore back to the scheduler loop.
    vcore_entry();
}