/**
 * Sleep for a number of milliseconds, without any logging (IPRT POSIX backend).
 *
 * @param   cMillies    Number of milliseconds to sleep; 0 requests a bare
 *                      scheduler yield instead of a sleep.
 * @returns VINF_SUCCESS on success, or an IPRT status converted from errno.
 *
 * NOTE(review): on the generic branch the "return VINF_SUCCESS" below is
 * guarded by "if (!pthread_yield())", while on the Darwin/FreeBSD/Solaris/
 * Haiku branches it is unconditional — the #ifdef structure is deliberate
 * but easy to misread.  Also, if pthread_yield() fails, errno is not
 * necessarily set by it, so the RTErrConvertFromErrno(errno) at the end may
 * convert a stale errno — TODO confirm against upstream intent.
 */
RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
{
    if (!cMillies)
    {
        /* pthread_yield() isn't part of SuS, thus this fun. */
#ifdef RT_OS_DARWIN
        pthread_yield_np();
#elif defined(RT_OS_FREEBSD) /* void pthread_yield */
        pthread_yield();
#elif defined(RT_OS_SOLARIS) || defined(RT_OS_HAIKU)
        sched_yield();
#else
        if (!pthread_yield())
#endif
            return VINF_SUCCESS;
    }
    else
    {
        /* Split the interval into whole seconds and the nanosecond remainder. */
        struct timespec ts;
        struct timespec tsrem = {0,0};
        ts.tv_nsec = (cMillies % 1000) * 1000000;
        ts.tv_sec = cMillies / 1000;
        if (!nanosleep(&ts, &tsrem))
            return VINF_SUCCESS;
    }
    /* nanosleep (or the generic pthread_yield) failed: map errno to IPRT status. */
    return RTErrConvertFromErrno(errno);
}
static void* streamerThread(void* arg) { pthread_mutex_lock(&s_condMutex); while (!s_shutdown) { switch (internalStreamerIdle()) { case StreamerResult_Pending: { #if defined(__APPLE__) pthread_yield_np(); #else pthread_yield(); #endif } break; default: { pthread_cond_wait(&s_cond, &s_condMutex); } break; } } pthread_mutex_unlock(&s_condMutex); s_shutdown = 1; pthread_exit(0); }
/*
 * Thread#run implementation (MacRuby-style runtime, Darwin only given the
 * pthread_yield_np call).  Wakes the target thread, then yields the caller's
 * time slice so the woken thread gets a chance to run promptly, and returns
 * the receiver as Ruby's Thread#run contract requires.
 */
static VALUE rb_thread_run(VALUE thread, SEL sel)
{
    rb_vm_thread_wakeup(GetThreadPtr(thread));
    pthread_yield_np();
    return thread;
}
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- // Reset() //----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- void CFMachPortThread::Reset(CFMachPortCallBack portCallBack, void* userData) { // Wait for the thread to be running or invalid prior to tearing it down. while (kStarting == mState) pthread_yield_np(); if (kRunning == mState) { // Wait for the thread's run loop to be "waiting." This will avoid a small window of having set 'mState = kRunning' but not having invoked CFRunLoopRun() yet. while (not CFRunLoopIsWaiting(mRunLoop.GetCFObject())) pthread_yield_np(); // Stop the thread's run loop CFRunLoopStop(mRunLoop.GetCFObject()); // Set the thread's state to kStopping mState = kStopping; // Wait for the thread to run to completion (it will set the state to kInvalid) while (kStopping == mState) { (void) pthread_cond_signal(mStoppingCondition); pthread_yield_np(); } } mPort = MACH_PORT_NULL; mPortCallBack = 0; mUserData = 0; mPThread = 0; mRunLoop = 0; mState = kInvalid; if (0 != portCallBack) { typedef void* (*PThreadStart)(void*); mPortCallBack = portCallBack; mUserData = userData; mState = kStarting; int err = pthread_create(&mPThread, 0, reinterpret_cast<PThreadStart>(Start), this); ThrowIfError(err, CAException(err), "CMIO::PTA::CFMachPortThread::Reset: pthread_create() failed"); (void) pthread_detach(mPThread); } }
// Yield the remainder of the calling thread's time slice to the scheduler.
// pthread_yield() is non-standard; macOS only provides the _np variant,
// hence the platform split.
void Thread::yield() {
#ifdef __APPLE__
    pthread_yield_np();
#else
    pthread_yield();
#endif
}
/*
 * Yield the calling thread's time slice, using whichever non-standard
 * pthread yield function the configure step detected.
 *
 * NOTE(review): if neither HAVE_PTHREAD_YIELD nor HAVE_PTHREAD_YIELD_NP is
 * defined this silently compiles to a no-op — callers must not rely on an
 * actual reschedule for correctness.
 */
void Thread_Yield (void)
{
#if defined(HAVE_PTHREAD_YIELD)
    pthread_yield ();
#elif defined(HAVE_PTHREAD_YIELD_NP)
    pthread_yield_np ();
#endif
}
/*
 * Sleep for at least 'ms' milliseconds.
 *
 * Three compile-time strategies:
 *  - ENABLE_PTH: GNU Pth cooperative scheduler nap.
 *  - USE_NANOSLEEP: nanosleep(), which reports the unslept remainder on
 *    EINTR, so the loop simply retries with that remainder.
 *  - select() with a timeout; on platforms where select() does not update
 *    the timeout on interruption (!SELECT_SETS_REMAINING), the remaining
 *    time is recomputed manually from SDL_GetTicks().
 * In all non-Pth cases the call is retried while it fails with EINTR.
 */
void SDL_Delay (Uint32 ms)
{
#ifdef ENABLE_PTH
	pth_time_t tv;
	tv.tv_sec = ms/1000;
	tv.tv_usec = (ms%1000)*1000;
	pth_nap(tv);
#else
	int was_error;

#ifdef USE_NANOSLEEP
	struct timespec elapsed, tv;
#else
	struct timeval tv;
#ifndef SELECT_SETS_REMAINING
	Uint32 then, now, elapsed;
#endif
#endif

	/* Set the timeout interval - Linux only needs to do this once */
#ifdef SELECT_SETS_REMAINING
	tv.tv_sec = ms/1000;
	tv.tv_usec = (ms%1000)*1000;
#elif defined(USE_NANOSLEEP)
	elapsed.tv_sec = ms/1000;
	elapsed.tv_nsec = (ms%1000)*1000000;
#else
	then = SDL_GetTicks();
#endif
	do {
		/* Clear errno so a leftover EINTR cannot cause a spurious retry. */
		errno = 0;
#if _POSIX_THREAD_SYSCALL_SOFT
		pthread_yield_np();
#endif
#ifdef USE_NANOSLEEP
		/* nanosleep writes the remaining time into 'elapsed' on EINTR. */
		tv.tv_sec = elapsed.tv_sec;
		tv.tv_nsec = elapsed.tv_nsec;
		was_error = nanosleep(&tv, &elapsed);
#else
#ifndef SELECT_SETS_REMAINING
		/* Calculate the time interval left (in case of interrupt) */
		now = SDL_GetTicks();
		elapsed = (now-then);
		then = now;
		if ( elapsed >= ms ) {
			break;
		}
		ms -= elapsed;
		tv.tv_sec = ms/1000;
		tv.tv_usec = (ms%1000)*1000;
#endif
		/* select() with no fds is a portable sub-second sleep. */
		was_error = select(0, NULL, NULL, NULL, &tv);
#endif /* USE_NANOSLEEP */
	} while ( was_error && (errno == EINTR) );
#endif /* ENABLE_PTH */
}
/*
 * Yield the calling thread's time slice, preferring the _np variant when
 * the build detected it (e.g. Darwin), falling back to pthread_yield().
 * Return values are intentionally discarded: a failed yield is harmless.
 * Compiles to a no-op if neither HAVE_ macro is defined.
 */
void SNetThreadingYield(void)
{
#ifdef HAVE_PTHREAD_YIELD_NP
    (void) pthread_yield_np();
#else
#ifdef HAVE_PTHREAD_YIELD
    (void) pthread_yield();
#endif
#endif
}
// Yield the remainder of the current thread's time slice, dispatching on
// the configure-detected pthread yield variant.  If neither CVD_HAVE_ macro
// is defined this is deliberately a no-op (see the retained warning below).
void Thread::yield() {
#ifdef CVD_HAVE_PTHREAD_YIELD
    pthread_yield();
#elif defined(CVD_HAVE_PTHREAD_YIELD_NP)
    pthread_yield_np();
#else
    //#warning "Thread::yield() not implemented"
#endif
}
/*
 * Yield the processor, preferring the POSIX-standard sched_yield() over the
 * non-standard pthread variants.  Silently a no-op if the build detected
 * none of the three.
 */
void isc_thread_yield(void) {
#if defined(HAVE_SCHED_YIELD)
	sched_yield();
#elif defined( HAVE_PTHREAD_YIELD)
	pthread_yield();
#elif defined( HAVE_PTHREAD_YIELD_NP)
	pthread_yield_np();
#endif
}
// Yield the calling thread's time slice.  Platform dispatch:
// Linux/Cygwin use pthread_yield(), macOS only has pthread_yield_np(),
// and MinGW intentionally does nothing.  Any other platform is a
// compile-time error so new ports must make an explicit choice.
void tThread::yield() {
#if __linux__ || __CYGWIN__
    pthread_yield();
#elif __APPLE__
    pthread_yield_np();
#elif __MINGW32__
    // No op.
#else
#error What platform are you on!?
#endif
}
//----------------------------------------------------------------// void MOAIThreadImpl::Sleep () { #if defined( __APPLE__ ) pthread_yield_np (); #elif defined( __linux ) #if defined( ANDROID ) | defined( NACL ) sched_yield (); #else pthread_yield (); #endif #else #error "No pthread yield function defined for this platform." #endif }
/*
 * Rendezvous at the start-of-run barrier.
 *
 * First yields the CPU (preferring POSIX sched_yield() when priority
 * scheduling is available, falling back to the platform pthread yield),
 * then blocks on a->barrier_start until all participating threads arrive.
 * pthread_barrier_wait() returns 0 for ordinary waiters and
 * PTHREAD_BARRIER_SERIAL_THREAD for exactly one waiter; anything else is a
 * real error code and is treated as fatal.
 */
static void ia_sync_start(ia *a)
{
#ifdef _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#elif defined(__APPLE__) || defined(__MACH__)
	pthread_yield_np();
#else
	pthread_yield();
#endif
	int rc = pthread_barrier_wait(&a->barrier_start);
	if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) {
		/* rc is an errno-style code, not -1/errno — hence strerror(rc). */
		ia_log("error: pthread_barrier_wait %s (%d)", strerror(rc), rc);
		ia_fatal(__func__);
	}
}
/**
 * Yield the CPU (IPRT POSIX backend).
 *
 * @returns true if the thread appears to have actually been descheduled,
 *          false otherwise.  On x86/AMD64 this is decided by a TSC delta
 *          heuristic: if more than 1500 TSC ticks elapsed across the yield
 *          call, some other code presumably ran.  On other architectures
 *          the function optimistically returns true (see PORTME note).
 */
RTDECL(bool) RTThreadYield(void)
{
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    /* Timestamp before the yield so we can measure how long it took. */
    uint64_t u64TS = ASMReadTSC();
#endif
#ifdef RT_OS_DARWIN
    pthread_yield_np();
#elif defined(RT_OS_SOLARIS) || defined(RT_OS_HAIKU)
    sched_yield();
#else
    pthread_yield();
#endif
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    u64TS = ASMReadTSC() - u64TS;
    /* 1500 ticks is an empirical threshold for "something else ran". */
    bool fRc = u64TS > 1500;
    LogFlow(("RTThreadYield: returning %d (%llu ticks)\n", fRc, u64TS));
#else
    bool fRc = true; /* PORTME: Add heuristics for determining whether the cpus was yielded. */
#endif
    return fRc;
}
/**
 * Sleep for a number of milliseconds, with LogFlow tracing (IPRT POSIX
 * backend; logging twin of RTThreadSleepNoLog).
 *
 * @param   cMillies    Milliseconds to sleep; 0 requests a scheduler yield.
 * @returns VINF_SUCCESS on success, or an IPRT status converted from errno.
 *
 * NOTE(review): the success block after the #ifdef ladder is conditional on
 * "if (!pthread_yield())" only on the generic branch; on Darwin/FreeBSD/
 * Solaris/Haiku it always executes.  If pthread_yield() fails, errno may be
 * stale when converted below — TODO confirm against upstream intent.
 */
RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
{
    LogFlow(("RTThreadSleep: cMillies=%d\n", cMillies));
    if (!cMillies)
    {
        /* pthread_yield() isn't part of SuS, thus this fun. */
#ifdef RT_OS_DARWIN
        pthread_yield_np();
#elif defined(RT_OS_FREEBSD) /* void pthread_yield */
        pthread_yield();
#elif defined(RT_OS_SOLARIS) || defined(RT_OS_HAIKU)
        sched_yield();
#else
        if (!pthread_yield())
#endif
        {
            LogFlow(("RTThreadSleep: returning %Rrc (cMillies=%d)\n", VINF_SUCCESS, cMillies));
            return VINF_SUCCESS;
        }
    }
    else
    {
        /* Split the interval into whole seconds and the nanosecond remainder. */
        struct timespec ts;
        struct timespec tsrem = {0,0};
        ts.tv_nsec = (cMillies % 1000) * 1000000;
        ts.tv_sec = cMillies / 1000;
        if (!nanosleep(&ts, &tsrem))
        {
            LogFlow(("RTThreadSleep: returning %Rrc (cMillies=%d)\n", VINF_SUCCESS, cMillies));
            return VINF_SUCCESS;
        }
    }
    /* nanosleep (or generic pthread_yield) failed: map errno to IPRT status. */
    int rc = RTErrConvertFromErrno(errno);
    LogFlow(("RTThreadSleep: returning %Rrc (cMillies=%d)\n", rc, cMillies));
    return rc;
}
/*
 * Thread.pass implementation: give other threads a chance to run by
 * yielding the caller's time slice (Darwin pthread_yield_np), then return
 * nil as Ruby's Thread.pass contract requires.
 */
static VALUE thread_s_pass(VALUE klass, SEL sel)
{
    pthread_yield_np();
    return Qnil;
}
int QDecodeStream::decoder_pps_psp(int platform,int encode) /* * platform : A5S66 HI3516A * encode : H264 H265 */ { int count = 25; int consume_no = user_no; char *buffer; buffer = (char *)malloc(sizeof(char)*1024*1024); consume->read_init(); while(count > 0) { count--; int len = 0; if(consume == NULL) { printf("current consume %d is not exist. \n",consume_no); return NULL; } len = consume->read_data_to_buffer(buffer); if(len < 0) { int count = sliding_window->consume_linklist_delete(consume_no); sliding_window->consume_linklist_isEmpty(); printf("delete no:%d,cur count:%d \n",consume_no,count); return NULL; } #if defined(Q_OS_WIN32) usleep(1000); #elif defined(Q_OS_MACX) pthread_yield_np(); #elif defined(Q_OS_UNIX) pthread_yield(); #endif //根据码流获取PPS和PSP unsigned int *nual_head; nual_head = (unsigned int *)buffer; int pps_start,pps_len = 0; int psp_start,psp_len = 0; for(int i = 0;i < len;i += 1) { nual_head = (unsigned int *)(buffer + i); //printf("nual head:%x \n",*(nual_head)); if((*nual_head) == 0x01000000) { printf("%02x \n",(unsigned char)(*(buffer+i+4))); if((*(buffer+i+4)) == 0x67) { pps_start = i; } if(*(buffer + i + 4) == 0x68) { psp_start = i; pps_len = psp_start - pps_start; psp_len = 8; break; } } } char *pps = (char *)malloc(sizeof(char) * pps_len); memcpy(pps,(buffer + pps_start),pps_len); char *psp = (char *)malloc(sizeof(char) * psp_len); memcpy(psp,(buffer + psp_start),psp_len); decode_h264.codecCtx->extradata = (uint8_t *)malloc(sizeof(uint8_t) *(pps_len + psp_len)); memcpy(decode_h264.codecCtx->extradata,pps,pps_len); memcpy(decode_h264.codecCtx->extradata + pps_len,psp,psp_len); decode_h264.codecCtx->extradata_size = pps_len + psp_len; printf("\n extern_data:"); for(int i = 0; i < pps_len + psp_len;i++) { if(i % 4 == 0) printf("\n"); printf("%02x ",decode_h264.codecCtx->extradata[i]); } printf("\n"); delete pps; delete psp; return (pps_len + psp_len); } return 0; }
/***************************************************
 * Decode thread entry point.
 *
 * `ptr` is the owning QDecodeStream.  Pins the thread to CPUs 2-5 via
 * taskset, then loops forever: read one buffer from the consumer, decode
 * it, and yield/sleep between iterations.  Returns NULL when the consumer
 * disappears or reports an error.
 *
 * Fixes vs. the previous version:
 *  - printf/sprintf used %d with the `long` result of syscall() and with
 *    tv_usec (format-string UB); now %ld with explicit long values.
 *  - `buffer` was leaked on both return paths; it is now freed.
 *  - malloc results are checked; sprintf replaced by bounded snprintf.
 *
 * NOTE(review): pthread_testcancel() can terminate this thread while
 * `buffer` is live, leaking it — a cleanup handler
 * (pthread_cleanup_push/pop) would close that hole; confirm whether
 * cancellation is actually used before adding one.
 ***************************************************
 */
void *QDecodeStream::getraw_run(void *ptr)
{
    QDecodeStream *pthis = (QDecodeStream *)ptr;
    int consume_no = pthis->user_no;
    char *buffer = (char *)malloc(sizeof(char) * 1024 * 1024);
    if (buffer == NULL) {
        printf("getraw_run: out of memory \n");
        return NULL;
    }
    pthis->consume->read_init();
    long tid = (long)syscall(SYS_gettid);
    printf("decode pthread start! pid:%ld \n", tid);
    /* Bind this thread to CPUs 2-5 (mask 0x3c = 0b00111100) via taskset.
     * The command string is fixed apart from our own tid, so system() is
     * not exposed to external input here. */
    char *cmd_system = (char *)malloc(sizeof(char) * 1024);
    if (cmd_system != NULL) {
        snprintf(cmd_system, 1024, "/bin/taskset -p 0x3c %ld", tid);
        system(cmd_system);
        free(cmd_system);
    }
    while (1) {
        if (pthis->consume == NULL) {
            printf("current consume %d is not exist. \n", consume_no);
            free(buffer);
            return NULL;
        }
        struct timeval start_time, stop_time, diff;
        if (pthis->debug_enable == 1) {
            gettimeofday(&start_time, 0);
        }
        int len = pthis->consume->read_data_to_buffer(buffer);
        if (pthis->debug_enable == 1) {
            gettimeofday(&stop_time, 0);
            pthis->timeval_subtract(&diff, &start_time, &stop_time);
            printf("read data time:%ld uS\n", (long)diff.tv_usec);
        }
        if (len < 0) {
            int count = pthis->sliding_window->consume_linklist_delete(consume_no);
            pthis->sliding_window->consume_linklist_isEmpty();
            printf("delete no:%d,cur count:%d \n", consume_no, count);
            free(buffer);
            return NULL;
        }
        pthread_testcancel();
        if (pthis->debug_enable == 1) {
            gettimeofday(&start_time, 0);
        }
        pthis->decode_main_frame((uint8_t *)buffer, len);
        if (pthis->debug_enable == 1) {
            gettimeofday(&stop_time, 0);
            pthis->timeval_subtract(&diff, &start_time, &stop_time);
            printf("decode time:%ld uS\n", (long)diff.tv_usec);
        }
#if defined(Q_OS_WIN32)
        usleep(1000);
#elif defined(Q_OS_MACX)
        pthread_yield_np();
#elif defined(Q_OS_UNIX)
        pthread_yield();
#endif
    }
}
/*
 * Thread#join implementation (MacRuby-style runtime).
 *
 * With no timeout: poll the target thread's status every 10ms (nanosleep +
 * yield) until it is dead, bailing out early if it was killed while waiting
 * on a mutex.  With a timeout: decompose the interval into <=100ms
 * nanosleep slices, checking the thread's status after each slice, and
 * return nil if the timeout expires first.  On a joined dead thread that
 * died with an exception, re-raise that exception in the caller.
 *
 * NOTE(review): the polling loops read t->status without any visible
 * locking; presumably the field is updated atomically enough for this
 * poll-based join — confirm against the rest of the runtime.
 */
static VALUE thread_join_m(VALUE self, SEL sel, int argc, VALUE *argv)
{
    VALUE timeout;
    rb_scan_args(argc, argv, "01", &timeout);
    rb_vm_thread_t *t = GetThreadPtr(self);
    if (t->status != THREAD_DEAD) {
        if (timeout == Qnil) {
            // No timeout given: block until the thread finishes.
            //pthread_assert(pthread_join(t->thread, NULL));
            struct timespec ts;
            ts.tv_sec = 0;
            ts.tv_nsec = 10000000;      // 10ms poll interval
            while (t->status != THREAD_DEAD) {
                nanosleep(&ts, NULL);
                pthread_yield_np();
                // A killed thread stuck waiting for a mutex will never
                // reach THREAD_DEAD on its own; treat it as joined.
                if (t->status == THREAD_KILLED && t->wait_for_mutex_lock) {
                    goto dead;
                }
            }
        }
        else {
            // Timeout given: sleep and check if the thread is dead.
            struct timeval tv = rb_time_interval(timeout);
            struct timespec ts;
            ts.tv_sec = tv.tv_sec;
            ts.tv_nsec = tv.tv_usec * 1000;
            // Normalize the nanosecond field into [0, 1e9).
            while (ts.tv_nsec >= 1000000000) {
                ts.tv_sec += 1;
                ts.tv_nsec -= 1000000000;
            }
            // Consume the remaining time in slices of at most 100ms so the
            // thread status can be re-checked between slices.
            while (ts.tv_sec > 0 || ts.tv_nsec > 0) {
                struct timespec its;
again:
                if (ts.tv_nsec > 100000000) {
                    ts.tv_nsec -= 100000000;
                    its.tv_sec = 0;
                    its.tv_nsec = 100000000;
                }
                else if (ts.tv_sec > 0) {
                    // Borrow a second into the nanosecond field and retry.
                    ts.tv_sec -= 1;
                    ts.tv_nsec += 1000000000;
                    goto again;
                }
                else {
                    // Less than 100ms left: sleep exactly the remainder.
                    its = ts;
                    ts.tv_sec = ts.tv_nsec = 0;
                }
                nanosleep(&its, NULL);
                if (t->status == THREAD_DEAD) {
                    goto dead;
                }
                if (t->status == THREAD_KILLED && t->wait_for_mutex_lock) {
                    goto dead;
                }
            }
            // Timed out before the thread died.
            return Qnil;
        }
    }
dead:
    // If the thread was terminated because of an exception, we need to
    // propagate it.
    if (t->exception != Qnil) {
        t->joined_on_exception = true;
        rb_exc_raise(t->exception);
    }
    return self;
}
/*
 * Give other threads a chance to run by yielding the caller's time slice
 * (Darwin pthread_yield_np).  Unlike the green-thread original of this
 * API, there is no scheduler to invoke here — the kernel does the work.
 */
void rb_thread_schedule(void)
{
    pthread_yield_np();
}