int main(int argc, char** argv)
{
    fl_lock_t lock;
    int r;

    lock = 0;
    printf("starting locking basic tests...\n");

    r = try_lock(&lock);
    printf(" try_lock should return 0 ... %d\n", r);
    printf(" lock should be 1 now ... %d\n", lock);

    r = try_lock(&lock);
    printf(" tsl should return -1 ... %d\n", r);
    printf(" lock should still be 1 now ... %d\n", lock);

    release_lock(&lock);
    printf(" release_lock: lock should be 0 now ... %d\n", lock);

    printf("try_lock once more...\n");
    r = try_lock(&lock);
    printf(" try_lock should return 0 ... %d\n", r);
    printf(" lock should be 1 now ... %d\n", lock);
    release_lock(&lock);

    get_lock(&lock);
    printf(" get_lock, lock should be 1 now ... %d\n", lock);

    printf("\ndone.\n");
    return 0;
}
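/*
 * A minimal sketch (an assumption, not the project's actual implementation)
 * of the primitives the test above exercises: try_lock() returns 0 when it
 * takes the lock and -1 when the lock is already held, release_lock() clears
 * it, and get_lock() spins until it succeeds.  Built on GCC's atomic
 * builtins; fl_lock_t is assumed to be a plain int flag (0 = free, 1 = held).
 */
typedef volatile int fl_lock_t;

static int try_lock(fl_lock_t *lock)
{
    /* atomically set *lock to 1; the previous value tells us if it was free */
    return __sync_lock_test_and_set(lock, 1) == 0 ? 0 : -1;
}

static void release_lock(fl_lock_t *lock)
{
    __sync_lock_release(lock);          /* store 0 with release semantics */
}

static void get_lock(fl_lock_t *lock)
{
    while (try_lock(lock) != 0)
        ;                               /* spin until the lock is acquired */
}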
int main(int argc, char *argv[])
{
    sem_t *mutex;   /* note: never initialized here; a real program would obtain
                       it via sem_open()/sem_init() before forking.  Also note the
                       mismatch between try_lock(&mutex) and try_unlock(mutex). */
    int i = 1;
    pid_t pid = fork();

    if (pid) {
        /* parent */
        while (i < 10) {
            printf("waiting [%d]......\n", getpid());
            try_lock(&mutex);
            // printf("get mutex, process:%s, pid:%d, i:%d\n", argv[0], getpid(), i);
            printf("lock [%d]......\n", getpid());
            sleep(1);
            i++;
            printf("unlock [%d]......\n", getpid());
            try_unlock(mutex);
        }
    } else {
        /* child: same loop as the parent */
        while (i < 10) {
            printf("waiting [%d]......\n", getpid());
            try_lock(&mutex);
            // printf("get mutex, process:%s, pid:%d, i:%d\n", argv[0], getpid(), i);
            printf("lock [%d]......\n", getpid());
            sleep(1);
            i++;
            printf("unlock [%d]......\n", getpid());
            try_unlock(mutex);
        }
    }

    return 0;
}
bool lock(chrono::micros timeout /* = micros(0) */)
{
    bool locked = try_lock();
    if (!locked) {
        auto start = clock::now_us();
        do {
            locked = try_lock();
        } while (!locked && clock::now_us() - start <= timeout);
    }
    return locked;
}
int modify_logic(BW *bw, B *b)
{
    if (last_time > b->check_time + CHECK_INTERVAL) {
        b->check_time = last_time;
        if (!nomodcheck && !b->gave_notice && check_mod(b)) {
            file_changed(bw, 0, b, NULL);
            return 0;
        }
    }

    if (b != bw->b) {
        if (!b->didfirst) {
            /* This happens when we try to block move from a window which is not on the screen */
            if (bw->o.mfirst) {
                msgnw(bw->parent, joe_gettext(_("Modify other window first for macro")));
                return 0;
            }
            b->didfirst = 1;
            if (bw->o.mfirst)
                exmacro(bw->o.mfirst, 1);
        }
        if (b->rdonly) {
            msgnw(bw->parent, joe_gettext(_("Other buffer is read only")));
            if (joe_beep)
                ttputc(7);
            return 0;
        } else if (!b->changed && !b->locked) {
            if (!try_lock(bw, b))
                return 0;
        }
    } else {
        if (!b->didfirst) {
            b->didfirst = 1;
            if (bw->o.mfirst)
                exmacro(bw->o.mfirst, 1);
        }
        if (b->rdonly) {
            msgnw(bw->parent, joe_gettext(_("Read only")));
            if (joe_beep)
                ttputc(7);
            return 0;
        } else if (!b->changed && !b->locked) {
            if (!try_lock(bw, b))
                return 0;
        }
    }
    return 1;
}
struct page* alloc_single_page(struct page *next)
{
    struct page *p = NULL;
    /* pthread_t pt = pthread_self(); */

    list_id = get_next_random_list(MAXLISTS);
    while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
        list_id = get_next_random_list(MAXLISTS);

    /*if( single_pages[Hash(pt)%MAXLISTS].page_count == 0 ){*/
    if (single_pages[list_id % MAXLISTS].page_count == 0)
        p = alloc_new(PAGE_GROUP_SIZE, NULL);
    add_single_pages(p);

    /*p = single_pages[Hash(pt)%MAXLISTS].pages;*/
    p = single_pages[list_id % MAXLISTS].pages;
    /*single_pages[Hash(pt)%MAXLISTS].pages = p->next;*/
    single_pages[list_id % MAXLISTS].pages = p->next;
    p->next = next;
    /*single_pages[Hash(pt)%MAXLISTS].page_count--;*/
    single_pages[list_id % MAXLISTS].page_count--;

    /*release_spinlock( &single_pages[Hash(pt)%MAXLISTS].lock );*/
    release_spinlock(&single_pages[list_id % MAXLISTS].lock);
    /*list_id++;*/
    return p;
}
struct page* alloc_pages(int n, struct page *next)
{
    /* pthread_t pt = pthread_self(); */
    struct page *ret_val, *p = NULL;

    assert(n >= K);

    list_id = get_next_random_list(MAXLISTS);
    while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
        list_id = get_next_random_list(MAXLISTS);

    /*if( n > single_pages[Hash(pt)%MAXLISTS].page_count ){*/
    if (n > single_pages[list_id % MAXLISTS].page_count)
        p = alloc_new(n + PAGE_GROUP_SIZE, NULL);
    add_single_pages(p);

    /*ret_val = single_pages[Hash(pt)%MAXLISTS].pages;*/
    /*single_pages[Hash(pt)%MAXLISTS].pages =*/
    /*single_pages[Hash(pt)%MAXLISTS].pages->next;*/
    ret_val = single_pages[list_id % MAXLISTS].pages;
    single_pages[list_id % MAXLISTS].pages = single_pages[list_id % MAXLISTS].pages->next;
    ret_val->next = next;
    /*single_pages[Hash(pt)%MAXLISTS].page_count -= n;*/
    single_pages[list_id % MAXLISTS].page_count -= n;

    /*release_spinlock( &single_pages[Hash(pt)%MAXLISTS].lock );*/
    release_spinlock(&single_pages[list_id % MAXLISTS].lock);
    /*list_id++;*/
    return ret_val;
}
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)
{
    sys_mutex.trace("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);

    const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
    {
        return mutex.try_lock(ppu.id);
    });

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (mutex.ret)
    {
        if (mutex.ret == CELL_EBUSY)
        {
            return not_an_error(CELL_EBUSY);
        }

        return mutex.ret;
    }

    return CELL_OK;
}
void free_single_page(region_t r, struct page *p)
/* Assumes freepages_lock held */
{
    /* pthread_t pt = pthread_self(); */
#ifndef NMEMDEBUG
    ASSERT_INUSE(p, r);
    set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */

    list_id = get_next_random_list(MAXLISTS);
    while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
        list_id = get_next_random_list(MAXLISTS);

    p->next = single_pages[list_id].pages;
    single_pages[list_id].pages = p;
    single_pages[list_id].page_count++;
    release_spinlock(&single_pages[list_id].lock);

    /*acquire_spinlock1( &single_pages[p->list_id].lock );*/
    /*p->next = single_pages[p->list_id].pages;*/
    /*single_pages[p->list_id].pages = p;*/
    /*single_pages[p->list_id].page_count++;*/
    /*release_spinlock( &single_pages[p->list_id].lock );*/
    /*p->next = single_pages[Hash(pt)%MAXLISTS].pages;*/
    /*single_pages[Hash(pt)%MAXLISTS].pages = p;*/
    /*single_pages[Hash(pt)%MAXLISTS].page_count++;*/
}
void lock()
{
    for (unsigned k = 0; !try_lock(); ++k)
    {
        geofeatures_boost::detail::yield(k);
    }
}
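// The spinlock loops above hand their retry count k to a detail::yield(k)
// helper.  The sketch below shows a generic progressive backoff of that
// shape (an illustration of the idea, not the exact Boost implementation):
// spin briefly, then give up the time slice, then sleep so a long wait does
// not burn a whole core.
#include <chrono>
#include <thread>

inline void backoff_yield(unsigned k)
{
    if (k < 16) {
        // busy-wait: cheapest when the holder releases almost immediately
    } else if (k < 32) {
        std::this_thread::yield();      // let another runnable thread go first
    } else {
        std::this_thread::sleep_for(std::chrono::microseconds(100));  // back off harder
    }
}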
void lock()
{
    for (unsigned k = 0; !try_lock(); ++k)
    {
        hexerboost::detail::yield(k);
    }
}
void test_try_lock()
{
    boost::fibers::round_robin ds;
    boost::fibers::scheduling_algorithm(&ds);

    try_lock();
}
bool mutex::timed_lock(system_time const& abs_time)
{
    if (abs_time.is_infinity())
    {
        lock();
        return true;
    }

    if (get_system_time() >= abs_time)
        return false;

    for (;;)
    {
        if (try_lock())
            break;

        if (get_system_time() >= abs_time)
            return false;

        this_thread::interruption_point();
        if (this_task::runs_in_pool())
            this_task::yield();
        else
            this_thread::yield();
        this_thread::interruption_point();
    }
    return true;
}
void lock()
{
    for (unsigned k = 0; !try_lock(); ++k)
    {
        cppcms_boost::detail::yield(k);
    }
}
bool LockDirectory(const fs::path& directory, const std::string lockfile_name, bool probe_only)
{
    std::lock_guard<std::mutex> ulock(cs_dir_locks);
    fs::path pathLockFile = directory / lockfile_name;

    // If a lock for this directory already exists in the map, don't try to re-lock it
    if (dir_locks.count(pathLockFile.string())) {
        return true;
    }

    // Create empty lock file if it doesn't exist.
    FILE* file = fsbridge::fopen(pathLockFile, "a");
    if (file) fclose(file);

    try {
        auto lock = MakeUnique<boost::interprocess::file_lock>(pathLockFile.string().c_str());
        if (!lock->try_lock()) {
            return false;
        }
        if (!probe_only) {
            // Lock successful and we're not just probing, put it into the map
            dir_locks.emplace(pathLockFile.string(), std::move(lock));
        }
    } catch (const boost::interprocess::interprocess_exception& e) {
        return error("Error while attempting to lock directory %s: %s", directory.string(), e.what());
    }
    return true;
}
int okl4_mutex_trylock(okl4_mutex_t m)
{
    L4_Word_t me = L4_Myself().raw;

    return !try_lock(m, me);
}
Boolean Mutex::timed_lock(Uint32 milliseconds)
{
    struct timeval now;
    struct timeval finish;
    struct timeval remaining;

    {
        Uint32 usec;
        gettimeofday(&finish, NULL);
        finish.tv_sec += (milliseconds / 1000);
        milliseconds %= 1000;
        usec = finish.tv_usec + (milliseconds * 1000);
        finish.tv_sec += (usec / 1000000);
        finish.tv_usec = usec % 1000000;
    }

    while (!try_lock())
    {
        gettimeofday(&now, NULL);

        if (Time::subtract(&remaining, &finish, &now))
        {
            return false;
        }

        Threads::yield();
    }

    return true;
}
bool spin_lock::try_lock_until(const fc::time_point& abs_time)
{
    while (abs_time > time_point::now())
    {
        if (try_lock())
            return true;
    }
    return false;
}
void lock()
{
    for (unsigned k = 0; !try_lock(); ++k)
    {
        network_boost::detail::yield(k);
    }
}
static int flash_nvram_write(uint32_t dst, void *src, uint32_t len)
{
    int rc;

    if (!try_lock(&flash_lock))
        return OPAL_BUSY;

    if (nvram_flash->busy) {
        rc = OPAL_BUSY;
        goto out;
    }

    /* TODO: When we have async jobs for PRD, turn this into one */

    if ((dst + len) > nvram_size) {
        prerror("FLASH_NVRAM: write out of bound (0x%x,0x%x)\n", dst, len);
        rc = OPAL_PARAMETER;
        goto out;
    }

    nvram_flash->busy = true;
    unlock(&flash_lock);

    rc = blocklevel_write(nvram_flash->bl, nvram_offset + dst, src, len);

    lock(&flash_lock);
    nvram_flash->busy = false;
out:
    unlock(&flash_lock);

    return rc;
}
void ClientModel::updateTimer()
{
    // Get required lock upfront. This avoids the GUI from getting stuck on
    // periodical polls if the core is holding the locks for a longer time -
    // for example, during a wallet rescan.
    TRY_LOCK(cs_main, lockMain);
    if (!lockMain)
        return;

    // Some quantities (such as number of blocks) change so fast that we don't want to be notified for each change.
    // Periodically check and update with a timer.
    int newNumBlocks = getNumBlocks();
    QDateTime newBlockDate = getLastBlockDate();

    // Check for changed number of blocks we have, number of blocks peers claim to have, reindexing state and importing state
    if (cachedNumBlocks != newNumBlocks || cachedBlockDate != newBlockDate ||
        cachedReindexing != fReindex || cachedImporting != fImporting)
    {
        cachedNumBlocks = newNumBlocks;
        cachedBlockDate = newBlockDate;
        cachedReindexing = fReindex;
        cachedImporting = fImporting;

        emit numBlocksChanged(newNumBlocks, newBlockDate);
    }

    emit bytesChanged(getTotalBytesRecv(), getTotalBytesSent());
}
static void process_queue()
{
    if (next_req < 1)
        return; /* no locks queued */

    while (try_lock())
        ;
}
void mutex_lock(mutex_t *m)
{
    L4_Word_t me = L4_Myself().raw;

    while (!try_lock(m, me)) {
        L4_ThreadSwitch(L4_nilthread);
    }
}
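/*
 * Sketch of the try_lock(m, me) helper the OKL4-style mutex routines in this
 * collection assume (an illustration, not the actual library code): claim the
 * holder word with an atomic compare-and-swap, so the lock is taken only when
 * no thread currently owns it.  The 0 "unowned" value and the holder field
 * layout are assumptions.
 */
static int try_lock(mutex_t *m, L4_Word_t me)
{
    /* returns non-zero on success, zero if another thread already holds it */
    return __sync_bool_compare_and_swap(&m->holder, 0, me);
}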
virtual lock_status timed_lock(mseconds_t milliseconds)
{
    lock_status status = lock_failed;

    if (0 > milliseconds) {
        status = untimed_lock();
    } else {
#if defined(PTHREAD_MUTEX_HAS_TIMEDLOCK)
        attached_t detached = 0;
        if ((detached = this->attached_to())) {
            struct timespec until_time = timed_until_time(milliseconds);
            int err = 0;
            /*struct timespec until_time;
            clock_gettime(CLOCK_REALTIME, &until_time);
            until_time.tv_sec += milliseconds/1000;
            until_time.tv_nsec += (milliseconds%1000)*1000; */

            if (500 > milliseconds) {
                IS_LOGGING_TRACE("pthread_mutex_timedlock(detached, &until_time)...");
            } else {
                IS_LOGGING_DEBUG("pthread_mutex_timedlock(detached, &until_time)...");
            }

            if (!(err = pthread_mutex_timedlock(detached, &until_time))) {
                if (500 > milliseconds) {
                    IS_LOGGING_TRACE("...pthread_mutex_timedlock(detached, &until_time)");
                } else {
                    IS_LOGGING_DEBUG("...pthread_mutex_timedlock(detached, &until_time)");
                }
                return lock_success;
            } else {
                if (ETIMEDOUT != (err)) {
                    if (EINTR != (err)) {
                        IS_LOGGING_ERROR("...failed err = " << err << " on pthread_mutex_timedlock(detached, &until_time)");
                    } else {
                        IS_LOGGING_ERROR("...failed EINTR err = " << err << " on pthread_mutex_timedlock(detached, &until_time)");
                        return lock_interrupted;
                    }
                } else {
                    if (500 > milliseconds) {
                        IS_LOGGING_TRACE("...failed ETIMEDOUT err = " << err << " on pthread_mutex_timedlock(detached, &until_time)");
                    } else {
                        IS_LOGGING_DEBUG("...failed ETIMEDOUT err = " << err << " on pthread_mutex_timedlock(detached, &until_time)");
                    }
                    return lock_busy;
                }
            }
        }
#else // defined(PTHREAD_MUTEX_HAS_TIMEDLOCK)
        if (milliseconds) {
            IS_LOGGING_ERROR("...invalid pthread_mutex_timedlock(detached, ...)");
            status = lock_invalid;
        } else {
            status = try_lock();
        }
#endif // defined(PTHREAD_MUTEX_HAS_TIMEDLOCK)
    }
    return status;
}
bool zkmutex::lock()
{
    pfi::concurrent::scoped_lock lk(m_);
    LOG(ERROR) << "not implemented:" << __func__;

    while (!has_lock_) {
        if (try_lock())
            break;
        sleep(1);
    }
    return true;
}
void CSpinLock::lock()
{
    for (;; CSpinLockBackoff()) {
        if (lockable() && try_lock()) {
            break;
        }
    }
}
bool EdenStateDir::acquireLock()
{
    const auto lockPath = path_ + PathComponentPiece{kLockFileName};
    auto lockFile = folly::File(lockPath.value(), O_WRONLY | O_CREAT | O_CLOEXEC);
    if (!lockFile.try_lock()) {
        return false;
    }

    takeoverLock(std::move(lockFile));
    return true;
}
void lock()
{
    HPX_ITT_SYNC_PREPARE(this);
    for (std::size_t k = 0; !try_lock(); ++k)
    {
    }
    HPX_ITT_SYNC_ACQUIRED(this);
    util::register_lock(this);
}
void okl4_mutex_lock(okl4_mutex_t m)
{
    L4_Word_t me = L4_Myself().raw;
    L4_ThreadId_t holder;

    holder.raw = m->holder;
    while (!try_lock(m, me)) {
        L4_ThreadSwitch(holder);
    }
}
void task_b()
{
    int x = try_lock(bar, foo);
    if (x == -1) {
        std::cout << "task b\n";
        // ...
        bar.unlock();
        foo.unlock();
    } else {
        std::cout << "[task b failed: mutex " << (x ? "foo" : "bar") << " locked]\n";
    }
}
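// Companion sketch for the snippet above (assumed context, not part of the
// original): task_b relies on two std::mutex objects, foo and bar, and is
// usually paired with a task_a that tries them in the opposite order.
// std::try_lock returns -1 when every mutex was acquired; otherwise it
// returns the 0-based index of the first mutex it failed to lock and
// releases any it had already taken, so neither task can deadlock.
#include <iostream>
#include <mutex>

std::mutex foo, bar;

void task_a()
{
    int x = std::try_lock(foo, bar);    // attempt both locks without blocking
    if (x == -1) {                      // both acquired
        std::cout << "task a\n";
        foo.unlock();
        bar.unlock();
    } else {
        std::cout << "[task a failed: mutex " << (x ? "bar" : "foo") << " locked]\n";
    }
}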
int send_info(char *username, int type)
{
    pid_t current_pid = getpid();
    pid_t current_tid = gettid();
    dbgov_proc_time item1;
    dbgov_iostat item2;

    get_proc_time(&item1, current_pid, current_tid);
    get_io_stat(&item2, current_pid, current_tid);

#ifdef TEST
    //printf("Prepare info PID %d TID %d CPU %lld R+W %lld\n", current_pid, current_tid, item1.stime + item1.utime, item2.read_bytes+item2.write_bytes);
#endif

    struct timespec tim;
    clock_gettime(CLOCK_REALTIME, &tim);

    client_data snd;
    snd.type = type;
    strlcpy(snd.username, username, sizeof(snd.username));
    snd.pid = current_pid;
    snd.tid = current_tid;
    snd.read = item2.read_bytes;
    snd.write = item2.write_bytes;
    snd.cpu = item1.stime + item1.utime;
    snd.update_time = tim.tv_sec;
    snd.naoseconds = tim.tv_nsec;

    if (try_lock(&mtx_write))
        return -1;

    /*if (!sd.status) {
        close(sd.socket);
        if (connect_to_server_in() < 0) {
            pthread_mutex_unlock(&mtx_write);
            return -1;
        }
    }*/
    //pthread_mutex_unlock(&mtx_write);
    //if (try_lock(&mtx_write)) return -1;

    if (send(sd.socket, &snd, sizeof(client_data), 0) != sizeof(client_data)) {
        //close_sock_in();
        pthread_mutex_unlock(&mtx_write);
        return -1;
    }

    pthread_mutex_unlock(&mtx_write);
    return 0;
}