isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {

    REQUIRE(VALID_RWLOCK(rwl));
    LOCK(&rwl->lock);
    REQUIRE(rwl->type == type);
    UNUSED(type);

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

    INSIST(rwl->active > 0);
    rwl->active--;
    if (rwl->active == 0) {
        if (rwl->original != isc_rwlocktype_none) {
            rwl->type = rwl->original;
            rwl->original = isc_rwlocktype_none;
        }
        if (rwl->type == isc_rwlocktype_read) {
            rwl->granted = 0;
            if (rwl->writers_waiting > 0) {
                rwl->type = isc_rwlocktype_write;
                SIGNAL(&rwl->writeable);
            } else if (rwl->readers_waiting > 0) {
                /* Does this case ever happen? */
                BROADCAST(&rwl->readable);
            }
        } else {
            if (rwl->readers_waiting > 0) {
                if (rwl->writers_waiting > 0 &&
                    rwl->granted < rwl->write_quota) {
                    SIGNAL(&rwl->writeable);
                } else {
                    rwl->granted = 0;
                    rwl->type = isc_rwlocktype_read;
                    BROADCAST(&rwl->readable);
                }
            } else if (rwl->writers_waiting > 0) {
                rwl->granted = 0;
                SIGNAL(&rwl->writeable);
            } else {
                rwl->granted = 0;
            }
        }
    }
    INSIST(rwl->original == isc_rwlocktype_none);

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_POSTUNLOCK, "postunlock"), rwl, type);
#endif

    UNLOCK(&rwl->lock);

    return (ISC_R_SUCCESS);
}
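/*
 * A minimal caller-side sketch of how the unlock above pairs with the
 * matching lock call. Only the public libisc entry points are assumed
 * (isc_rwlock_init/lock/unlock/destroy and RUNTIME_CHECK from
 * <isc/rwlock.h> and <isc/util.h>); zero quota arguments request the
 * defaults, and read_shared_state is a made-up example name.
 */
#include <isc/rwlock.h>
#include <isc/util.h>

static void
read_shared_state(isc_rwlock_t *rwl) {
    /* Take the lock for reading, touch the protected state, release. */
    RUNTIME_CHECK(isc_rwlock_lock(rwl, isc_rwlocktype_read) ==
                  ISC_R_SUCCESS);
    /* ... read the state protected by rwl ... */
    RUNTIME_CHECK(isc_rwlock_unlock(rwl, isc_rwlocktype_read) ==
                  ISC_R_SUCCESS);
}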
static void
unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Unlocking required ", lck, " lock!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks,
                               erts_lc_lock_t *lck)
{
    print_lock("Unrequire on ", lck, " lock not required!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Require on ", lck, " lock already required!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
required_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Required ", lck, " lock not locked!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
require_twice(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Require on ", lck, " lock already required!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
unrequire_of_not_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Unrequire on ", lck, " lock not required!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
unlock_of_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Unlocking required ", lck, " lock!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
unlock_of_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Required ", lck, " lock not locked!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
    print_lock("Lock order violation occurred when locking ", lck, "!\n");
    print_curr_locks(l_lcks);
    print_lock_order();
    lc_abort();
}
static void
lock_order_violation(lc_thread_t *thr, erts_lc_lock_t *lck)
{
    print_lock("Lock order violation occurred when locking ", lck, "!\n");
    print_curr_locks(thr);
    print_lock_order();
    lc_abort();
}
static void
unlock_op_mismatch(lc_thread_t *thr, erts_lc_lock_t *lck,
                   erts_lock_options_t options)
{
    erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
    print_lock("", lck, " lock which mismatches the previous lock operation!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
lock_twice(char *prefix, lc_thread_t *thr, erts_lc_lock_t *lck,
           erts_lock_options_t options)
{
    erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(thr);
    lc_abort();
}
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
                   Uint16 op_flags)
{
    erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
    print_lock("", lck, " lock which mismatches the previous lock operation!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
           Uint16 op_flags)
{
    erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
    print_lock(" ", lck, " lock which is already locked by thread!\n");
    print_curr_locks(l_lcks);
    lc_abort();
}
static void
type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
                     erts_lc_lock_t *lck)
{
    erts_fprintf(stderr, "Lock type order violation occurred when ");
    print_lock(op, lck, "!\n");
    ASSERT(l_lcks);
    print_curr_locks(l_lcks);
    lc_abort();
}
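/*
 * Each lock-checker reporter above follows the same print-and-abort
 * shape: one line describing the offending lock, a dump of the locks
 * the current thread holds, then an abort. A condensed sketch of that
 * shared pattern; report_lc_error is a hypothetical name, while
 * print_lock/print_curr_locks/lc_abort are the helpers used above.
 */
static void
report_lc_error(lc_thread_t *thr, erts_lc_lock_t *lck,
                char *prefix, char *suffix)
{
    print_lock(prefix, lck, suffix);
    print_curr_locks(thr);
    lc_abort();
}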
/*
 * print_scanconfig() and print_lock() are defined elsewhere in this
 * program; they are invoked once per *.nmf file found in the current
 * working directory.
 */
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
#include <direct.h>   /* getcwd() */

int main()
{
    char InputPath[65535];
    char szDir[65535];
    WIN32_FIND_DATA FileData;
    HANDLE hList;

    /* Put the path of the folder containing the files to read into InputPath. */
    getcwd(InputPath, 65535);
    sprintf(szDir, "%s\\*.nmf", InputPath);

    if ((hList = FindFirstFile(szDir, &FileData)) == INVALID_HANDLE_VALUE) {
        printf("No .nmf files found.\n\n");
    } else {
        /* Handle the first *.nmf ... */
        print_scanconfig(FileData.cFileName);
        print_lock(FileData.cFileName);
        /*
         * ... then each following *.nmf. Stop when the listing is
         * exhausted (ERROR_NO_MORE_FILES) or on any other error,
         * rather than looping forever on a persistent failure.
         */
        while (FindNextFile(hList, &FileData)) {
            print_scanconfig(FileData.cFileName);
            print_lock(FileData.cFileName);
        }
        FindClose(hList);   /* only close a valid search handle */
    }

    system("pause");
    return 0;
}
static void pcpui_trace_locks_handler(void *event, void *data)
{
    struct pcpu_trace_event *te = (struct pcpu_trace_event*)event;
    const char *func_name;
    uintptr_t lock_addr = te->arg1;

    if (lock_addr > KERN_LOAD_ADDR)
        func_name = get_fn_name(lock_addr);
    else
        func_name = "Dynamic lock";
    print_lock();
    printk("Time %uus, lock %p (%s)\n", te->arg0, lock_addr, func_name);
    printk("\t");
    spinlock_debug((spinlock_t*)lock_addr);
    print_unlock();
}
void vframe::print_lock_info(JavaThread* thread, bool youngest, outputStream* st) {
    ResourceMark rm;
    frame fr = get_frame();     // Shorthand notation

    // First, assume we have the monitor locked. If we haven't found an owned
    // monitor before and this is the first frame, then we need to see if the
    // thread is blocked.
    bool first = (youngest && thread->is_hint_blocked());

    // Print out all monitors that we have locked or are trying to lock
    if (fr.is_interpreted_frame()) {
        int x = fr.interpreter_frame_monitor_count();
        // Not correct; this always (re)prints the most recent X monitors
        for (int i = 0; i < x; i++) {
            first = print_lock(first,
                               ALWAYS_UNPOISON_OBJECTREF(thread->_lckstk_top[-i-1]),
                               false, st);
        }
    } else if (fr.is_native_frame()) {
        CodeBlob* cb = CodeCache::find_blob(fr.pc());
        assert0(cb->is_native_method());
        methodCodeOop mco = cb->owner().as_methodCodeOop();
        methodOop moop = mco->method().as_methodOop();
        bool is_object_wait = youngest &&
            moop->name() == vmSymbols::wait_name() &&
            instanceKlass::cast(moop->method_holder())->name() == vmSymbols::java_lang_Object();
        if (moop->is_synchronized() && moop->is_static()) {
            first = print_lock(first,
                               objectRef(Klass::cast(moop->method_holder())->java_mirror()),
                               false, st);
        } else if (is_object_wait) {
            // For synchronized native methods, there should be a single lock.
            // For Object.wait, there is a single oop argument being wait'd upon.
            const RegMap* lm = cb->oop_maps();
            VOopReg::VR lck = lm->get_sole_oop(cb->rel_pc(fr.pc()));
            objectRef* loc = fr.reg_to_addr_oop(lck);
            first = print_lock(first, *loc, is_object_wait, st);
        } else if (moop->is_synchronized()) {
            // For synchronized native methods, there should be a single lock.
            const DebugScope* ds = scope();
            DebugScopeValue::Name lck = ds->get_lock(0);
            objectRef* loc = (objectRef*)fr.reg_to_addr(DebugScopeValue::to_vreg(lck));
            first = print_lock(first, *loc, is_object_wait, st);
        } else if (thread->current_park_blocker() != NULL) {
            oop obj = thread->current_park_blocker();
            first = print_lock(first, objectRef(obj), false, st);
        }
    } else {
        // Hopefully a compiled frame
        const DebugScope* ds = scope();
        for (uint i = 0; i < ds->numlocks(); i++) {
            DebugScopeValue::Name lck = ds->get_lock(i);
            first = print_lock(first,
                               *fr.reg_to_addr(DebugScopeValue::to_vreg(lck)),
                               false, st);
        }
    }
}
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
    isc_int32_t prev_cnt;

    REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

    if (type == isc_rwlocktype_read) {
        prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);

        /*
         * If we're the last reader and any writers are waiting, wake
         * them up. We need to wake up all of them to ensure the
         * FIFO order.
         */
        if (prev_cnt == READER_INCR &&
            rwl->write_completions != rwl->write_requests) {
            LOCK(&rwl->lock);
            BROADCAST(&rwl->writeable);
            UNLOCK(&rwl->lock);
        }
    } else {
        isc_boolean_t wakeup_writers = ISC_TRUE;

        /*
         * Reset the flag, and (implicitly) tell other writers
         * we are done.
         */
        (void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
        (void)isc_atomic_xadd(&rwl->write_completions, 1);

        if (rwl->write_granted >= rwl->write_quota ||
            rwl->write_requests == rwl->write_completions ||
            (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
            /*
             * We have passed the write quota, no writer is
             * waiting, or some readers are almost ready, pending
             * possible writers. Note that the last case can
             * happen even if write_requests != write_completions
             * (which means a new writer in the queue), so we need
             * to catch the case explicitly.
             */
            LOCK(&rwl->lock);
            if (rwl->readers_waiting > 0) {
                wakeup_writers = ISC_FALSE;
                BROADCAST(&rwl->readable);
            }
            UNLOCK(&rwl->lock);
        }

        if (rwl->write_requests != rwl->write_completions &&
            wakeup_writers) {
            LOCK(&rwl->lock);
            BROADCAST(&rwl->writeable);
            UNLOCK(&rwl->lock);
        }
    }

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_POSTUNLOCK, "postunlock"), rwl, type);
#endif

    return (ISC_R_SUCCESS);
}
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
    isc_int32_t cntflag;

    REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

    if (type == isc_rwlocktype_read) {
        if (rwl->write_requests != rwl->write_completions) {
            /* there is a waiting or active writer */
            LOCK(&rwl->lock);
            if (rwl->write_requests != rwl->write_completions) {
                rwl->readers_waiting++;
                WAIT(&rwl->readable, &rwl->lock);
                rwl->readers_waiting--;
            }
            UNLOCK(&rwl->lock);
        }

        cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
        POST(cntflag);
        while (1) {
            if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
                break;

            /* A writer is still working */
            LOCK(&rwl->lock);
            rwl->readers_waiting++;
            if ((rwl->cnt_and_flag & WRITER_ACTIVE) != 0)
                WAIT(&rwl->readable, &rwl->lock);
            rwl->readers_waiting--;
            UNLOCK(&rwl->lock);

            /*
             * Typically, the reader should be able to get a lock
             * at this stage:
             *   (1) there should have been no pending writer when
             *       the reader was trying to increment the
             *       counter; otherwise, the writer should be in
             *       the waiting queue, preventing the reader from
             *       proceeding to this point.
             *   (2) once the reader increments the counter, no
             *       more writer can get a lock.
             * Still, it is possible another writer can work at
             * this point, e.g. in the following scenario:
             *   A previous writer unlocks the writer lock.
             *   This reader proceeds to point (1).
             *   A new writer appears, and gets a new lock before
             *   the reader increments the counter.
             *   The reader then increments the counter.
             *   The previous writer notices there is a waiting
             *   reader who is almost ready, and wakes it up.
             * So, the reader needs to confirm whether it can now
             * read explicitly (thus we loop). Note that this is
             * not an infinite process, since the reader has
             * incremented the counter at this point.
             */
        }

        /*
         * If we are temporarily preferred to writers due to the writer
         * quota, reset the condition (race among readers doesn't
         * matter).
         */
        rwl->write_granted = 0;
    } else {
        isc_int32_t prev_writer;

        /* enter the waiting queue, and wait for our turn */
        prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
        while (rwl->write_completions != prev_writer) {
            LOCK(&rwl->lock);
            if (rwl->write_completions != prev_writer) {
                WAIT(&rwl->writeable, &rwl->lock);
                UNLOCK(&rwl->lock);
                continue;
            }
            UNLOCK(&rwl->lock);
            break;
        }

        while (1) {
            cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
                                         WRITER_ACTIVE);
            if (cntflag == 0)
                break;

            /* Another active reader or writer is working. */
            LOCK(&rwl->lock);
            if (rwl->cnt_and_flag != 0)
                WAIT(&rwl->writeable, &rwl->lock);
            UNLOCK(&rwl->lock);
        }

        INSIST((rwl->cnt_and_flag & WRITER_ACTIVE) != 0);
        rwl->write_granted++;
    }

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

    return (ISC_R_SUCCESS);
}
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
    isc_int32_t cntflag;

    REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

    if (type == isc_rwlocktype_read) {
        /* If a writer is waiting or working, we fail. */
        if (rwl->write_requests != rwl->write_completions)
            return (ISC_R_LOCKBUSY);

        /* Otherwise, be ready for reading. */
        cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
        if ((cntflag & WRITER_ACTIVE) != 0) {
            /*
             * A writer is working. We lose, and cancel the read
             * request.
             */
            cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
                                      -READER_INCR);
            /*
             * If no other readers are waiting and we've suspended
             * new writers in this short period, wake them up.
             */
            if (cntflag == READER_INCR &&
                rwl->write_completions != rwl->write_requests) {
                LOCK(&rwl->lock);
                BROADCAST(&rwl->writeable);
                UNLOCK(&rwl->lock);
            }

            return (ISC_R_LOCKBUSY);
        }
    } else {
        /* Try locking without entering the waiting queue. */
        cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
                                     WRITER_ACTIVE);
        if (cntflag != 0)
            return (ISC_R_LOCKBUSY);

        /*
         * XXXJT: jump into the queue, possibly breaking the writer
         * order.
         */
        (void)isc_atomic_xadd(&rwl->write_completions, -1);
        rwl->write_granted++;
    }

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

    return (ISC_R_SUCCESS);
}
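/*
 * A small usage sketch for the trylock above: attempt a non-blocking
 * write lock and fall back to the blocking path on ISC_R_LOCKBUSY.
 * write_lock_eventually is a hypothetical helper name, and
 * isc_rwlock_lock is assumed to be the public blocking entry point
 * corresponding to isc__rwlock_lock above.
 */
static isc_result_t
write_lock_eventually(isc_rwlock_t *rwl) {
    isc_result_t result;

    result = isc_rwlock_trylock(rwl, isc_rwlocktype_write);
    if (result == ISC_R_LOCKBUSY)
        result = isc_rwlock_lock(rwl, isc_rwlocktype_write);
    return (result);
}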
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
    isc_boolean_t skip = ISC_FALSE;
    isc_boolean_t done = ISC_FALSE;
    isc_result_t result = ISC_R_SUCCESS;

    REQUIRE(VALID_RWLOCK(rwl));
    LOCK(&rwl->lock);

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

    if (type == isc_rwlocktype_read) {
        if (rwl->readers_waiting != 0)
            skip = ISC_TRUE;
        while (!done) {
            if (!skip &&
                (rwl->active == 0 ||
                 (rwl->type == isc_rwlocktype_read &&
                  (rwl->writers_waiting == 0 ||
                   rwl->granted < rwl->read_quota)))) {
                rwl->type = isc_rwlocktype_read;
                rwl->active++;
                rwl->granted++;
                done = ISC_TRUE;
            } else if (nonblock) {
                result = ISC_R_LOCKBUSY;
                done = ISC_TRUE;
            } else {
                skip = ISC_FALSE;
                rwl->readers_waiting++;
                WAIT(&rwl->readable, &rwl->lock);
                rwl->readers_waiting--;
            }
        }
    } else {
        if (rwl->writers_waiting != 0)
            skip = ISC_TRUE;
        while (!done) {
            if (!skip && rwl->active == 0) {
                rwl->type = isc_rwlocktype_write;
                rwl->active = 1;
                rwl->granted++;
                done = ISC_TRUE;
            } else if (nonblock) {
                result = ISC_R_LOCKBUSY;
                done = ISC_TRUE;
            } else {
                skip = ISC_FALSE;
                rwl->writers_waiting++;
                WAIT(&rwl->writeable, &rwl->lock);
                rwl->writers_waiting--;
            }
        }
    }

#ifdef ISC_RWLOCK_TRACE
    print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                              ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

    UNLOCK(&rwl->lock);

    return (result);
}
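/*
 * In this mutex-based variant, the public entry points are plausibly
 * thin wrappers over doit(), with the nonblock flag selecting trylock
 * semantics. A sketch consistent with the signatures used above, not
 * necessarily the exact upstream wrappers:
 */
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
    return (doit(rwl, type, ISC_FALSE));
}

isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
    return (doit(rwl, type, ISC_TRUE));
}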