/**
 * Locking routine.
 * Implements the common lock-acquire path for all checked lock types.
 * Protocol: take the lock's meta-mutex (lock->lock), record the wait,
 * validate no self-deadlock, release the meta-mutex, then acquire the
 * real user lock (try first, then with a timeout to catch deadlocks),
 * and finally retake the meta-mutex to update bookkeeping.
 * @param type: as passed by user.
 * @param lock: as passed by user.
 * @param func: caller location.
 * @param file: caller location.
 * @param line: caller location.
 * @param tryfunc: the pthread_mutex_trylock or similar function.
 * @param timedfunc: the pthread_mutex_timedlock or similar function.
 *	Uses absolute timeout value.
 * @param arg: what to pass to tryfunc and timedfunc.
 * @param exclusive: if lock must be exclusive (only one allowed).
 * @param getwr: if attempts to get writelock (or readlock) for rwlocks.
 */
static void 
checklock_lockit(enum check_lock_type type,
	struct checked_lock* lock, const char* func, const char* file,
	int line, int (*tryfunc)(void*),
	int (*timedfunc)(void*, struct timespec*),
	void* arg, int exclusive, int getwr)
{
	int err;
	int contend = 0;
	/* per-thread debug info, set up by checklock_start/checklock_main */
	struct thr_check *thr = (struct thr_check*)pthread_getspecific(
		thr_debug_key);
	checktype(type, lock, func, file, line);
	if(!thr) lock_error(lock, func, file, line, "no thread info");

	/* under the meta-mutex: register that we are waiting, and check
	 * for immediate self-deadlock (thread relocking its own lock). */
	acquire_locklock(lock, func, file, line);
	lock->wait_count ++;
	thr->waiting = lock;
	if(exclusive && lock->hold_count > 0 && lock->holder == thr)
		lock_error(lock, func, file, line, "thread already owns lock");
	if(type==check_lock_rwlock && getwr && lock->writeholder == thr)
		lock_error(lock, func, file, line, "thread already has wrlock");
	/* drop meta-mutex before blocking on the real lock, so other
	 * threads can update this lock's bookkeeping meanwhile. */
	LOCKRET(pthread_mutex_unlock(&lock->lock));

	/* first try; if busy increase contention counter */
	if((err=tryfunc(arg))) {
		struct timespec to;
		if(err != EBUSY) log_err("trylock: %s", strerror(err));
		/* absolute timeout; if it expires we assume deadlock */
		to.tv_sec = time(NULL) + CHECK_LOCK_TIMEOUT;
		to.tv_nsec = 0;
		if((err=timedfunc(arg, &to))) {
			if(err == ETIMEDOUT)
				lock_error(lock, func, file, line,
					"timeout possible deadlock");
			log_err("timedlock: %s", strerror(err));
		}
		contend ++;
	}
	/* got the lock */

	/* retake meta-mutex to update statistics and ownership info */
	acquire_locklock(lock, func, file, line);
	lock->contention_count += contend;
	lock->history_count++;
	if(exclusive && lock->hold_count > 0)
		lock_error(lock, func, file, line, "got nonexclusive lock");
	if(type==check_lock_rwlock && getwr && lock->writeholder)
		lock_error(lock, func, file, line, "got nonexclusive wrlock");
	if(type==check_lock_rwlock && getwr)
		lock->writeholder = thr;
	/* check the memory areas for unauthorized changes,
	 * between last unlock time and current lock time.
	 * we check while holding the lock (threadsafe). */
	if(getwr || exclusive)
		prot_check(lock, func, file, line);
	finish_acquire_lock(thr, lock, func, file, line);
	LOCKRET(pthread_mutex_unlock(&lock->lock));
}
/** check if OK, free struct */
void 
checklock_destroy(enum check_lock_type type, struct checked_lock** lock,
	const char* func, const char* file, int line)
{
	/* report locks whose contention exceeds this fraction (promille) */
	const size_t contention_interest = 1; /* promille contented locks */
	struct checked_lock* e;
	if(!lock)
		return;
	e = *lock;
	if(!e)
		return;
	checktype(type, e, func, file, line);

	/* check if delete is OK: nobody may hold or wait on the lock */
	acquire_locklock(e, func, file, line);
	if(e->hold_count != 0)
		lock_error(e, func, file, line, "delete while locked.");
	if(e->wait_count != 0)
		lock_error(e, func, file, line, "delete while waited on.");
	prot_check(e, func, file, line);
	*lock = NULL; /* use after free will fail */
	LOCKRET(pthread_mutex_unlock(&e->lock));
	/* safe to read e below without the meta-mutex: the checks above
	 * proved no other thread holds or waits on this lock. */

	/* contention, look at fraction in trouble. */
	if(e->history_count > 1 &&
	   1000*e->contention_count/e->history_count > contention_interest) {
		log_info("lock created %s %s %d has contention %u of %u (%d%%)",
			e->create_func, e->create_file, e->create_line,
			(unsigned int)e->contention_count,
			(unsigned int)e->history_count,
			(int)(100*e->contention_count/e->history_count));
	}

	/* delete it */
	LOCKRET(pthread_mutex_destroy(&e->lock));
	prot_clear(e);
	/* since nobody holds the lock - see check above, no need to unlink
	 * from the thread-held locks list. */
	switch(e->type) {
		case check_lock_mutex:
			LOCKRET(pthread_mutex_destroy(&e->u.mutex));
			break;
		case check_lock_spinlock:
			LOCKRET(pthread_spin_destroy(&e->u.spinlock));
			break;
		case check_lock_rwlock:
			LOCKRET(pthread_rwlock_destroy(&e->u.rwlock));
			break;
		default:
			log_assert(0);
	}
	/* scrub the struct so stale pointers to it fail loudly */
	memset(e, 0, sizeof(struct checked_lock));
	free(e);
}
/** alloc struct, init lock empty */ void checklock_init(enum check_lock_type type, struct checked_lock** lock, const char* func, const char* file, int line) { struct checked_lock* e = (struct checked_lock*)calloc(1, sizeof(struct checked_lock)); struct thr_check *thr = (struct thr_check*)pthread_getspecific( thr_debug_key); if(!e) fatal_exit("%s %s %d: out of memory", func, file, line); if(!thr) { /* this is called when log_init() calls lock_init() * functions, and the test check code has not yet * been initialised. But luckily, the checklock_start() * routine can be called multiple times without ill effect. */ checklock_start(); thr = (struct thr_check*)pthread_getspecific(thr_debug_key); } if(!thr) fatal_exit("%s %s %d: lock_init no thread info", func, file, line); *lock = e; e->type = type; e->create_func = func; e->create_file = file; e->create_line = line; e->create_thread = thr->num; e->create_instance = thr->locks_created++; ordercheck_lockcreate(thr, e); LOCKRET(pthread_mutex_init(&e->lock, NULL)); switch(e->type) { case check_lock_mutex: LOCKRET(pthread_mutex_init(&e->u.mutex, NULL)); break; case check_lock_spinlock: LOCKRET(pthread_spin_init(&e->u.spinlock, PTHREAD_PROCESS_PRIVATE)); break; case check_lock_rwlock: LOCKRET(pthread_rwlock_init(&e->u.rwlock, NULL)); break; default: log_assert(0); } }
/** init the main thread */
void 
checklock_start(void)
{
	struct thr_check* main_thr;
	if(key_deleted)
		return;
	if(key_created)
		return;
	/* first call: set up the thread-specific key and the main
	 * thread's debug record (slot 0). */
	main_thr = (struct thr_check*)calloc(1, sizeof(struct thr_check));
	if(!main_thr)
		fatal_exit("thrcreate: out of memory");
	key_created = 1;
	check_lock_pid = getpid();
	LOCKRET(pthread_key_create(&thr_debug_key, NULL));
	LOCKRET(pthread_setspecific(thr_debug_key, main_thr));
	thread_infos[0] = main_thr;
	if(check_locking_order)
		open_lockorder(main_thr);
}
/** wait for thread with a timeout */
void 
checklock_thrjoin(pthread_t thread)
{
	/* wait with a timeout: arm SIGALRM so a join that hangs
	 * (e.g. a deadlocked thread) aborts via joinalarm instead of
	 * blocking the test forever. */
	if(signal(SIGALRM, joinalarm) == SIG_ERR)
		fatal_exit("signal(): %s", strerror(errno));
	(void)alarm(CHECK_JOIN_TIMEOUT);
	LOCKRET(pthread_join(thread, NULL));
	/* joined in time; cancel the pending alarm */
	(void)alarm(0);
}
/** allocate debug info and create thread */
void 
checklock_thrcreate(pthread_t* id, void* (*func)(void*), void* arg)
{
	struct thr_check* info = (struct thr_check*)calloc(1,
		sizeof(struct thr_check));
	if(!info)
		fatal_exit("thrcreate: out of memory");
	if(!key_created)
		checklock_start();
	/* checklock_main runs first in the new thread and fills in the
	 * rest of the record before invoking func(arg). */
	info->func = func;
	info->arg = arg;
	LOCKRET(pthread_create(id, NULL, checklock_main, info));
}
/** remove protected region */
void 
lock_unprotect(void* mangled, void* area)
{
	struct checked_lock* lock = *(struct checked_lock**)mangled;
	struct protected_area** where;
	if(!lock)
		return;
	acquire_locklock(lock, __func__, __FILE__, __LINE__);
	/* walk the list via a pointer-to-link so the removal works the
	 * same whether the match is at the head or mid-list. */
	for(where = &lock->prot; *where; where = &(*where)->next) {
		if((*where)->region == area) {
			struct protected_area* doomed = *where;
			*where = doomed->next;
			free(doomed->hold);
			free(doomed);
			break;
		}
	}
	LOCKRET(pthread_mutex_unlock(&lock->lock));
}
/** get memory held by lock */
size_t 
lock_get_mem(void* pp)
{
	struct checked_lock* lock = *(struct checked_lock**)pp;
	struct protected_area* region;
	/* the lock struct itself, plus every protected-area record and
	 * its held shadow copy. */
	size_t total = sizeof(struct checked_lock);
	acquire_locklock(lock, __func__, __FILE__, __LINE__);
	for(region = lock->prot; region; region = region->next)
		total += sizeof(struct protected_area) + region->size;
	LOCKRET(pthread_mutex_unlock(&lock->lock));
	return total;
}
/** stop checklocks */
void 
checklock_stop(void)
{
	int i;
	if(!key_created)
		return;
	key_deleted = 1;
	if(check_locking_order)
		fclose(thread_infos[0]->order_info);
	/* slot 0 is the main thread, cleaned up here */
	free(thread_infos[0]);
	thread_infos[0] = NULL;
	for(i = 0; i < THRDEBUG_MAX_THREADS; i++) {
		/* worker slots should have been cleared by checklock_main */
		log_assert(thread_infos[i] == NULL);
	}
	LOCKRET(pthread_key_delete(thr_debug_key));
	key_created = 0;
}
/** add protected region */ void lock_protect(void *p, void* area, size_t size) { struct checked_lock* lock = *(struct checked_lock**)p; struct protected_area* e = (struct protected_area*)malloc( sizeof(struct protected_area)); if(!e) fatal_exit("lock_protect: out of memory"); e->region = area; e->size = size; e->hold = malloc(size); if(!e->hold) fatal_exit("lock_protect: out of memory"); memcpy(e->hold, e->region, e->size); acquire_locklock(lock, __func__, __FILE__, __LINE__); e->next = lock->prot; lock->prot = e; LOCKRET(pthread_mutex_unlock(&lock->lock)); }
/** checklock thread main, Inits thread structure */ static void* checklock_main(void* arg) { struct thr_check* thr = (struct thr_check*)arg; void* ret; thr->id = pthread_self(); /* Hack to get same numbers as in log file */ thr->num = *(int*)(thr->arg); log_assert(thr->num < THRDEBUG_MAX_THREADS); /* as an aside, due to this, won't work for libunbound bg thread */ if(thread_infos[thr->num] != NULL) log_warn("thread warning, thr->num %d not NULL", thr->num); thread_infos[thr->num] = thr; LOCKRET(pthread_setspecific(thr_debug_key, thr)); if(check_locking_order) open_lockorder(thr); ret = thr->func(thr->arg); thread_infos[thr->num] = NULL; if(check_locking_order) fclose(thr->order_info); free(thr); return ret; }
/** check if OK, unlock */
void 
checklock_unlock(enum check_lock_type type, struct checked_lock* lock,
	const char* func, const char* file, int line)
{
	struct thr_check *thr;
	/* after checklock_stop() the key is gone; silently allow unlocks
	 * during shutdown. */
	if(key_deleted)
		return;
	thr = (struct thr_check*)pthread_getspecific(thr_debug_key);
	checktype(type, lock, func, file, line);
	if(!thr)
		lock_error(lock, func, file, line, "no thread info");

	acquire_locklock(lock, func, file, line);
	/* was this thread even holding this lock?
	 * if this lock is on the thread's held-list it is either the
	 * list head (holding_first) or has a nonNULL prev link. */
	if(thr->holding_first != lock &&
		lock->prev_held_lock[thr->num] == NULL) {
		lock_error(lock, func, file, line, "unlock nonlocked lock");
	}
	if(lock->hold_count <= 0)
		lock_error(lock, func, file, line, "too many unlocks");

	/* store this point as last touched by */
	lock->holder = thr;
	lock->hold_count --;
	lock->holder_func = func;
	lock->holder_file = file;
	lock->holder_line = line;

	/* delete from thread holder list */
	/* no need to lock other lockstructs, because they are all on the
	 * held-locks list, and this thread holds their locks.
	 * we only touch the thr->num members, so it is safe. */
	if(thr->holding_first == lock)
		thr->holding_first = lock->next_held_lock[thr->num];
	if(thr->holding_last == lock)
		thr->holding_last = lock->prev_held_lock[thr->num];
	if(lock->next_held_lock[thr->num])
		lock->next_held_lock[thr->num]->prev_held_lock[thr->num] =
			lock->prev_held_lock[thr->num];
	if(lock->prev_held_lock[thr->num])
		lock->prev_held_lock[thr->num]->next_held_lock[thr->num] =
			lock->next_held_lock[thr->num];
	lock->next_held_lock[thr->num] = NULL;
	lock->prev_held_lock[thr->num] = NULL;

	/* snapshot protected areas only when this thread had exclusive
	 * access: a write-held rwlock, or any mutex/spinlock. A read
	 * unlock takes no snapshot. */
	if(type==check_lock_rwlock && lock->writeholder == thr) {
		lock->writeholder = NULL;
		prot_store(lock);
	} else if(type != check_lock_rwlock) {
		/* store memory areas that are protected, for later checks */
		prot_store(lock);
	}
	LOCKRET(pthread_mutex_unlock(&lock->lock));

	/* unlock it */
	switch(type) {
		case check_lock_mutex:
			LOCKRET(pthread_mutex_unlock(&lock->u.mutex));
			break;
		case check_lock_spinlock:
			LOCKRET(pthread_spin_unlock(&lock->u.spinlock));
			break;
		case check_lock_rwlock:
			LOCKRET(pthread_rwlock_unlock(&lock->u.rwlock));
			break;
		default:
			log_assert(0);
	}
}
/** fetch the value stored under a thread-specific key (NULL if unset) */
void* 
ub_thread_key_get(ub_thread_key_t key)
{
	void* value = NULL;
	LOCKRET(thr_getspecific(key, &value));
	return value;
}