isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
		isc_task_t **taskp)
{
	isc_task_t *task;
	isc_boolean_t exiting;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);
	XTRACE("isc_task_create");
	task->manager = manager;
	if (isc_mutex_init(&task->lock) != ISC_R_SUCCESS) {
		isc_mem_put(manager->mctx, task, sizeof(*task));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_mutex_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		return (ISC_R_UNEXPECTED);
	}
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
#ifdef ISC_TASK_NAMES
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
#endif
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);

	exiting = ISC_FALSE;
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	task->magic = TASK_MAGIC;
	*taskp = task;

	return (ISC_R_SUCCESS);
}
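/*
 * A minimal usage sketch, not part of the original source: create a task
 * that inherits the manager's default quantum (by passing 0), then drop
 * the caller's reference.  "task_example" and its taskmgr argument are
 * hypothetical; the task destroys itself once it is shut down and idle.
 */
static isc_result_t
task_example(isc_taskmgr_t *taskmgr) {
	isc_task_t *task = NULL;
	isc_result_t result;

	result = isc_task_create(taskmgr, 0, &task);
	if (result != ISC_R_SUCCESS)
		return (result);

	/* ... post work with isc_task_send() / isc_task_onshutdown() ... */

	isc_task_detach(&task);		/* destroyed when idle and shut down */
	return (ISC_R_SUCCESS);
}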
static isc_threadresult_t
#ifdef _WIN32			/* XXXDCL */
WINAPI
#endif
run(void *uap) {
	isc__timermgr_t *manager = uap;
	isc_time_t now;
	isc_result_t result;

	LOCK(&manager->lock);
	while (!manager->done) {
		TIME_NOW(&now);

		XTRACETIME(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					  ISC_MSG_RUNNING, "running"), now);

		dispatch(manager, &now);

		if (manager->nscheduled > 0) {
			XTRACETIME2(isc_msgcat_get(isc_msgcat,
						   ISC_MSGSET_GENERAL,
						   ISC_MSG_WAITUNTIL,
						   "waituntil"),
				    manager->due, now);
			result = WAITUNTIL(&manager->wakeup, &manager->lock,
					   &manager->due);
			INSIST(result == ISC_R_SUCCESS ||
			       result == ISC_R_TIMEDOUT);
		} else {
			XTRACETIME(isc_msgcat_get(isc_msgcat,
						  ISC_MSGSET_GENERAL,
						  ISC_MSG_WAIT, "wait"),
				   now);
			WAIT(&manager->wakeup, &manager->lock);
		}
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TIMER,
				      ISC_MSG_WAKEUP, "wakeup"));
	}
	UNLOCK(&manager->lock);

#ifdef OPENSSL_LEAKS
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}
static void
default_fatal_callback(const char *file, int line, const char *format,
		       va_list args)
{
	fprintf(stderr, "%s:%d: %s: ", file, line,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
			       ISC_MSG_FATALERROR, "fatal error"));
	vfprintf(stderr, format, args);
	fprintf(stderr, "\n");
	fflush(stderr);
}
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	fprintf(stderr,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
			       ISC_MSG_PRINTLOCK,
			       "rwlock %p thread %lu %s(%s): %s, %u active, "
			       "%u granted, %u rwaiting, %u wwaiting\n"),
		rwl, isc_thread_self(), operation,
		(type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READ, "read") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITE, "write")),
		(rwl->type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READING, "reading") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITING, "writing")),
		rwl->active, rwl->granted, rwl->readers_waiting,
		rwl->writers_waiting);
}
static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *uap) {
	isc__taskmgr_t *manager = uap;

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_STARTING, "starting"));

	dispatch(manager);

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_EXITING, "exiting"));

#ifdef OPENSSL_LEAKS
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}
static void
default_callback(const char *file, int line, isc_assertiontype_t type,
		 const char *cond)
{
	fprintf(stderr, "%s:%d: %s(%s) %s.\n",
		file, line, isc_assertion_typetotext(type), cond,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
			       ISC_MSG_FAILED, "failed"));
	fflush(stderr);
	abort();
	/* NOTREACHED */
}
unsigned int
isc_sockaddr_hash(const isc_sockaddr_t *sockaddr, isc_boolean_t address_only) {
	unsigned int length = 0;
	const unsigned char *s = NULL;
	unsigned int h = 0;
	unsigned int g;
	unsigned int p = 0;
	const struct in6_addr *in6;

	REQUIRE(sockaddr != NULL);

	switch (sockaddr->type.sa.sa_family) {
	case AF_INET:
		s = (const unsigned char *)&sockaddr->type.sin.sin_addr;
		p = ntohs(sockaddr->type.sin.sin_port);
		length = sizeof(sockaddr->type.sin.sin_addr.s_addr);
		break;
	case AF_INET6:
		in6 = &sockaddr->type.sin6.sin6_addr;
		if (IN6_IS_ADDR_V4MAPPED(in6)) {
			/*
			 * Hash only the embedded IPv4 address: the last
			 * four bytes of the 16-byte IPv6 address.  Note
			 * that "&in6[12]" would index twelve whole
			 * in6_addr structures past the address; the
			 * 12-byte offset must be applied to the byte
			 * pointer.
			 */
			s = (const unsigned char *)in6 + 12;
			length = sizeof(sockaddr->type.sin.sin_addr.s_addr);
		} else {
			s = (const unsigned char *)in6;
			length = sizeof(sockaddr->type.sin6.sin6_addr);
		}
		p = ntohs(sockaddr->type.sin6.sin6_port);
		break;
	default:
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "%s: %d",
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_SOCKADDR,
						ISC_MSG_UNKNOWNFAMILY,
						"unknown address family"),
				 (int)sockaddr->type.sa.sa_family);
		s = (const unsigned char *)&sockaddr->type;
		length = sockaddr->length;
		p = 0;
	}

	h = isc_hash_calc(s, length, ISC_TRUE);
	if (!address_only) {
		g = isc_hash_calc((const unsigned char *)&p, sizeof(p),
				  ISC_TRUE);
		h = h ^ g; /* XXX: we should concatenate h and p first */
	}

	return (h);
}
isc_result_t
isc_condition_waituntil(isc_condition_t *c, isc_mutex_t *m, isc_time_t *t) {
	int presult;
	isc_result_t result;
	struct timespec ts;
	char strbuf[ISC_STRERRORSIZE];

	REQUIRE(c != NULL && m != NULL && t != NULL);

	/*
	 * POSIX defines a timespec's tv_sec as time_t.
	 */
	result = isc_time_secondsastimet(t, &ts.tv_sec);

	/*
	 * If we have a range error ts.tv_sec is most probably a signed
	 * 32 bit value.  Set ts.tv_sec to INT_MAX.  This is a kludge.
	 */
	if (result == ISC_R_RANGE)
		ts.tv_sec = INT_MAX;
	else if (result != ISC_R_SUCCESS)
		return (result);

	/*!
	 * POSIX defines a timespec's tv_nsec as long.  isc_time_nanoseconds
	 * ensures its return value is < 1 billion, which will fit in a long.
	 */
	ts.tv_nsec = (long)isc_time_nanoseconds(t);

	do {
#if ISC_MUTEX_PROFILE
		presult = pthread_cond_timedwait(c, &m->mutex, &ts);
#else
		presult = pthread_cond_timedwait(c, m, &ts);
#endif
		if (presult == 0)
			return (ISC_R_SUCCESS);
		if (presult == ETIMEDOUT)
			return (ISC_R_TIMEDOUT);
	} while (presult == EINTR);

	isc__strerror(presult, strbuf, sizeof(strbuf));
	UNEXPECTED_ERROR(__FILE__, __LINE__,
			 "pthread_cond_timedwait() %s %s",
			 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					ISC_MSG_RETURNED, "returned"),
			 strbuf);
	return (ISC_R_UNEXPECTED);
}
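/*
 * A minimal usage sketch, not part of the original source: compute an
 * absolute deadline from the current time and wait for a predicate under
 * its mutex.  "wait_ready" and the "ready" flag are hypothetical; the
 * predicate loop guards against spurious wakeups, and ISC_R_TIMEDOUT
 * reports that the deadline passed before the flag was set.
 */
static isc_result_t
wait_ready(isc_condition_t *cond, isc_mutex_t *mutex, isc_boolean_t *ready,
	   unsigned int seconds)
{
	isc_interval_t interval;
	isc_time_t deadline;
	isc_result_t result;

	isc_interval_set(&interval, seconds, 0);
	result = isc_time_nowplusinterval(&deadline, &interval);
	if (result != ISC_R_SUCCESS)
		return (result);

	LOCK(mutex);
	while (!*ready && result == ISC_R_SUCCESS)
		result = isc_condition_waituntil(cond, mutex, &deadline);
	UNLOCK(mutex);

	return (result);
}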
isc_result_t
isc_interfaceiter_create(isc_mem_t *mctx, isc_interfaceiter_t **iterp) {
	isc_interfaceiter_t *iter;
	isc_result_t result;
	char strbuf[ISC_STRERRORSIZE];

	REQUIRE(mctx != NULL);
	REQUIRE(iterp != NULL);
	REQUIRE(*iterp == NULL);

	iter = isc_mem_get(mctx, sizeof(*iter));
	if (iter == NULL)
		return (ISC_R_NOMEMORY);

	iter->mctx = mctx;
	iter->buf = NULL;
	iter->bufsize = 0;
	iter->ifaddrs = NULL;

	if (getifaddrs(&iter->ifaddrs) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_IFITERGETIFADDRS,
						ISC_MSG_GETIFADDRS,
						"getting interface "
						"addresses: getifaddrs: %s"),
				 strbuf);
		result = ISC_R_UNEXPECTED;
		goto failure;
	}

	/*
	 * A newly created iterator has an undefined position
	 * until isc_interfaceiter_first() is called.
	 */
	iter->pos = NULL;
	iter->result = ISC_R_FAILURE;

	iter->magic = IFITER_MAGIC;
	*iterp = iter;
	return (ISC_R_SUCCESS);

 failure:
	if (iter->ifaddrs != NULL)	/* just in case */
		freeifaddrs(iter->ifaddrs);
	isc_mem_put(mctx, iter, sizeof(*iter));
	return (result);
}
static void
initialize_action(void) {
	isc_result_t result;

	RUNTIME_CHECK(isc_mutex_init(&lock) == ISC_R_SUCCESS);
	ISC_LIST_INIT(tables);

	result = register_table(ISC_RESULTCLASS_ISC, ISC_R_NRESULTS, text,
				isc_msgcat, ISC_RESULT_RESULTSET);
	if (result != ISC_R_SUCCESS)
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "register_table() %s: %u",
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 result);
}
static void
try_ipv6pktinfo(void) {
	SOCKET s;
	int on;
	char strbuf[ISC_STRERRORSIZE];
	isc_result_t result;
	int optname;

	result = isc_net_probeipv6();
	if (result != ISC_R_SUCCESS) {
		ipv6pktinfo_result = result;
		return;
	}

	/* we only use this for UDP sockets */
	s = socket(PF_INET6, SOCK_DGRAM, IPPROTO_UDP);
	if (s == INVALID_SOCKET) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "socket() %s: %s",
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		ipv6pktinfo_result = ISC_R_UNEXPECTED;
		return;
	}

#ifdef IPV6_RECVPKTINFO
	optname = IPV6_RECVPKTINFO;
#else
	optname = IPV6_PKTINFO;
#endif
	on = 1;
	if (setsockopt(s, IPPROTO_IPV6, optname, (const char *)&on,
		       sizeof(on)) < 0) {
		ipv6pktinfo_result = ISC_R_NOTFOUND;
		goto close;
	}

	ipv6pktinfo_result = ISC_R_SUCCESS;

 close:
	closesocket(s);
	return;
}
void
isc_sockaddr_setport(isc_sockaddr_t *sockaddr, in_port_t port) {
	switch (sockaddr->type.sa.sa_family) {
	case AF_INET:
		sockaddr->type.sin.sin_port = htons(port);
		break;
	case AF_INET6:
		sockaddr->type.sin6.sin6_port = htons(port);
		break;
	default:
		FATAL_ERROR(__FILE__, __LINE__,
			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKADDR,
					   ISC_MSG_UNKNOWNFAMILY,
					   "unknown address family: %d"),
			    (int)sockaddr->type.sa.sa_family);
	}
}
unsigned int
isc_sockaddr_hash(const isc_sockaddr_t *sockaddr, isc_boolean_t address_only) {
	unsigned int length = 0;
	const unsigned char *s = NULL;
	unsigned int h = 0;
	unsigned int p = 0;
	const struct in6_addr *in6;

	REQUIRE(sockaddr != NULL);

	switch (sockaddr->type.sa.sa_family) {
	case AF_INET:
		s = (const unsigned char *)&sockaddr->type.sin.sin_addr;
		p = ntohs(sockaddr->type.sin.sin_port);
		length = sizeof(sockaddr->type.sin.sin_addr.s_addr);
		break;
	case AF_INET6:
		in6 = &sockaddr->type.sin6.sin6_addr;
		s = (const unsigned char *)in6;
		if (IN6_IS_ADDR_V4MAPPED(in6)) {
			s += 12;
			length = sizeof(sockaddr->type.sin.sin_addr.s_addr);
		} else
			length = sizeof(sockaddr->type.sin6.sin6_addr);
		p = ntohs(sockaddr->type.sin6.sin6_port);
		break;
	default:
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_SOCKADDR,
						ISC_MSG_UNKNOWNFAMILY,
						"unknown address family: %d"),
				 (int)sockaddr->type.sa.sa_family);
		s = (const unsigned char *)&sockaddr->type;
		length = sockaddr->length;
		p = 0;
	}

	h = isc_hash_function(s, length, ISC_TRUE, NULL);
	if (!address_only)
		h = isc_hash_function(&p, sizeof(p), ISC_TRUE, &h);

	return (h);
}
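/*
 * A minimal usage sketch, not part of the original source: hash an IPv4
 * socket address with and without its port.  "hash_example" and the
 * address/port values are hypothetical; <arpa/inet.h> is assumed for
 * inet_addr().
 */
static void
hash_example(void) {
	isc_sockaddr_t sa;
	struct in_addr ina;
	unsigned int addr_hash, full_hash;

	ina.s_addr = inet_addr("192.0.2.1");
	isc_sockaddr_fromin(&sa, &ina, 53);

	/* Same bucket for any port on this address... */
	addr_hash = isc_sockaddr_hash(&sa, ISC_TRUE);
	/* ...distinct buckets per (address, port) pair. */
	full_hash = isc_sockaddr_hash(&sa, ISC_FALSE);

	printf("addr-only: %u, addr+port: %u\n", addr_hash, full_hash);
}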
void
isc_sockaddr_format(const isc_sockaddr_t *sa, char *array, unsigned int size) {
	isc_result_t result;
	isc_buffer_t buf;

	isc_buffer_init(&buf, array, size);
	result = isc_sockaddr_totext(sa, &buf);
	if (result != ISC_R_SUCCESS) {
		/*
		 * The message is the same as in netaddr.c.
		 */
		snprintf(array, size,
			 isc_msgcat_get(isc_msgcat, ISC_MSGSET_NETADDR,
					ISC_MSG_UNKNOWNADDR,
					"<unknown address, family %u>"),
			 sa->type.sa.sa_family);
		array[size - 1] = '\0';
	}
}
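/*
 * A minimal usage sketch, not part of the original source: format an
 * address into a fixed-size stack buffer for logging.  "log_sockaddr"
 * is hypothetical; the result is always NUL-terminated, even for
 * unknown families.
 */
static void
log_sockaddr(const isc_sockaddr_t *sa) {
	char addrbuf[ISC_SOCKADDR_FORMATSIZE];

	isc_sockaddr_format(sa, addrbuf, sizeof(addrbuf));
	fprintf(stderr, "peer address: %s\n", addrbuf);
}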
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
	fprintf(stderr,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
			       ISC_MSG_PRINTLOCK2,
			       "rwlock %p thread %lu %s(%s): "
			       "write_requests=%u, write_completions=%u, "
			       "cnt_and_flag=0x%x, readers_waiting=%u, "
			       "write_granted=%u, write_quota=%u\n"),
		rwl, isc_thread_self(), operation,
		(type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READ, "read") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITE, "write")),
		rwl->write_requests, rwl->write_completions,
		rwl->cnt_and_flag, rwl->readers_waiting,
		rwl->write_granted, rwl->write_quota);
#else
	fprintf(stderr,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
			       ISC_MSG_PRINTLOCK,
			       "rwlock %p thread %lu %s(%s): %s, %u active, "
			       "%u granted, %u rwaiting, %u wwaiting\n"),
		rwl, isc_thread_self(), operation,
		(type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READ, "read") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITE, "write")),
		(rwl->type == isc_rwlocktype_read ?
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_READING, "reading") :
		 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				ISC_MSG_WRITING, "writing")),
		rwl->active, rwl->granted, rwl->readers_waiting,
		rwl->writers_waiting);
#endif
}
in_port_t
isc_sockaddr_getport(const isc_sockaddr_t *sockaddr) {
	in_port_t port = 0;

	switch (sockaddr->type.sa.sa_family) {
	case AF_INET:
		port = ntohs(sockaddr->type.sin.sin_port);
		break;
	case AF_INET6:
		port = ntohs(sockaddr->type.sin6.sin6_port);
		break;
	default:
		FATAL_ERROR(__FILE__, __LINE__,
			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKADDR,
					   ISC_MSG_UNKNOWNFAMILY,
					   "unknown address family: %d"),
			    (int)sockaddr->type.sa.sa_family);
	}

	return (port);
}
static isc_result_t
handle_signal(int sig, void (*handler)(int)) {
	struct sigaction sa;
	char strbuf[ISC_STRERRORSIZE];

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;

	if (sigfillset(&sa.sa_mask) != 0 ||
	    sigaction(sig, &sa, NULL) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_APP,
						ISC_MSG_SIGNALSETUP,
						"handle_signal() %d setup: %s"),
				 sig, strbuf);
		return (ISC_R_UNEXPECTED);
	}

	return (ISC_R_SUCCESS);
}
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	XTRACE("task_shutdown");

	if (! TASK_SHUTTINGDOWN(task)) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = ISC_TRUE;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown); event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
		}
	}

	return (was_idle);
}
/* coverity[+kill] */
static void
default_callback(const char *file, int line, isc_assertiontype_t type,
		 const char *cond)
{
	void *tracebuf[BACKTRACE_MAXFRAME];
	int i, nframes;
	const char *logsuffix = ".";
	const char *fname;
	isc_result_t result;

	result = isc_backtrace_gettrace(tracebuf, BACKTRACE_MAXFRAME,
					&nframes);
	if (result == ISC_R_SUCCESS && nframes > 0)
		logsuffix = ", back trace";

	fprintf(stderr, "%s:%d: %s(%s) %s%s\n",
		file, line, isc_assertion_typetotext(type), cond,
		isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
			       ISC_MSG_FAILED, "failed"), logsuffix);
	if (result == ISC_R_SUCCESS) {
		for (i = 0; i < nframes; i++) {
			unsigned long offset;

			fname = NULL;
			result = isc_backtrace_getsymbol(tracebuf[i], &fname,
							 &offset);
			if (result == ISC_R_SUCCESS) {
				fprintf(stderr, "#%d %p in %s()+0x%lx\n", i,
					tracebuf[i], fname, offset);
			} else {
				fprintf(stderr, "#%d %p in ??\n", i,
					tracebuf[i]);
			}
		}
	}
	fflush(stderr);
}
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t cntflag;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		/* If a writer is waiting or working, we fail. */
		if (rwl->write_requests != rwl->write_completions)
			return (ISC_R_LOCKBUSY);

		/* Otherwise, be ready for reading. */
		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
		if ((cntflag & WRITER_ACTIVE) != 0) {
			/*
			 * A writer is working.  We lose, and cancel the read
			 * request.
			 */
			cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
						  -READER_INCR);
			/*
			 * If no other readers are waiting and we've suspended
			 * new writers in this short period, wake them up.
			 */
			if (cntflag == READER_INCR &&
			    rwl->write_completions != rwl->write_requests) {
				LOCK(&rwl->lock);
				BROADCAST(&rwl->writeable);
				UNLOCK(&rwl->lock);
			}

			return (ISC_R_LOCKBUSY);
		}
	} else {
		/* Try locking without entering the waiting queue. */
		cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
					     WRITER_ACTIVE);
		if (cntflag != 0)
			return (ISC_R_LOCKBUSY);

		/*
		 * XXXJT: jump into the queue, possibly breaking the writer
		 * order.
		 */
		(void)isc_atomic_xadd(&rwl->write_completions, -1);
		rwl->write_granted++;
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
/***
 *** Task Manager.
 ***/

static void
dispatch(isc_taskmgr_t *manager) {
	isc_task_t *task;
#ifndef ISC_PLATFORM_USETHREADS
	unsigned int total_dispatch_count = 0;
	isc_tasklist_t ready_tasks;
#endif /* ISC_PLATFORM_USETHREADS */

	REQUIRE(VALID_MANAGER(manager));

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */

#ifndef ISC_PLATFORM_USETHREADS
	ISC_LIST_INIT(ready_tasks);
#endif
	LOCK(&manager->lock);
	while (!FINISHED(manager)) {
#ifdef ISC_PLATFORM_USETHREADS
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 */
		while ((EMPTY(manager->ready_tasks) ||
			manager->exclusive_requested) &&
		       !FINISHED(manager)) {
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_GENERAL,
						    ISC_MSG_WAIT, "wait"));
			WAIT(&manager->work_available, &manager->lock);
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_TASK,
						    ISC_MSG_AWAKE, "awake"));
		}
#else /* ISC_PLATFORM_USETHREADS */
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    EMPTY(manager->ready_tasks))
			break;
#endif /* ISC_PLATFORM_USETHREADS */
		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
					    ISC_MSG_WORKING, "working"));

		task = HEAD(manager->ready_tasks);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			isc_boolean_t done = ISC_FALSE;
			isc_boolean_t requeue = ISC_FALSE;
			isc_boolean_t finished = ISC_FALSE;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			DEQUEUE(manager->ready_tasks, task, ready_link);
			manager->tasks_running++;
			UNLOCK(&manager->lock);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					      ISC_MSG_RUNNING, "running"));
			isc_stdtime_get(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);

					/*
					 * Execute the event action.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EXECUTE,
							      "execute action"));
					if (event->ev_action != NULL) {
						UNLOCK(&task->lock);
						(event->ev_action)(task, event);
						LOCK(&task->lock);
					}
					dispatch_count++;
#ifndef ISC_PLATFORM_USETHREADS
					total_dispatch_count++;
#endif /* ISC_PLATFORM_USETHREADS */
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					isc_boolean_t was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	we will still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EMPTY,
							      "empty"));
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						XTRACE(isc_msgcat_get(
							       isc_msgcat,
							       ISC_MSGSET_TASK,
							       ISC_MSG_DONE,
							       "done"));
						finished = ISC_TRUE;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = ISC_TRUE;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_QUANTUM,
							      "quantum"));
					task->state = task_state_ready;
					requeue = ISC_TRUE;
					done = ISC_TRUE;
				}
			} while (!done);
			UNLOCK(&task->lock);

			if (finished)
				task_finished(task);

			LOCK(&manager->lock);
			manager->tasks_running--;
#ifdef ISC_PLATFORM_USETHREADS
			if (manager->exclusive_requested &&
			    manager->tasks_running == 1) {
				SIGNAL(&manager->exclusive_granted);
			}
#endif /* ISC_PLATFORM_USETHREADS */
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
#ifdef ISC_PLATFORM_USETHREADS
				ENQUEUE(manager->ready_tasks, task,
					ready_link);
#else
				ENQUEUE(ready_tasks, task, ready_link);
#endif
			}
		}
	}
#ifndef ISC_PLATFORM_USETHREADS
	ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
#endif
	UNLOCK(&manager->lock);
}
isc_result_t
isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		   unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	isc_result_t result;
	unsigned int i, started = 0;
	isc_taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

#ifndef ISC_PLATFORM_USETHREADS
	UNUSED(i);
	UNUSED(started);
	UNUSED(workers);

	if (taskmgr != NULL) {
		taskmgr->refs++;
		*managerp = taskmgr;
		return (ISC_R_SUCCESS);
	}
#endif /* ISC_PLATFORM_USETHREADS */

	manager = isc_mem_get(mctx, sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);
	manager->magic = TASK_MANAGER_MAGIC;
	manager->mctx = NULL;
	result = isc_mutex_init(&manager->lock);
	if (result != ISC_R_SUCCESS)
		goto cleanup_mgr;

#ifdef ISC_PLATFORM_USETHREADS
	manager->workers = 0;
	manager->threads = isc_mem_allocate(mctx,
					    workers * sizeof(isc_thread_t));
	if (manager->threads == NULL) {
		result = ISC_R_NOMEMORY;
		goto cleanup_lock;
	}
	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_threads;
	}
	if (isc_condition_init(&manager->exclusive_granted) !=
	    ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_workavailable;
	}
#endif /* ISC_PLATFORM_USETHREADS */
	if (default_quantum == 0)
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	manager->default_quantum = default_quantum;
	INIT_LIST(manager->tasks);
	INIT_LIST(manager->ready_tasks);
	manager->tasks_running = 0;
	manager->exclusive_requested = ISC_FALSE;
	manager->exiting = ISC_FALSE;

	isc_mem_attach(mctx, &manager->mctx);

#ifdef ISC_PLATFORM_USETHREADS
	LOCK(&manager->lock);
	/*
	 * Start workers.
	 */
	for (i = 0; i < workers; i++) {
		if (isc_thread_create(run, manager,
				      &manager->threads[manager->workers]) ==
		    ISC_R_SUCCESS) {
			manager->workers++;
			started++;
		}
	}
	UNLOCK(&manager->lock);

	if (started == 0) {
		manager_free(manager);
		return (ISC_R_NOTHREADS);
	}
	isc_thread_setconcurrency(workers);
#else /* ISC_PLATFORM_USETHREADS */
	manager->refs = 1;
	taskmgr = manager;
#endif /* ISC_PLATFORM_USETHREADS */

	*managerp = manager;

	return (ISC_R_SUCCESS);

#ifdef ISC_PLATFORM_USETHREADS
 cleanup_workavailable:
	(void)isc_condition_destroy(&manager->work_available);
 cleanup_threads:
	isc_mem_free(mctx, manager->threads);
 cleanup_lock:
	DESTROYLOCK(&manager->lock);
#endif
 cleanup_mgr:
	isc_mem_put(mctx, manager, sizeof(*manager));
	return (result);
}
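/*
 * A minimal usage sketch, not part of the original source: bring a task
 * manager up and tear it down again.  "taskmgr_example" is hypothetical,
 * and the worker count of 4 is an arbitrary example value.
 */
static isc_result_t
taskmgr_example(isc_mem_t *mctx) {
	isc_taskmgr_t *taskmgr = NULL;
	isc_result_t result;

	result = isc_taskmgr_create(mctx, 4, 0, &taskmgr);
	if (result != ISC_R_SUCCESS)
		return (result);

	/* ... create tasks and send events ... */

	isc_taskmgr_destroy(&taskmgr);	/* waits for all tasks to finish */
	return (ISC_R_SUCCESS);
}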
ISC_TIMERFUNC_SCOPE void
isc__timermgr_destroy(isc_timermgr_t **managerp) {
	isc__timermgr_t *manager;
	isc_mem_t *mctx;

	/*
	 * Destroy a timer manager.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc__timermgr_t *)*managerp;
	REQUIRE(VALID_MANAGER(manager));

	LOCK(&manager->lock);

#ifdef USE_SHARED_MANAGER
	manager->refs--;
	if (manager->refs > 0) {
		UNLOCK(&manager->lock);
		*managerp = NULL;
		return;
	}
	timermgr = NULL;
#endif /* USE_SHARED_MANAGER */

#ifndef USE_TIMER_THREAD
	isc__timermgr_dispatch((isc_timermgr_t *)manager);
#endif

	REQUIRE(EMPTY(manager->timers));
	manager->done = ISC_TRUE;

#ifdef USE_TIMER_THREAD
	XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TIMER,
			      ISC_MSG_SIGNALDESTROY, "signal (destroy)"));
	SIGNAL(&manager->wakeup);
#endif /* USE_TIMER_THREAD */

	UNLOCK(&manager->lock);

#ifdef USE_TIMER_THREAD
	/*
	 * Wait for thread to exit.
	 */
	if (isc_thread_join(manager->thread, NULL) != ISC_R_SUCCESS)
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_thread_join() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
#endif /* USE_TIMER_THREAD */

	/*
	 * Clean up.
	 */
#ifdef USE_TIMER_THREAD
	(void)isc_condition_destroy(&manager->wakeup);
#endif /* USE_TIMER_THREAD */
	DESTROYLOCK(&manager->lock);
	isc_heap_destroy(&manager->heap);
	manager->common.impmagic = 0;
	manager->common.magic = 0;
	mctx = manager->mctx;
	isc_mem_put(mctx, manager, sizeof(*manager));
	isc_mem_detach(&mctx);

	*managerp = NULL;

#ifdef USE_SHARED_MANAGER
	timermgr = NULL;
#endif
}
ISC_TIMERFUNC_SCOPE isc_result_t
isc__timermgr_create(isc_mem_t *mctx, isc_timermgr_t **managerp) {
	isc__timermgr_t *manager;
	isc_result_t result;

	/*
	 * Create a timer manager.
	 */

	REQUIRE(managerp != NULL && *managerp == NULL);

#ifdef USE_SHARED_MANAGER
	if (timermgr != NULL) {
		timermgr->refs++;
		*managerp = (isc_timermgr_t *)timermgr;
		return (ISC_R_SUCCESS);
	}
#endif /* USE_SHARED_MANAGER */

	manager = isc_mem_get(mctx, sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);

	manager->common.impmagic = TIMER_MANAGER_MAGIC;
	manager->common.magic = ISCAPI_TIMERMGR_MAGIC;
	manager->common.methods = (isc_timermgrmethods_t *)&timermgrmethods;
	manager->mctx = NULL;
	manager->done = ISC_FALSE;
	INIT_LIST(manager->timers);
	manager->nscheduled = 0;
	isc_time_settoepoch(&manager->due);
	manager->heap = NULL;
	result = isc_heap_create(mctx, sooner, set_index, 0, &manager->heap);
	if (result != ISC_R_SUCCESS) {
		INSIST(result == ISC_R_NOMEMORY);
		isc_mem_put(mctx, manager, sizeof(*manager));
		return (ISC_R_NOMEMORY);
	}
	result = isc_mutex_init(&manager->lock);
	if (result != ISC_R_SUCCESS) {
		isc_heap_destroy(&manager->heap);
		isc_mem_put(mctx, manager, sizeof(*manager));
		return (result);
	}
	isc_mem_attach(mctx, &manager->mctx);
#ifdef USE_TIMER_THREAD
	if (isc_condition_init(&manager->wakeup) != ISC_R_SUCCESS) {
		isc_mem_detach(&manager->mctx);
		DESTROYLOCK(&manager->lock);
		isc_heap_destroy(&manager->heap);
		isc_mem_put(mctx, manager, sizeof(*manager));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		return (ISC_R_UNEXPECTED);
	}
	if (isc_thread_create(run, manager, &manager->thread) !=
	    ISC_R_SUCCESS) {
		isc_mem_detach(&manager->mctx);
		(void)isc_condition_destroy(&manager->wakeup);
		DESTROYLOCK(&manager->lock);
		isc_heap_destroy(&manager->heap);
		isc_mem_put(mctx, manager, sizeof(*manager));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_thread_create() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		return (ISC_R_UNEXPECTED);
	}
#endif
#ifdef USE_SHARED_MANAGER
	manager->refs = 1;
	timermgr = manager;
#endif /* USE_SHARED_MANAGER */

	*managerp = (isc_timermgr_t *)manager;

	return (ISC_R_SUCCESS);
}
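/*
 * A minimal usage sketch, not part of the original source, using the
 * public isc_timer_create() wrapper: a ticker that posts
 * ISC_TIMEREVENT_TICK events to "task" every two seconds.
 * "ticker_example" is hypothetical, the task and action are assumed to
 * exist, and the interval is an arbitrary example; a NULL expires time
 * means the ticker never expires.
 */
static isc_result_t
ticker_example(isc_timermgr_t *timermgr, isc_task_t *task,
	       isc_taskaction_t action)
{
	isc_interval_t interval;
	isc_timer_t *timer = NULL;

	isc_interval_set(&interval, 2, 0);
	return (isc_timer_create(timermgr, isc_timertype_ticker, NULL,
				 &interval, task, action, NULL, &timer));
}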
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t prev_cnt;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);

		/*
		 * If we're the last reader and any writers are waiting, wake
		 * them up.  We need to wake up all of them to ensure the
		 * FIFO order.
		 */
		if (prev_cnt == READER_INCR &&
		    rwl->write_completions != rwl->write_requests) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	} else {
		isc_boolean_t wakeup_writers = ISC_TRUE;

		/*
		 * Reset the flag, and (implicitly) tell other writers
		 * we are done.
		 */
		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
		(void)isc_atomic_xadd(&rwl->write_completions, 1);

		if (rwl->write_granted >= rwl->write_quota ||
		    rwl->write_requests == rwl->write_completions ||
		    (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
			/*
			 * We have passed the write quota, no writer is
			 * waiting, or some readers are almost ready, pending
			 * possible writers.  Note that the last case can
			 * happen even if write_requests != write_completions
			 * (which means a new writer in the queue), so we need
			 * to catch the case explicitly.
			 */
			LOCK(&rwl->lock);
			if (rwl->readers_waiting > 0) {
				wakeup_writers = ISC_FALSE;
				BROADCAST(&rwl->readable);
			}
			UNLOCK(&rwl->lock);
		}

		if (rwl->write_requests != rwl->write_completions &&
		    wakeup_writers) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTUNLOCK, "postunlock"),
		   rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
static void
dispatch(isc__timermgr_t *manager, isc_time_t *now) {
	isc_boolean_t done = ISC_FALSE, post_event, need_schedule;
	isc_timerevent_t *event;
	isc_eventtype_t type = 0;
	isc__timer_t *timer;
	isc_result_t result;
	isc_boolean_t idle;

	/*!
	 * The caller must be holding the manager lock.
	 */

	while (manager->nscheduled > 0 && !done) {
		timer = isc_heap_element(manager->heap, 1);
		INSIST(timer->type != isc_timertype_inactive);
		if (isc_time_compare(now, &timer->due) >= 0) {
			if (timer->type == isc_timertype_ticker) {
				type = ISC_TIMEREVENT_TICK;
				post_event = ISC_TRUE;
				need_schedule = ISC_TRUE;
			} else if (timer->type == isc_timertype_limited) {
				int cmp;
				cmp = isc_time_compare(now, &timer->expires);
				if (cmp >= 0) {
					type = ISC_TIMEREVENT_LIFE;
					post_event = ISC_TRUE;
					need_schedule = ISC_FALSE;
				} else {
					type = ISC_TIMEREVENT_TICK;
					post_event = ISC_TRUE;
					need_schedule = ISC_TRUE;
				}
			} else if (!isc_time_isepoch(&timer->expires) &&
				   isc_time_compare(now,
						    &timer->expires) >= 0) {
				type = ISC_TIMEREVENT_LIFE;
				post_event = ISC_TRUE;
				need_schedule = ISC_FALSE;
			} else {
				idle = ISC_FALSE;

				LOCK(&timer->lock);
				if (!isc_time_isepoch(&timer->idle) &&
				    isc_time_compare(now,
						     &timer->idle) >= 0) {
					idle = ISC_TRUE;
				}
				UNLOCK(&timer->lock);
				if (idle) {
					type = ISC_TIMEREVENT_IDLE;
					post_event = ISC_TRUE;
					need_schedule = ISC_FALSE;
				} else {
					/*
					 * Idle timer has been touched;
					 * reschedule.
					 */
					XTRACEID(isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_TIMER,
							ISC_MSG_IDLERESCHED,
							"idle reschedule"),
						 timer);
					post_event = ISC_FALSE;
					need_schedule = ISC_TRUE;
				}
			}

			if (post_event) {
				XTRACEID(isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_TIMER,
							ISC_MSG_POSTING,
							"posting"), timer);
				/*
				 * XXX We could preallocate this event.
				 */
				event = (isc_timerevent_t *)
					isc_event_allocate(manager->mctx,
							   timer, type,
							   timer->action,
							   timer->arg,
							   sizeof(*event));

				if (event != NULL) {
					event->due = timer->due;
					isc_task_send(timer->task,
						      ISC_EVENT_PTR(&event));
				} else
					UNEXPECTED_ERROR(__FILE__, __LINE__,
						"%s",
						isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_TIMER,
							ISC_MSG_EVENTNOTALLOC,
							"couldn't "
							"allocate event"));
			}

			timer->index = 0;
			isc_heap_delete(manager->heap, 1);
			manager->nscheduled--;

			if (need_schedule) {
				result = schedule(timer, now, ISC_FALSE);
				if (result != ISC_R_SUCCESS)
					UNEXPECTED_ERROR(__FILE__, __LINE__,
						"%s: %u",
						isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_TIMER,
							ISC_MSG_SCHEDFAIL,
							"couldn't schedule "
							"timer"),
						result);
			}
		} else {
			manager->due = timer->due;
			done = ISC_TRUE;
		}
	}
}
static inline isc_result_t
schedule(isc__timer_t *timer, isc_time_t *now, isc_boolean_t signal_ok) {
	isc_result_t result;
	isc__timermgr_t *manager;
	isc_time_t due;
	int cmp;
#ifdef USE_TIMER_THREAD
	isc_boolean_t timedwait;
#endif

	/*!
	 * Note: the caller must ensure locking.
	 */

	REQUIRE(timer->type != isc_timertype_inactive);

#ifndef USE_TIMER_THREAD
	UNUSED(signal_ok);
#endif /* USE_TIMER_THREAD */

	manager = timer->manager;

#ifdef USE_TIMER_THREAD
	/*!
	 * If the manager is in a timed wait, we may need to signal it
	 * to force a wakeup.
	 */
	timedwait = ISC_TF(manager->nscheduled > 0 &&
			   isc_time_seconds(&manager->due) != 0);
#endif

	/*
	 * Compute the new due time.
	 */
	if (timer->type != isc_timertype_once) {
		result = isc_time_add(now, &timer->interval, &due);
		if (result != ISC_R_SUCCESS)
			return (result);
		if (timer->type == isc_timertype_limited &&
		    isc_time_compare(&timer->expires, &due) < 0)
			due = timer->expires;
	} else {
		if (isc_time_isepoch(&timer->idle))
			due = timer->expires;
		else if (isc_time_isepoch(&timer->expires))
			due = timer->idle;
		else if (isc_time_compare(&timer->idle, &timer->expires) < 0)
			due = timer->idle;
		else
			due = timer->expires;
	}

	/*
	 * Schedule the timer.
	 */
	if (timer->index > 0) {
		/*
		 * Already scheduled.
		 */
		cmp = isc_time_compare(&due, &timer->due);
		timer->due = due;
		switch (cmp) {
		case -1:
			isc_heap_increased(manager->heap, timer->index);
			break;
		case 1:
			isc_heap_decreased(manager->heap, timer->index);
			break;
		case 0:
			/* Nothing to do. */
			break;
		}
	} else {
		timer->due = due;
		result = isc_heap_insert(manager->heap, timer);
		if (result != ISC_R_SUCCESS) {
			INSIST(result == ISC_R_NOMEMORY);
			return (ISC_R_NOMEMORY);
		}
		manager->nscheduled++;
	}

	XTRACETIMER(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TIMER,
				   ISC_MSG_SCHEDULE, "schedule"), timer, due);

	/*
	 * If this timer is at the head of the queue, we need to ensure
	 * that we won't miss it if it has a more recent due time than
	 * the current "next" timer.  We do this either by waking up the
	 * run thread, or explicitly setting the value in the manager.
	 */
#ifdef USE_TIMER_THREAD
	/*
	 * This is a temporary (probably) hack to fix a bug on tru64 5.1
	 * and 5.1a.  Sometimes, pthread_cond_timedwait() doesn't actually
	 * return when the time expires, so here, we check to see if
	 * we're 15 seconds or more behind, and if we are, we signal
	 * the dispatcher.  This isn't such a bad idea as a general purpose
	 * watchdog, so perhaps we should just leave it in here.
	 */
	if (signal_ok && timedwait) {
		isc_interval_t fifteen;
		isc_time_t then;

		isc_interval_set(&fifteen, 15, 0);
		result = isc_time_add(&manager->due, &fifteen, &then);

		if (result == ISC_R_SUCCESS &&
		    isc_time_compare(&then, now) < 0) {
			SIGNAL(&manager->wakeup);
			signal_ok = ISC_FALSE;
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_TIMER, ISC_LOG_WARNING,
				      "*** POKED TIMER ***");
		}
	}

	if (timer->index == 1 && signal_ok) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TIMER,
				      ISC_MSG_SIGNALSCHED,
				      "signal (schedule)"));
		SIGNAL(&manager->wakeup);
	}
#else /* USE_TIMER_THREAD */
	if (timer->index == 1 &&
	    isc_time_compare(&timer->due, &manager->due) < 0)
		manager->due = timer->due;
#endif /* USE_TIMER_THREAD */

	return (ISC_R_SUCCESS);
}
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
	isc_boolean_t skip = ISC_FALSE;
	isc_boolean_t done = ISC_FALSE;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(VALID_RWLOCK(rwl));

	LOCK(&rwl->lock);

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		if (rwl->readers_waiting != 0)
			skip = ISC_TRUE;
		while (!done) {
			if (!skip &&
			    ((rwl->active == 0 ||
			      (rwl->type == isc_rwlocktype_read &&
			       (rwl->writers_waiting == 0 ||
				rwl->granted < rwl->read_quota)))))
			{
				rwl->type = isc_rwlocktype_read;
				rwl->active++;
				rwl->granted++;
				done = ISC_TRUE;
			} else if (nonblock) {
				result = ISC_R_LOCKBUSY;
				done = ISC_TRUE;
			} else {
				skip = ISC_FALSE;
				rwl->readers_waiting++;
				WAIT(&rwl->readable, &rwl->lock);
				rwl->readers_waiting--;
			}
		}
	} else {
		if (rwl->writers_waiting != 0)
			skip = ISC_TRUE;
		while (!done) {
			if (!skip && rwl->active == 0) {
				rwl->type = isc_rwlocktype_write;
				rwl->active = 1;
				rwl->granted++;
				done = ISC_TRUE;
			} else if (nonblock) {
				result = ISC_R_LOCKBUSY;
				done = ISC_TRUE;
			} else {
				skip = ISC_FALSE;
				rwl->writers_waiting++;
				WAIT(&rwl->writeable, &rwl->lock);
				rwl->writers_waiting--;
			}
		}
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	UNLOCK(&rwl->lock);

	return (result);
}
isc_result_t
isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
		unsigned int write_quota)
{
	isc_result_t result;

	REQUIRE(rwl != NULL);

	/*
	 * In case there's trouble initializing, we zero magic now.  If all
	 * goes well, we'll set it to RWLOCK_MAGIC.
	 */
	rwl->magic = 0;

	rwl->spins = 0;
#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
	rwl->write_requests = 0;
	rwl->write_completions = 0;
	rwl->cnt_and_flag = 0;
	rwl->readers_waiting = 0;
	rwl->write_granted = 0;
	if (read_quota != 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "read quota is not supported");
	}
	if (write_quota == 0)
		write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
	rwl->write_quota = write_quota;
#else
	rwl->type = isc_rwlocktype_read;
	rwl->original = isc_rwlocktype_none;
	rwl->active = 0;
	rwl->granted = 0;
	rwl->readers_waiting = 0;
	rwl->writers_waiting = 0;
	if (read_quota == 0)
		read_quota = RWLOCK_DEFAULT_READ_QUOTA;
	rwl->read_quota = read_quota;
	if (write_quota == 0)
		write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
	rwl->write_quota = write_quota;
#endif

	result = isc_mutex_init(&rwl->lock);
	if (result != ISC_R_SUCCESS)
		return (result);

	result = isc_condition_init(&rwl->readable);
	if (result != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init(readable) %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 isc_result_totext(result));
		result = ISC_R_UNEXPECTED;
		goto destroy_lock;
	}
	result = isc_condition_init(&rwl->writeable);
	if (result != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init(writeable) %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 isc_result_totext(result));
		result = ISC_R_UNEXPECTED;
		goto destroy_rcond;
	}

	rwl->magic = RWLOCK_MAGIC;

	return (ISC_R_SUCCESS);

 destroy_rcond:
	(void)isc_condition_destroy(&rwl->readable);
 destroy_lock:
	DESTROYLOCK(&rwl->lock);

	return (result);
}
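/*
 * A minimal usage sketch, not part of the original source: initialize a
 * lock with default quotas (0, 0), take and release the read side, then
 * tear the lock down.  "rwlock_example" is hypothetical; note that a
 * nonzero read quota is rejected on the atomic (XADD/CMPXCHG) build.
 */
static isc_result_t
rwlock_example(void) {
	isc_rwlock_t rwl;
	isc_result_t result;

	result = isc_rwlock_init(&rwl, 0, 0);
	if (result != ISC_R_SUCCESS)
		return (result);

	RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_read) ==
		      ISC_R_SUCCESS);
	/* ... read shared state ... */
	isc_rwlock_unlock(&rwl, isc_rwlocktype_read);

	isc_rwlock_destroy(&rwl);
	return (ISC_R_SUCCESS);
}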
static void
try_ipv6only(void) {
#ifdef IPV6_V6ONLY
	SOCKET s;
	int on;
	char strbuf[ISC_STRERRORSIZE];
#endif
	isc_result_t result;

	result = isc_net_probeipv6();
	if (result != ISC_R_SUCCESS) {
		ipv6only_result = result;
		return;
	}

#ifndef IPV6_V6ONLY
	ipv6only_result = ISC_R_NOTFOUND;
	return;
#else
	/* check for TCP sockets */
	s = socket(PF_INET6, SOCK_STREAM, 0);
	if (s == INVALID_SOCKET) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "socket() %s: %s",
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		ipv6only_result = ISC_R_UNEXPECTED;
		return;
	}

	on = 1;
	if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&on,
		       sizeof(on)) < 0) {
		ipv6only_result = ISC_R_NOTFOUND;
		goto close;
	}

	closesocket(s);

	/* check for UDP sockets */
	s = socket(PF_INET6, SOCK_DGRAM, 0);
	if (s == INVALID_SOCKET) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "socket() %s: %s",
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		ipv6only_result = ISC_R_UNEXPECTED;
		return;
	}

	on = 1;
	if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&on,
		       sizeof(on)) < 0) {
		ipv6only_result = ISC_R_NOTFOUND;
		goto close;
	}

	ipv6only_result = ISC_R_SUCCESS;

 close:
	closesocket(s);
	return;
#endif /* IPV6_V6ONLY */
}