/**
 * Destroy the blocking I/O context.
 *
 * THREADS: REENTRANT
 *
 * @param B BAKA thread/global state.
 * @param bpi The context info to destroy.
 */
static void bk_polling_io_destroy(bk_s B, struct bk_polling_io *bpi)
{
  BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk");
  struct polling_io_data *cur;

  if (!bpi)
  {
    bk_error_printf(B, BK_ERR_ERR, "Illegal arguments\n");
    BK_VRETURN(B);
  }

  // Drain every queued message, then tear down the list itself.
  for (cur = pidlist_minimum(bpi->bpi_data); cur; cur = pidlist_minimum(bpi->bpi_data))
  {
    if (pidlist_delete(bpi->bpi_data, cur) != DICT_OK)
      break;                                    // No progress possible; stop rather than spin.
    pid_destroy(B, cur);
  }
  pidlist_destroy(bpi->bpi_data);

#ifdef BK_USING_PTHREADS
  // Release synchronization primitives before freeing their container.
  pthread_mutex_destroy(&bpi->bpi_lock);
  pthread_cond_destroy(&bpi->bpi_wrcond);
  pthread_cond_destroy(&bpi->bpi_rdcond);
#endif /* BK_USING_PTHREADS */

  free(bpi);
  BK_VRETURN(B);
}
/**
 * Flush out the polling cache. Very similar to ioh flush. Flushes all data
 * bufs and EOF messages, keeping the read-side byte accounting
 * (bpi_size/bpi_tell) consistent for the data that is discarded.
 *
 * THREADS: MT-SAFE
 *
 * @param B BAKA thread/global state.
 * @param bpi The @a bk_polling_io to use.
 * @param flags Flags for future use.
 * @return <i>-1</i> on failure.<br>
 * @return <i>0</i> on success.
 */
static int polling_io_flush(bk_s B, struct bk_polling_io *bpi, bk_flags flags)
{
  BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk");
  struct polling_io_data *pid, *npid;

  if (!bpi)
  {
    bk_error_printf(B, BK_ERR_ERR,"Illegal arguments\n");
    BK_RETURN(B, -1);
  }

  pid = pidlist_minimum(bpi->bpi_data);
  while(pid)
  {
    // Grab the successor *before* deleting pid so the walk survives.
    npid = pidlist_successor(bpi->bpi_data, pid);

    /*
     * Only nuke data vbufs and EOF markers. NB pid_data may be NULL for
     * pure status messages (cf. the `if (pid->pid_data)' below), so guard
     * the dereference — the old code would crash on a dataless entry.
     */
    if ((pid->pid_data && pid->pid_data->ptr) || pid->pid_status == BkIohStatusIohReadEOF)
    {
      if (pidlist_delete(bpi->bpi_data, pid) != DICT_OK)
      {
	bk_error_printf(B, BK_ERR_ERR, "Could not delete pid from bpi data list: %s\n", pidlist_error_reason(bpi->bpi_data, NULL));
	break;
      }

      if (pid->pid_data)
      {
#ifdef BK_USING_PTHREADS
	if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
	  abort();
#endif /* BK_USING_PTHREADS */

	/*
	 * We're removing data which *hasn't'* been read by the user so we
	 * reduce *both* the amount of data on the queue and our tell
	 * position (in the io_poll routine we *increased* the later.
	 */
	bpi->bpi_size -= pid->pid_data->len;
	bpi->bpi_tell -= pid->pid_data->len;

#ifdef BK_USING_PTHREADS
	if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
	  abort();
#endif /* BK_USING_PTHREADS */

	// <WARNING>Requires unthrottle as no-op if not throttled.</WARNING>
	if (bpi->bpi_ioh->ioh_readq.biq_queuemax && bpi->bpi_size < bpi->bpi_ioh->ioh_readq.biq_queuemax)
	{
	  bk_polling_io_unthrottle(B, bpi, 0);
	}
      }
      pid_destroy(B, pid);
    }
    pid = npid;
  }

  BK_RETURN(B,0);
}
/**
 * Flush all data associated with polling stuff.
 *
 * THREADS: MT-SAFE
 *
 * @param B BAKA thread/global state.
 * @param bpi @a bk_polling_io to flush.
 * @param flags Flags for the future.
 */
void bk_polling_io_flush(bk_s B, struct bk_polling_io *bpi, bk_flags flags)
{
  BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk");
  struct polling_io_data *pid, *npid;

  if (!bpi)
  {
    bk_error_printf(B, BK_ERR_ERR,"Illegal arguments\n");
    BK_VRETURN(B);
  }

  /*
   * Walk from the minimum forward (was pidlist_maximum: the successor of
   * the maximum is NULL, so the old loop examined exactly one element and
   * left the rest of the cache unflushed).  This now matches the
   * minimum/successor iteration used by every other pidlist walk here.
   */
  pid = pidlist_minimum(bpi->bpi_data);
  while(pid)
  {
    // Grab the successor *before* deleting pid so the walk survives.
    npid = pidlist_successor(bpi->bpi_data, pid);

    // Only nuke data vbufs and EOF markers.  pid_data may be NULL for pure
    // status messages, so guard the dereference.
    if ((pid->pid_data && pid->pid_data->ptr) || pid->pid_status == BkIohStatusIohReadEOF)
    {
      // If we're flushing off an EOF, then clear the fact that we have seen EOF.
      if (pid->pid_status == BkIohStatusIohReadEOF)
      {
#ifdef BK_USING_PTHREADS
	if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
	  abort();
#endif /* BK_USING_PTHREADS */

	BK_FLAG_CLEAR(bpi->bpi_flags, BPI_FLAG_SAW_EOF);

#ifdef BK_USING_PTHREADS
	if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
	  abort();
#endif /* BK_USING_PTHREADS */
      }

      if (pidlist_delete(bpi->bpi_data, pid) != DICT_OK)
      {
	bk_error_printf(B, BK_ERR_ERR, "Could not delete pid from list: %s\n", pidlist_error_reason(bpi->bpi_data, NULL));
      }

      // Failure here will not kill the loop since we've already grabbed successor
      pid_destroy(B, pid);
    }
    pid = npid;
  }

  BK_VRETURN(B);
}
int main(int argc, char *argv[]) { CREW crew; int i; int count = 0; int res; BOOLEAN result; char ** keys; void * statusp; C = new_conf(); parse_cmdline_cfg(C, argc, argv); parse_cfgfile(C); parse_cmdline(C, argc, argv); RUNNER R = new_runner(conf_get_user(C), conf_get_group(C)); runas(R); runner_destroy(R); sigmasker(); if (is_daemon(C)) { res = fork(); if (res == -1 ){ // ERRROR NOTIFY(FATAL, "%s: [error] unable to run in the background\n", program_name); } else if (res == 0) { // CHILD PID P = new_pid(conf_get_pidfile(C)); HASH H = conf_get_items(C); count = conf_get_count(C); keys = hash_get_keys_delim(H, ':'); if ((crew = new_crew(count, count, FALSE)) == NULL) { NOTIFY(FATAL, "%s: [error] unable to allocate memory for %d log files", program_name, count); } set_pid(P, getpid()); pid_destroy(P); for (i = 0; i < count && crew_get_shutdown(crew) != TRUE; i++) { FIDO F = new_fido(C, keys[i]); result = crew_add(crew, (void*)start, F); if (result == FALSE) { NOTIFY(FATAL, "%s: [error] unable to spawn additional threads", program_name); } } crew_join(crew, TRUE, &statusp); conf_destroy(C); } else { // PARENT } } else { HASH H = conf_get_items(C); count = conf_get_count(C); keys = hash_get_keys_delim(H, ':'); if ((crew = new_crew(count, count, FALSE)) == NULL) { NOTIFY(FATAL, "%s: [error] unable to allocate memory for %d log files", program_name, count); } for (i = 0; i < count && crew_get_shutdown(crew) != TRUE; i++) { FIDO F = new_fido(C, keys[i]); result = crew_add(crew, (void*)start, F); } crew_join(crew, TRUE, &statusp); conf_destroy(C); } exit(EXIT_SUCCESS); } /* end of int main **/
/** * Do one polling read. You will return when data is available, or * when the timeout has expired, or when the channel has been canceled. * * THREADS: MT-SAFE * * @param B BAKA thread/global state. * @param bpi The polling state to use. * @param datap Data to pass up to the user (copyout). * @param statup Status to pass up to the user (copyout). * @param timeout Maximum time to wait in milliseconds (0->forever, -1->no wait) * @param flags flags for bk_run_once (e.g. BK_RUN_ONCE_FLAG_DONT_BLOCK) * @return <i>-1</i> on failure.<br> * @return <i>0</i> on success (with data). * @return <i>positive</i> on no progress. */ int bk_polling_io_read(bk_s B, struct bk_polling_io *bpi, bk_vptr **datap, bk_ioh_status_e *status, time_t timeout, bk_flags flags) { BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk"); struct polling_io_data *pid; int ret = 0; int timedout = 0; if (!bpi || !datap || !status) { bk_error_printf(B, BK_ERR_ERR,"Illegal arguments\n"); BK_RETURN(B, -1); } *datap = NULL; *status = BkIohStatusNoStatus; #ifdef BK_USING_PTHREADS if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0) abort(); #endif /* BK_USING_PTHREADS */ if (timeout > 0 #ifdef BK_USING_PTHREADS && (BK_FLAG_ISCLEAR(bpi->bpi_flags, BPI_FLAG_THREADED) || !BK_GENERAL_FLAG_ISTHREADON(B)) #endif /* BK_USING_PTHREADS */ ) { if (bk_run_enqueue_delta(B, bpi->bpi_ioh->ioh_run, timeout, bpi_rdtimeout, bpi, &bpi->bpi_rdtimeoutevent, 0) < 0) { bk_error_printf(B, BK_ERR_ERR, "Could not enqueue new pollio timeout event\n"); ret = -1; goto unlockexit; } } while (!(pid = pidlist_minimum(bpi->bpi_data)) && !timedout) { if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_SAW_EOF)) { *status = BkIohStatusIohReadEOF; ret = 1; goto unlockexit; } if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_READ_DEAD) || BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_IOH_DEAD) || bk_polling_io_is_canceled(B, bpi, 0)) { bk_error_printf(B, BK_ERR_ERR, "Reading from dead/canceled channel\n"); ret = -1; goto unlockexit; } if (timeout == 
-1 || timedout) { bk_error_printf(B, BK_ERR_ERR, "No further progress possible\n"); ret = 1; goto unlockexit; } #ifdef BK_USING_PTHREADS if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_THREADED) && BK_GENERAL_FLAG_ISTHREADON(B) && !bk_run_on_iothread(B, bpi->bpi_ioh->ioh_run)) { struct timespec ts; struct timeval tv; if (timeout == 0) { pthread_cond_wait(&bpi->bpi_rdcond, &bpi->bpi_lock); } else { int tret; gettimeofday(&tv, NULL); ts.tv_sec = tv.tv_sec + timeout / 1000; ts.tv_nsec = tv.tv_usec * 1000 + (timeout % 1000) * 1000000; bk_debug_printf_and(B, 64, "Entering read timed condition wait %d.%09d, pid %d\n", (int)ts.tv_sec, (int)ts.tv_nsec, getpid()); if (((tret = pthread_cond_timedwait(&bpi->bpi_rdcond, &bpi->bpi_lock, &ts)) < 0) && (tret == ETIMEDOUT)) timedout++; bk_debug_printf_and(B, 64, "Exiting read timed condition wait: %d, %d\n", tret, errno); } } else #endif /* BK_USING_PTHREADS */ { #ifdef BK_USING_PTHREADS if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0) abort(); #endif /* BK_USING_PTHREADS */ if (bk_run_once(B, bpi->bpi_ioh->ioh_run, flags) < 0) { bk_error_printf(B, BK_ERR_ERR, "polling bk_run_once failed severely\n"); ret = -1; goto unlockexit; } #ifdef BK_USING_PTHREADS if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0) abort(); #endif /* BK_USING_PTHREADS */ if (timeout > 0 && !bpi->bpi_rdtimeoutevent) { bk_debug_printf_and(B, 1, "Received timeout on bpi:%p (fd: %d)\n", bpi, bpi->bpi_ioh->ioh_fdin); timedout++; } } } if (pid) { if (pid->pid_data) { bpi->bpi_tell += pid->pid_data->len; bpi->bpi_size -= pid->pid_data->len; *datap = pid->pid_data; pid->pid_data = NULL; if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_SELF_THROTTLE) && bpi->bpi_size <= bpi->bpi_ioh->ioh_readq.biq_queuemax/2) { BK_FLAG_CLEAR(bpi->bpi_flags, BPI_FLAG_SELF_THROTTLE); bk_polling_io_unthrottle(B, bpi, POLLIO_ALREADY_LOCKED); } } else { ret = 1; } *status = pid->pid_status; if (pidlist_delete(bpi->bpi_data, pid) != DICT_OK) { 
bk_error_printf(B, BK_ERR_ERR, "Could not delete pid from list: %s\n", pidlist_error_reason(bpi->bpi_data, NULL)); } pid_destroy(B, pid); } else { bk_debug_printf_and(B, 1, "Returning timeout on bpi %p\n", bpi); ret = 1; } unlockexit: // Dequeue timeout event if necessary if (timeout > 0 && bpi->bpi_rdtimeoutevent && !timedout) { bk_run_dequeue(B, bpi->bpi_ioh->ioh_run, bpi->bpi_rdtimeoutevent, BK_RUN_DEQUEUE_EVENT); bpi->bpi_rdtimeoutevent = NULL; } #ifdef BK_USING_PTHREADS if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0) abort(); #endif /* BK_USING_PTHREADS */ BK_RETURN(B, ret); }
/**
 * The on demand I/O subsystem's ioh handler. Remember this handles both
 * reads and writes: read data is queued on the bpi list for a later
 * bk_polling_io_read; write completions update the outstanding counters
 * and wake any waiting writers.
 *
 * THREADS: MT-SAFE
 *
 * @param B BAKA thread/global state.
 * @param data Vector of data buffers from the ioh (read data, or the
 * written buffer being returned on write completion/abort).
 * @param args My local data (the @a bk_polling_io, passed as void *).
 * @param ioh ioh over which the data arrived.
 * @param status What's going on in the world.
 */
static void polling_io_ioh_handler(bk_s B, bk_vptr *data, void *args, struct bk_ioh *ioh, bk_ioh_status_e status)
{
  BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk");
  struct bk_polling_io *bpi = args;
  struct polling_io_data *pid = NULL;
  int (*clc_add)(dict_h dll, dict_obj obj) = dll_append; // Can't use #define here...

  if (!bpi || !ioh)
  {
    bk_error_printf(B, BK_ERR_ERR,"Illegal arguments\n");
    BK_VRETURN(B);
  }

  // Every event (even status-only) gets a queue node; freed on error/early return.
  if (!(pid = pid_create(B)))
  {
    bk_error_printf(B, BK_ERR_ERR, "Could not allocate pid: %s\n", strerror(errno));
    goto error;
  }

  bk_debug_printf_and(B, 64, "IOH polling handler: %d (in: %d, out %d)\n", status, ioh->ioh_fdin, ioh->ioh_fdout);

  switch (status)
  {
  case BkIohStatusReadComplete:
  case BkIohStatusIncompleteRead:
    {
      /*
       * NB: We *assume* (safely, we believe) that in the cases of
       * BkIohStatusReadComplete and BkIohStatusIncompleteRead that data[1]
       * exists (ie we can reference data[1].ptr without fear of a core
       * dump). Since we're optimizing here, we thus don't bother checking
       * for data[0].ptr (which might otherwise seem required for "safe"
       * programming)
       */
      if (!data[1].ptr && IOH_DATA_SEIZE_PERMITTED(ioh))
      {
	/*
	 * This checks the most common case where the one buffer has been
	 * passed up. In this case we "seize" the data (which has been
	 * copied at the ioh level) and order the ioh level *not* to free
	 * it (data[0].ptr = NULL). This way we avoid the issue of
	 * coalescion entirely.
	 */
	if (!(BK_MALLOC(pid->pid_data)))
	{
	  bk_error_printf(B, BK_ERR_ERR, "Could not allocate vptr for I/O data: %s\n", strerror(errno));
	  goto error;
	}
	*pid->pid_data = data[0];
	data[0].ptr = NULL;			// Tell the ioh we own the buffer now.
      }
      else
      {
	// If, OTOH, we have 2 or more data bufs, coalesce them with a copy.
	if (!(pid->pid_data = bk_ioh_coalesce(B, data, NULL, BK_IOH_COALESCE_FLAG_MUST_COPY, NULL)))
	{
	  bk_error_printf(B, BK_ERR_ERR, "Could not coalesce relay data\n");
	  goto error;
	}
      }
    }
    bk_debug_printf_and(B, 2, "Dequeued %d bytes from descriptor %d\n", pid->pid_data[0].len, ioh->ioh_fdin);
    break;

  case BkIohStatusIohClosing:
    if (BK_FLAG_ISCLEAR(bpi->bpi_flags, BPI_FLAG_DONT_DESTROY))
    {
      // <BUG>There would appear to be a problem here since io_destroy does not notify user, so user will think he can use the bpi</BUG>
      bk_polling_io_destroy(B, bpi);
      if (pid)
	pid_destroy(B, pid);
      BK_VRETURN(B);
    }

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    // The ioh is now dead.
    bk_debug_printf_and(B, 128,"Polling IOH is closing\n");
    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_IOH_DEAD);

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */
    break;

  case BkIohStatusIohReadError:
    // Mark the read side dead; readers will see this flag and bail out.
#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_READ_DEAD);

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */
    break;

  case BkIohStatusIohWriteError:
    // A write failed: one fewer outstanding write, write side is dead.
#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    bpi->bpi_wroutstanding--;
    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_WRITE_DEAD);
    bk_error_printf(B, BK_ERR_ERR, "Polling write failed at IOH level\n");

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */
    break;

  case BkIohStatusIohReadEOF:
    // Record EOF; the pid (with NULL pid_data) is still queued below so the
    // reader receives the EOF status in order.
#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_SAW_EOF);

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */
    break;

  case BkIohStatusWriteComplete:
  case BkIohStatusWriteAborted:
    // Write finished (or was aborted): update accounting, wake writers,
    // free the written buffer, and return — nothing is queued for readers.
#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    bpi->bpi_wroutstanding--;
    bpi->bpi_wrbytes -= data->len;

    bk_debug_printf_and(B, 2, "Dequeued %d bytes for outstanding total of %d\n", data->len, bpi->bpi_wrbytes);

#ifdef BK_USING_PTHREADS
    bk_debug_printf_and(B, 64, "Broadcasting write timed condition wait\n");
    pthread_cond_broadcast(&bpi->bpi_wrcond);
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    free(data->ptr);
    free(data);
    pid_destroy(B, pid);			// Unused for write events.
    data = NULL;
    BK_VRETURN(B);
    break;

  case BkIohStatusIohSeekSuccess:
    // A successful seek invalidates all cached read data.
    polling_io_flush(B, bpi, 0);
    // Intentional fall through.
  case BkIohStatusIohSeekFailed:
    clc_add = dll_insert;			// Put seek messages on front. Can't use #define
    break;

  // No default so gcc can catch missing cases.
  case BkIohStatusNoStatus:
    bk_error_printf(B, BK_ERR_ERR, "Uninitialized status\n");
    goto error;
  }

  pid->pid_status = status;

  if (pid->pid_data)
  {
#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */

    bpi->bpi_size += pid->pid_data->len;

    /*
     * Pause reading if buffer is full. <TODO> if file open for only writing
     * mark the case so we don't bother.</TODO>
     */
    if (ioh->ioh_readq.biq_queuemax && bpi->bpi_size >= ioh->ioh_readq.biq_queuemax)
    {
      BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_SELF_THROTTLE);
      bk_polling_io_throttle(B, bpi, POLLIO_ALREADY_LOCKED);
    }

#ifdef BK_USING_PTHREADS
    if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
      abort();
#endif /* BK_USING_PTHREADS */
  }

  // Queue the message (append normally; insert at front for seek results).
  if ((*clc_add)(bpi->bpi_data, pid) != DICT_OK)
  {
    bk_error_printf(B, BK_ERR_ERR, "Could not append data to data list: %s\n", pidlist_error_reason(bpi->bpi_data, NULL));
    goto error;
  }

#ifdef BK_USING_PTHREADS
  // Wake any reader blocked in bk_polling_io_read.
  bk_debug_printf_and(B, 64, "Broadcasting read timed condition wait\n");
  pthread_cond_broadcast(&bpi->bpi_rdcond);
#endif /* BK_USING_PTHREADS */

  BK_VRETURN(B);

 error:
  if (pid)
    pid_destroy(B, pid);
  BK_VRETURN(B);
}
/**
 * Close up on demand I/O.
 *
 * <WARNING>
 * For various and sundry reasons it has become clear that ioh should be
 * closed here too. Therefore if you are using polling io then you should
 * surrender control of the ioh (or more to the point control it via the
 * polling routines). In this respect polling io is like b_relay.c.
 * </WARNING>
 *
 * THREADS: MT-SAFE
 *
 * @param B BAKA thread/global state.
 * @param bpi The on demand state to close.
 * @param flags BK_POLLING_LINGER so that pending data is written<br>
 * BK_POLLING_DONT_LINGER to return immediately, regardless of pending
 * output.
 */
void bk_polling_io_close(bk_s B, struct bk_polling_io *bpi, bk_flags flags)
{
  BK_ENTRY(B, __FUNCTION__, __FILE__, "libbk");
  struct polling_io_data *pid;

  if (!bpi)
  {
    bk_error_printf(B, BK_ERR_ERR,"Illegal arguments\n");
    BK_VRETURN(B);
  }

  bk_debug_printf_and(B, 64, "Closing bpi for fd %d/%d\n", bpi->bpi_ioh->ioh_fdin, bpi->bpi_ioh->ioh_fdout);

#ifdef BK_USING_PTHREADS
  if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_lock(&bpi->bpi_lock) != 0)
    abort();
#endif /* BK_USING_PTHREADS */

  BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_CLOSING);

  if (BK_FLAG_ISSET(flags, BK_POLLING_LINGER))
  {
    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_LINGER);
  }

  // DONT_LINGER — or a dead write side, where lingering could never finish —
  // overrides any linger request.
  if (BK_FLAG_ISSET(flags, BK_POLLING_DONT_LINGER) ||
      BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_WRITE_DEAD))
  {
    BK_FLAG_CLEAR(bpi->bpi_flags, BPI_FLAG_LINGER);
    BK_FLAG_SET(bpi->bpi_flags, BPI_FLAG_DONT_LINGER);
  }

  // Nuke everything from the cached read list.
  // (Double parens: intentional assignment-in-condition; silences -Wparentheses.)
  while ((pid = pidlist_minimum(bpi->bpi_data)))
  {
    if (pidlist_delete(bpi->bpi_data, pid) != DICT_OK)
      break;
    pid_destroy(B, pid);
  }

#ifdef BK_USING_PTHREADS
  if (BK_GENERAL_FLAG_ISTHREADON(B) && pthread_mutex_unlock(&bpi->bpi_lock) != 0)
    abort();
#endif /* BK_USING_PTHREADS */

  if (BK_FLAG_ISCLEAR(bpi->bpi_flags, BPI_FLAG_IOH_DEAD))
    bk_ioh_shutdown(B, bpi->bpi_ioh, SHUT_RD, 0);

  // bpi_ioh may have been nuked after read shutdown
  if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_IOH_DEAD))
  {
    bk_polling_io_destroy(B, bpi);
  }
  else
  {
    if (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_LINGER))
    {
      // Drive the run loop until outstanding writes drain (or the write
      // side dies / the run loop fails severely).
      while (bpi->bpi_wroutstanding && BK_FLAG_ISCLEAR(bpi->bpi_flags, BPI_FLAG_WRITE_DEAD))
      {
	if (bk_run_once(B, bpi->bpi_ioh->ioh_run, BK_RUN_ONCE_FLAG_DONT_BLOCK) < 0)
	{
	  bk_error_printf(B, BK_ERR_ERR, "polling bk_run_once failed severely\n");
	  break;
	}
      }
    }
    // Permit the ioh-closing callback to destroy the bpi.
    BK_FLAG_CLEAR(bpi->bpi_flags, BPI_FLAG_DONT_DESTROY);
    bk_ioh_close(B, bpi->bpi_ioh, (BK_FLAG_ISSET(bpi->bpi_flags, BPI_FLAG_DONT_LINGER)?BK_IOH_ABORT:0));
  }
  BK_VRETURN(B);
}