static int localproxy_select_result(int fd, int event)
{
    Local_Proxy_Socket s;
    char buf[20480];
    int ret;

    if (!(s = find234(localproxy_by_fromfd, &fd, localproxy_fromfd_find)) &&
        !(s = find234(localproxy_by_tofd, &fd, localproxy_tofd_find)))
        return 1;                      /* boggle */

    if (event == 1) {
        assert(fd == s->from_cmd);
        ret = read(fd, buf, sizeof(buf));
        if (ret < 0) {
            return plug_closing(s->plug, strerror(errno), errno, 0);
        } else if (ret == 0) {
            return plug_closing(s->plug, NULL, 0, 0);
        } else {
            return plug_receive(s->plug, 0, buf, ret);
        }
    } else if (event == 2) {
        assert(fd == s->to_cmd);
        if (localproxy_try_send(s))
            plug_sent(s->plug, bufchain_size(&s->pending_output_data));
        return 1;
    }

    return 1;
}
static int agent_select_result(int fd, int event)
{
    int ret;
    struct agent_connection *conn;

    assert(event == 1);                /* not selecting for anything but R */

    conn = find234(agent_connections, &fd, agent_connfind);
    if (!conn) {
        uxsel_del(fd);
        return 1;
    }

    ret = read(fd, conn->retbuf+conn->retlen, conn->retsize-conn->retlen);
    if (ret <= 0) {
        if (conn->retbuf != conn->sizebuf) sfree(conn->retbuf);
        conn->retbuf = NULL;
        conn->retlen = 0;
        goto done;
    }
    conn->retlen += ret;
    if (conn->retsize == 4 && conn->retlen == 4) {
        conn->retsize = GET_32BIT(conn->retbuf);
        if (conn->retsize <= 0) {
            conn->retbuf = NULL;
            conn->retlen = 0;
            goto done;
        }
        conn->retsize += 4;
        assert(conn->retbuf == conn->sizebuf);
        conn->retbuf = snewn(conn->retsize, char);
        memcpy(conn->retbuf, conn->sizebuf, 4);
    }
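The 4-byte read above is then interpreted as a length prefix via GET_32BIT. A minimal hedged sketch of what that macro is assumed to do, namely a most-significant-byte-first 32-bit decode matching SSH agent wire framing; the name GET_32BIT_SKETCH is hypothetical:

/* Hedged sketch: decode a big-endian 32-bit length prefix from a byte buffer. */
#define GET_32BIT_SKETCH(cp)                               \
    (((unsigned long)(unsigned char)(cp)[0] << 24) |       \
     ((unsigned long)(unsigned char)(cp)[1] << 16) |       \
     ((unsigned long)(unsigned char)(cp)[2] <<  8) |       \
     ((unsigned long)(unsigned char)(cp)[3]))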
xj_jconf xj_jcon_check_jconf(xj_jcon jbc, char* id)
{
    str sid;
    xj_jconf jcf = NULL, p = NULL;

    if(!jbc || !id || !jbc->nrjconf)
        return NULL;

#ifdef XJ_EXTRA_DEBUG
    DBG("XJAB: xj_jcon_check_jconf: looking for conference\n");
#endif
    sid.s = id;
    sid.len = strlen(id);
    if((jcf = xj_jconf_new(&sid))==NULL)
        return NULL;
    if(xj_jconf_init_jab(jcf))
        goto clean;

    if((p = find234(jbc->jconf, (void*)jcf, NULL)) != NULL)
    {
#ifdef XJ_EXTRA_DEBUG
        DBG("XJAB: xj_jcon_check_jconf: conference found\n");
#endif
        xj_jconf_free(jcf);
        return p;
    }

clean:
#ifdef XJ_EXTRA_DEBUG
    DBG("XJAB: xj_jcon_check_jconf: conference not found\n");
#endif
    xj_jconf_free(jcf);
    return NULL;
}
struct sftp_request *sftp_find_request(struct sftp_packet *pktin)
{
    unsigned id;
    struct sftp_request *req;

    if (!pktin) {
        fxp_internal_error("did not receive a valid SFTP packet\n");
        return NULL;
    }

    id = get_uint32(pktin);
    if (get_err(pktin)) {
        fxp_internal_error("did not receive a valid SFTP packet\n");
        return NULL;
    }

    req = find234(sftp_requests, &id, sftp_reqfind);
    if (!req || !req->registered) {
        fxp_internal_error("request ID mismatch\n");
        return NULL;
    }

    del234(sftp_requests, req);

    return req;
}
void uxsel_del(int fd)
{
    struct fd *oldfd = find234(fds, &fd, uxsel_fd_findcmp);
    if (oldfd) {
        if (oldfd->id)
            uxsel_input_remove(oldfd->id);
        del234(fds, oldfd);
        sfree(oldfd);
    }
}
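Several of these snippets pass a bare int pointer as the search key together with a dedicated find-comparator. A hedged sketch of what a comparator pair like uxsel_fd_cmp/uxsel_fd_findcmp presumably looks like; struct fd_sketch and both function names are placeholders, not the real definitions:

struct fd_sketch {                     /* stand-in for the real struct fd */
    int fd;
    int (*callback)(int fd, int event);
    /* ... further bookkeeping fields in the real structure ... */
};

/* Sort-comparator: orders whole elements, used with newtree234/add234. */
static int fd_cmp_sketch(void *av, void *bv)
{
    struct fd_sketch *a = (struct fd_sketch *)av;
    struct fd_sketch *b = (struct fd_sketch *)bv;
    return a->fd < b->fd ? -1 : a->fd > b->fd ? +1 : 0;
}

/* Find-comparator: matches a bare int key against an element, so
 * find234(fds, &fd, fd_findcmp_sketch) can look an entry up without
 * building a dummy element. */
static int fd_findcmp_sketch(void *av, void *bv)
{
    int a = *(int *)av;
    struct fd_sketch *b = (struct fd_sketch *)bv;
    return a < b->fd ? -1 : a > b->fd ? +1 : 0;
}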
static int serial_select_result(int fd, int event)
{
    Serial serial;
    char buf[4096];
    int ret;
    int finished = FALSE;

    serial = find234(serial_by_fd, &fd, serial_find_by_fd);
    if (!serial)
        return 1;                      /* spurious event; keep going */

    if (event == 1) {
        ret = read(serial->fd, buf, sizeof(buf));

        if (ret == 0) {
            /*
             * Shouldn't happen on a real serial port, but I'm open
             * to the idea that there might be two-way devices we
             * can treat _like_ serial ports which can return EOF.
             */
            finished = TRUE;
        } else if (ret < 0) {
#ifdef EAGAIN
            if (errno == EAGAIN)
                return 1;              /* spurious */
#endif
#ifdef EWOULDBLOCK
            if (errno == EWOULDBLOCK)
                return 1;              /* spurious */
#endif
            perror("read serial port");
            exit(1);
        } else if (ret > 0) {
            serial->inbufsize = from_backend(serial->frontend, 0, buf, ret);
            serial_uxsel_setup(serial); /* might acquire backlog and freeze */
        }
    } else if (event == 2) {
        /*
         * Attempt to send data down the serial port.
         */
        serial_try_write(serial);
    }

    if (finished) {
        serial_close(serial);
        serial->finished = TRUE;
        notify_remote_exit(serial->frontend);
    }

    return !finished;
}
int pollwrap_get_fd_events(pollwrapper *pw, int fd)
{
    pollwrap_fdtopos *f2p, f2p_find;

    assert(fd >= 0);

    f2p_find.fd = fd;
    f2p = find234(pw->fdtopos, &f2p_find, NULL);
    if (!f2p)
        return 0;

    return pw->fds[f2p->pos].revents;
}
int select_result(int fd, int event)
{
    struct fd *fdstruct = find234(fds, &fd, uxsel_fd_findcmp);

    /*
     * Apparently this can sometimes be NULL. Can't see how, but I
     * assume it means I need to ignore the event since it's on an
     * fd I've stopped being interested in. Sigh.
     */
    if (fdstruct)
        return fdstruct->callback(fd, event);
    else
        return 1;
}
static struct sftp_request *sftp_alloc_request(void)
{
    unsigned low, high, mid;
    int tsize;
    struct sftp_request *r;

    if (sftp_requests == NULL)
        sftp_requests = newtree234(sftp_reqcmp);

    /*
     * First-fit allocation of request IDs: always pick the lowest
     * unused one. To do this, binary-search using the counted
     * B-tree to find the largest ID which is in a contiguous
     * sequence from the beginning. (Precisely everything in that
     * sequence must have ID equal to its tree index plus
     * REQUEST_ID_OFFSET.)
     */
    tsize = count234(sftp_requests);

    low = -1;
    high = tsize;
    while (high - low > 1) {
        mid = (high + low) / 2;
        r = index234(sftp_requests, mid);
        if (r->id == mid + REQUEST_ID_OFFSET)
            low = mid;                 /* this one is fine */
        else
            high = mid;                /* this one is past it */
    }

    /*
     * Now low points to either -1, or the tree index of the
     * largest ID in the initial sequence.
     */
    {
        unsigned i = low + 1 + REQUEST_ID_OFFSET;
        assert(NULL == find234(sftp_requests, &i, sftp_reqfind));
    }

    /*
     * So the request ID we need to create is
     * low + 1 + REQUEST_ID_OFFSET.
     */
    r = snew(struct sftp_request);
    r->id = low + 1 + REQUEST_ID_OFFSET;
    r->registered = false;
    r->userdata = NULL;
    add234(sftp_requests, r);
    return r;
}
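The binary search above is only valid if the tree really is ordered by request ID. A hedged sketch of the comparator pair that sftp_reqcmp and sftp_reqfind are assumed to implement; the struct fields and names here are illustrative, not the actual PuTTY definitions:

#include <stdbool.h>

struct sftp_request_sketch {           /* stand-in for struct sftp_request */
    unsigned id;
    bool registered;
    void *userdata;
};

/* Sort-comparator: orders whole requests by id, which is what makes the
 * counted-tree binary search above valid (tree index i holds the
 * (i+1)-th smallest id). */
static int sftp_reqcmp_sketch(void *av, void *bv)
{
    struct sftp_request_sketch *a = (struct sftp_request_sketch *)av;
    struct sftp_request_sketch *b = (struct sftp_request_sketch *)bv;
    return a->id < b->id ? -1 : a->id > b->id ? +1 : 0;
}

/* Find-comparator: matches a bare unsigned key, as in
 * find234(sftp_requests, &i, sftp_reqfind) above. */
static int sftp_reqfind_sketch(void *av, void *bv)
{
    unsigned a = *(unsigned *)av;
    struct sftp_request_sketch *b = (struct sftp_request_sketch *)bv;
    return a < b->id ? -1 : a > b->id ? +1 : 0;
}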
xj_jconf xj_jcon_get_jconf(xj_jcon jbc, str* sid, char dl)
{
    xj_jconf jcf = NULL, p;

    if(!jbc || !sid || !sid->s || sid->len <= 0)
        return NULL;

#ifdef XJ_EXTRA_DEBUG
    DBG("XJAB: xj_jcon_get_jconf: looking for conference\n");
#endif
    if((jcf = xj_jconf_new(sid))==NULL)
        return NULL;
    if(xj_jconf_init_sip(jcf, jbc->jkey->id, dl))
        goto clean;

    if(jbc->nrjconf && (p = find234(jbc->jconf, (void*)jcf, NULL)) != NULL)
    {
#ifdef XJ_EXTRA_DEBUG
        DBG("XJAB: xj_jcon_get_jconf: conference found\n");
#endif
        xj_jconf_free(jcf);
        return p;
    }

    if(jbc->nrjconf >= XJ_MAX_JCONF)
        goto clean;

    if(jbc->nrjconf==0)
        if(jbc->jconf==NULL)
            if((jbc->jconf = newtree234(xj_jconf_cmp)) == NULL)
                goto clean;

    if((p = add234(jbc->jconf, (void*)jcf)) != NULL)
    {
#ifdef XJ_EXTRA_DEBUG
        DBG("XJAB: xj_jcon_get_jconf: new conference created\n");
#endif
        jbc->nrjconf++;
        return p;
    }

clean:
    DBG("XJAB: xj_jcon_get_jconf: error looking for conference\n");
    xj_jconf_free(jcf);
    return NULL;
}
static int macrolookup(tree234 * macros, input * in, wchar_t * name,
                       filepos * pos)
{
    macro m, *gotit;
    m.name = name;
    gotit = find234(macros, &m, NULL);
    if (gotit) {
        macrostack *expansion = mknew(macrostack);
        expansion->next = in->stack;
        expansion->text = gotit->text;
        expansion->pos = *pos;         /* structure copy */
        expansion->ptr = 0;
        expansion->npushback = in->npushback;
        in->stack = expansion;
        return TRUE;
    } else
        return FALSE;
}
/**
 * set the flag of the connection identified by 'jkey'
 */
int xj_wlist_set_flag(xj_wlist jwl, xj_jkey jkey, int fl)
{
    int i;
    xj_jkey p = NULL;

    if(jwl==NULL || jkey==NULL || jkey->id==NULL || jkey->id->s==NULL)
        return -1;

#ifdef XJ_EXTRA_DEBUG
    LM_DBG("looking for <%.*s> having id=%d\n", jkey->id->len, jkey->id->s,
            jkey->hash);
#endif

    i = 0;
    while(i < jwl->len)
    {
        lock_set_get(jwl->sems, i);
        if(jwl->workers[i].pid <= 0)
        {
            lock_set_release(jwl->sems, i);
            i++;
            continue;
        }
        if((p=find234(jwl->workers[i].sip_ids, (void*)jkey, NULL)) != NULL)
        {
            p->flag = fl;
            lock_set_release(jwl->sems, i);
#ifdef XJ_EXTRA_DEBUG
            LM_DBG("the connection for <%.*s> marked with flag=%d",
                    jkey->id->len, jkey->id->s, fl);
#endif
            return jwl->workers[i].wpipe;
        }
        lock_set_release(jwl->sems, i);
        i++;
    }

#ifdef XJ_EXTRA_DEBUG
    LM_DBG("entry does not exist for <%.*s>\n", jkey->id->len, jkey->id->s);
#endif
    return -1;
}
/*
 * Call to run any timers whose time has reached the present.
 * Returns the time (in ticks) expected until the next timer after
 * that triggers.
 */
int run_timers(long anow, long *next)
{
    struct timer *first;

    init_timers();

    now = GETTICKCOUNT();

    while (1) {
        first = (struct timer *)index234(timers, 0);

        if (!first)
            return FALSE;              /* no timers remaining */

        if (find234(timer_contexts, first->ctx, NULL) == NULL) {
            /*
             * This timer belongs to a context that has been
             * expired. Delete it without running.
             */
            delpos234(timers, 0);
            sfree(first);
        } else if (first->now - now <= 0 ||
                   now - (first->when_set - 10) < 0) {
            /*
             * This timer is active and has reached its running
             * time. Run it.
             */
            delpos234(timers, 0);
            first->fn(first->ctx, first->now);
            sfree(first);
        } else {
            /*
             * This is the first still-active timer that is in the
             * future. Return how long it has yet to go.
             */
            *next = first->now;
            return TRUE;
        }
    }
}
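index234(timers, 0) yields the earliest-firing timer only because the timers tree is kept sorted by firing time. A hedged sketch of what such an ordering comparator could look like, using signed differences from the module-level current tick so that counter wraparound is handled; the struct, typedef and tie-break rule are assumptions, not the original code:

#include <stdint.h>

typedef void (*timer_fn_sketch)(void *ctx, long now);

static long now_sketch;                /* stands in for the module-level `now`
                                        * that run_timers assigns */

struct timer_sketch {                  /* stand-in for the real struct timer */
    timer_fn_sketch fn;
    void *ctx;
    long now;                          /* absolute tick at which to fire */
    long when_set;                     /* tick at which it was scheduled */
};

/* Sort-comparator: order by signed distance of the firing time from the
 * current tick, then tie-break on the context pointer so distinct timers
 * never compare equal. */
static int timer_cmp_sketch(void *av, void *bv)
{
    struct timer_sketch *a = (struct timer_sketch *)av;
    struct timer_sketch *b = (struct timer_sketch *)bv;
    long at = a->now - now_sketch;
    long bt = b->now - now_sketch;

    if (at != bt)
        return at < bt ? -1 : +1;
    if ((uintptr_t)a->ctx != (uintptr_t)b->ctx)
        return (uintptr_t)a->ctx < (uintptr_t)b->ctx ? -1 : +1;
    return 0;
}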
void pollwrap_add_fd_events(pollwrapper *pw, int fd, int events)
{
    pollwrap_fdtopos *f2p, f2p_find;

    assert(fd >= 0);

    f2p_find.fd = fd;
    f2p = find234(pw->fdtopos, &f2p_find, NULL);
    if (!f2p) {
        sgrowarray(pw->fds, pw->fdsize, pw->nfd);
        size_t index = pw->nfd++;
        pw->fds[index].fd = fd;
        pw->fds[index].events = pw->fds[index].revents = 0;

        f2p = snew(pollwrap_fdtopos);
        f2p->fd = fd;
        f2p->pos = index;
        pollwrap_fdtopos *added = add234(pw->fdtopos, f2p);
        assert(added == f2p);
    }

    pw->fds[f2p->pos].events |= events;
}
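The two pollwrap functions above call find234 with a NULL find-comparator and a stack dummy that only has fd filled in. That pattern only works if the tree's own sort-comparator ignores every other field. A hedged sketch of such a comparator; the struct layout and names are assumptions:

#include <stddef.h>

typedef struct pollwrap_fdtopos_sketch {
    int fd;                            /* key: the descriptor */
    size_t pos;                        /* index of this fd in pw->fds */
} pollwrap_fdtopos_sketch;

/* Sort-comparator for newtree234(): keys on fd alone, so a partially
 * initialised stack element with only fd set is a valid search key when
 * find234() is given a NULL find-comparator. */
static int pollwrap_fd_cmp_sketch(void *av, void *bv)
{
    pollwrap_fdtopos_sketch *a = (pollwrap_fdtopos_sketch *)av;
    pollwrap_fdtopos_sketch *b = (pollwrap_fdtopos_sketch *)bv;
    return a->fd < b->fd ? -1 : a->fd > b->fd ? +1 : 0;
}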
/**
 * return communication pipe with the worker that will process the message
 * for the key 'jkey', only if it already exists, or -1 on error
 * - jwl : pointer to the workers list
 * - jkey : key of the entity (connection to Jabber - usually SHOULD be the
 *   FROM header of the incoming SIP message)
 * - p : will point to the SHM location of the 'jkey' entry in jwl
 */
int xj_wlist_check(xj_wlist jwl, xj_jkey jkey, xj_jkey *p)
{
    int i;

    if(jwl==NULL || jkey==NULL || jkey->id==NULL || jkey->id->s==NULL)
        return -1;

    i = 0;
    *p = NULL;
    while(i < jwl->len)
    {
        lock_set_get(jwl->sems, i);
        if(jwl->workers[i].pid <= 0)
        {
            lock_set_release(jwl->sems, i);
            i++;
            continue;
        }
        if((*p = find234(jwl->workers[i].sip_ids, (void*)jkey, NULL)) != NULL)
        {
            lock_set_release(jwl->sems, i);
#ifdef XJ_EXTRA_DEBUG
            LM_DBG("entry exists for <%.*s> in the pool of <%d> [%d]\n",
                    jkey->id->len, jkey->id->s, jwl->workers[i].pid, i);
#endif
            return jwl->workers[i].wpipe;
        }
        lock_set_release(jwl->sems, i);
        i++;
    }

#ifdef XJ_EXTRA_DEBUG
    LM_DBG("entry does not exist for <%.*s>\n", jkey->id->len, jkey->id->s);
#endif
    return -1;
}
const char *winsock_error_string(int error)
{
    const char prefix[] = "Network error: ";
    struct errstring *es;

    /*
     * Error codes we know about and have historically had reasonably
     * sensible error messages for.
     */
    switch (error) {
      case WSAEACCES:
        return "Network error: Permission denied";
      case WSAEADDRINUSE:
        return "Network error: Address already in use";
      case WSAEADDRNOTAVAIL:
        return "Network error: Cannot assign requested address";
      case WSAEAFNOSUPPORT:
        return "Network error: Address family not supported by protocol family";
      case WSAEALREADY:
        return "Network error: Operation already in progress";
      case WSAECONNABORTED:
        return "Network error: Software caused connection abort";
      case WSAECONNREFUSED:
        return "Network error: Connection refused";
      case WSAECONNRESET:
        return "Network error: Connection reset by peer";
      case WSAEDESTADDRREQ:
        return "Network error: Destination address required";
      case WSAEFAULT:
        return "Network error: Bad address";
      case WSAEHOSTDOWN:
        return "Network error: Host is down";
      case WSAEHOSTUNREACH:
        return "Network error: No route to host";
      case WSAEINPROGRESS:
        return "Network error: Operation now in progress";
      case WSAEINTR:
        return "Network error: Interrupted function call";
      case WSAEINVAL:
        return "Network error: Invalid argument";
      case WSAEISCONN:
        return "Network error: Socket is already connected";
      case WSAEMFILE:
        return "Network error: Too many open files";
      case WSAEMSGSIZE:
        return "Network error: Message too long";
      case WSAENETDOWN:
        return "Network error: Network is down";
      case WSAENETRESET:
        return "Network error: Network dropped connection on reset";
      case WSAENETUNREACH:
        return "Network error: Network is unreachable";
      case WSAENOBUFS:
        return "Network error: No buffer space available";
      case WSAENOPROTOOPT:
        return "Network error: Bad protocol option";
      case WSAENOTCONN:
        return "Network error: Socket is not connected";
      case WSAENOTSOCK:
        return "Network error: Socket operation on non-socket";
      case WSAEOPNOTSUPP:
        return "Network error: Operation not supported";
      case WSAEPFNOSUPPORT:
        return "Network error: Protocol family not supported";
      case WSAEPROCLIM:
        return "Network error: Too many processes";
      case WSAEPROTONOSUPPORT:
        return "Network error: Protocol not supported";
      case WSAEPROTOTYPE:
        return "Network error: Protocol wrong type for socket";
      case WSAESHUTDOWN:
        return "Network error: Cannot send after socket shutdown";
      case WSAESOCKTNOSUPPORT:
        return "Network error: Socket type not supported";
      case WSAETIMEDOUT:
        return "Network error: Connection timed out";
      case WSAEWOULDBLOCK:
        return "Network error: Resource temporarily unavailable";
      case WSAEDISCON:
        return "Network error: Graceful shutdown in progress";
    }

    /*
     * Generic code to handle any other error.
     *
     * Slightly nasty hack here: we want to return a static string
     * which the caller will never have to worry about freeing, but on
     * the other hand if we call FormatMessage to get it then it will
     * want to either allocate a buffer or write into one we own.
     *
     * So what we do is to maintain a tree234 of error strings we've
     * already used. New ones are allocated from the heap, but then
     * put in this tree and kept forever.
     */

    if (!errstrings)
        errstrings = newtree234(errstring_compare);

    es = find234(errstrings, &error, errstring_find);

    if (!es) {
        int bufsize, bufused;

        es = snew(struct errstring);
        es->error = error;
        /* maximum size for FormatMessage is 64K */
        bufsize = 65535 + sizeof(prefix);
        es->text = snewn(bufsize, char);
        strcpy(es->text, prefix);
        bufused = strlen(es->text);
        if (!FormatMessage((FORMAT_MESSAGE_FROM_SYSTEM |
                            FORMAT_MESSAGE_IGNORE_INSERTS),
                           NULL, error,
                           MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                           es->text + bufused, bufsize - bufused, NULL)) {
            sprintf(es->text + bufused,
                    "Windows error code %d (and FormatMessage returned %u)",
                    error, (unsigned int)GetLastError());
        } else {
            int len = strlen(es->text);
            if (len > 0 && es->text[len-1] == '\n')
                es->text[len-1] = '\0';
        }
        es->text = sresize(es->text, strlen(es->text) + 1, char);
        add234(errstrings, es);
    }

    return es->text;
}
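The cache above keys the tree by Winsock error code and looks entries up both by whole element (add234) and by bare int (find234). A hedged sketch of the comparator pair that errstring_compare and errstring_find are assumed to be; the struct definition is illustrative:

struct errstring_sketch {              /* stand-in for struct errstring */
    int error;
    char *text;
};

/* Find-comparator: bare int key against a cached entry. */
static int errstring_find_sketch(void *av, void *bv)
{
    int a = *(int *)av;
    struct errstring_sketch *b = (struct errstring_sketch *)bv;
    return a < b->error ? -1 : a > b->error ? +1 : 0;
}

/* Sort-comparator for newtree234(): reuses the find-comparator on the
 * element's own key, a common tree234 idiom. */
static int errstring_compare_sketch(void *av, void *bv)
{
    struct errstring_sketch *a = (struct errstring_sketch *)av;
    return errstring_find_sketch(&a->error, bv);
}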
#ifdef MPEXT
int handle_got_event(HANDLE event)
#else
void handle_got_event(HANDLE event)
#endif
{
    struct handle *h;

    assert(handles_by_evtomain);
    h = find234(handles_by_evtomain, &event, handle_find_evtomain);
    if (!h) {
        /*
         * This isn't an error condition. If two or more event
         * objects were signalled during the same select operation,
         * and processing of the first caused the second handle to
         * be closed, then it will sometimes happen that we receive
         * an event notification here for a handle which is already
         * deceased. In that situation we simply do nothing.
         */
#ifdef MPEXT
        return 0;
#else
        return;
#endif
    }

    if (h->u.g.moribund) {
        /*
         * A moribund handle is one which we have either already
         * signalled to die, or are waiting until its current I/O op
         * completes to do so. Either way, it's treated as already
         * dead from the external user's point of view, so we ignore
         * the actual I/O result. We just signal the thread to die if
         * we haven't yet done so, or destroy the handle if not.
         */
        if (h->u.g.done) {
            handle_destroy(h);
        } else {
            h->u.g.done = TRUE;
            h->u.g.busy = TRUE;
            SetEvent(h->u.g.ev_from_main);
        }
#ifdef MPEXT
        return 0;
#else
        return;
#endif
    }

    switch (h->type) {
        int backlog;

      case HT_INPUT:
        h->u.i.busy = FALSE;

        /*
         * A signal on an input handle means data has arrived.
         */
        if (h->u.i.len == 0) {
            /*
             * EOF, or (nearly equivalently) read error.
             */
            h->u.i.defunct = TRUE;
            h->u.i.gotdata(h, NULL, -h->u.i.readerr);
        } else {
            backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len);
            handle_throttle(&h->u.i, backlog);
        }
#ifdef MPEXT
        return 1;
#else
        break;
#endif

      case HT_OUTPUT:
        h->u.o.busy = FALSE;

        /*
         * A signal on an output handle means we have completed a
         * write. Call the callback to indicate that the output
         * buffer size has decreased, or to indicate an error.
         */
        if (h->u.o.writeerr) {
            /*
             * Write error. Send a negative value to the callback,
             * and mark the thread as defunct (because the output
             * thread is terminating by now).
             */
            h->u.o.defunct = TRUE;
            h->u.o.sentdata(h, -h->u.o.writeerr);
        } else {
            bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten);
            h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data));
            handle_try_output(&h->u.o);
        }
#ifdef MPEXT
        return 0;
#else
        break;
#endif

      case HT_FOREIGN:
        /* Just call the callback. */
        h->u.f.callback(h->u.f.ctx);
#ifdef MPEXT
        return 0;
#else
        break;
#endif
    }
#ifdef MPEXT
    return 0;
#endif
}
/*
 * Call to run any timers whose time has reached the present.
 * Returns the time (in ticks) expected until the next timer after
 * that triggers.
 */
int run_timers(long anow, long *next)
{
    struct timer *first;

    init_timers();

#ifdef TIMING_SYNC
    /*
     * In this ifdef I put some code which deals with the
     * possibility that `anow' disagrees with GETTICKCOUNT by a
     * significant margin. Our strategy for dealing with it differs
     * depending on platform, because on some platforms
     * GETTICKCOUNT is more likely to be right whereas on others
     * `anow' is a better gold standard.
     */
    {
        long tnow = GETTICKCOUNT();

        if (tnow + TICKSPERSEC/50 - anow < 0 ||
            anow + TICKSPERSEC/50 - tnow < 0) {
#if defined TIMING_SYNC_ANOW
            /*
             * If anow is accurate and the tick count is wrong,
             * this is likely to be because the tick count is
             * derived from the system clock which has changed (as
             * can occur on Unix). Therefore, we resolve this by
             * inventing an offset which is used to adjust all
             * future output from GETTICKCOUNT.
             *
             * A platform which defines TIMING_SYNC_ANOW is
             * expected to have also defined this offset variable
             * in (its platform-specific adjunct to) putty.h.
             * Therefore we can simply reference it here and assume
             * that it will exist.
             */
            tickcount_offset += anow - tnow;
#elif defined TIMING_SYNC_TICKCOUNT
            /*
             * If the tick count is more likely to be accurate, we
             * simply use that as our time value, which may mean we
             * run no timers in this call (because we got called
             * early), or alternatively it may mean we run lots of
             * timers in a hurry because we were called late.
             */
            anow = tnow;
#else
            /*
             * Any platform which defines TIMING_SYNC must also
             * define one of the two auxiliary symbols
             * TIMING_SYNC_ANOW and TIMING_SYNC_TICKCOUNT, to
             * indicate which measurement to trust when the two
             * disagree.
             */
#error TIMING_SYNC definition incomplete
#endif
        }
    }
#endif

    now = anow;

    while (1) {
        first = (struct timer *)index234(timers, 0);

        if (!first)
            return FALSE;              /* no timers remaining */

        if (find234(timer_contexts, first->ctx, NULL) == NULL) {
            /*
             * This timer belongs to a context that has been
             * expired. Delete it without running.
             */
            delpos234(timers, 0);
            sfree(first);
        } else if (first->now - now <= 0) {
            /*
             * This timer is active and has reached its running
             * time. Run it.
             */
            delpos234(timers, 0);
            first->fn(first->ctx, first->now);
            sfree(first);
        } else {
            /*
             * This is the first still-active timer that is in the
             * future. Return how long it has yet to go.
             */
            *next = first->now;
            return TRUE;
        }
    }
}
/**
 * return communication pipe with the worker that will process the message
 * for the key 'jkey', or -1 on error
 * - jwl : pointer to the workers list
 * - jkey : key of the entity (connection to Jabber - usually SHOULD be the
 *   FROM header of the incoming SIP message)
 * - p : will point to the SHM location of the 'jkey' entry in jwl
 */
int xj_wlist_get(xj_wlist jwl, xj_jkey jkey, xj_jkey *p)
{
    int i = 0, pos = -1, min = 100000;
    xj_jkey msid = NULL;

    if(jwl==NULL || jkey==NULL || jkey->id==NULL || jkey->id->s==NULL)
        return -1;

    *p = NULL;
    while(i < jwl->len)
    {
        lock_set_get(jwl->sems, i);
        if(jwl->workers[i].pid <= 0)
        {
            lock_set_release(jwl->sems, i);
            i++;
            continue;
        }
        if((*p = find234(jwl->workers[i].sip_ids, (void*)jkey, NULL)) != NULL)
        {
            if(pos >= 0)
                lock_set_release(jwl->sems, pos);
            lock_set_release(jwl->sems, i);
#ifdef XJ_EXTRA_DEBUG
            LM_DBG("entry already exists for <%.*s> in the pool of <%d> [%d]\n",
                    jkey->id->len, jkey->id->s, jwl->workers[i].pid, i);
#endif
            return jwl->workers[i].wpipe;
        }
        if(min > jwl->workers[i].nr)
        {
            if(pos >= 0)
                lock_set_release(jwl->sems, pos);
            pos = i;
            min = jwl->workers[i].nr;
        }
        else
            lock_set_release(jwl->sems, i);
        i++;
    }

    if(pos >= 0 && jwl->workers[pos].nr < jwl->maxj)
    {
        jwl->workers[pos].nr++;

        msid = (xj_jkey)_M_SHM_MALLOC(sizeof(t_xj_jkey));
        if(msid == NULL)
            goto error;
        msid->id = (str*)_M_SHM_MALLOC(sizeof(str));
        if(msid->id == NULL)
        {
            _M_SHM_FREE(msid);
            goto error;
        }
        msid->id->s = (char*)_M_SHM_MALLOC(jkey->id->len);
        if(msid->id->s == NULL)
        {
            _M_SHM_FREE(msid->id);
            _M_SHM_FREE(msid);
            goto error;
        }

        if((*p = add234(jwl->workers[pos].sip_ids, msid)) != NULL)
        {
            msid->id->len = jkey->id->len;
            memcpy(msid->id->s, jkey->id->s, jkey->id->len);
            msid->hash = jkey->hash;
            msid->flag = XJ_FLAG_OPEN;
            lock_set_release(jwl->sems, pos);
#ifdef XJ_EXTRA_DEBUG
            LM_DBG("new entry for <%.*s> in the pool of <%d> - [%d]\n",
                    jkey->id->len, jkey->id->s, jwl->workers[pos].pid, pos);
#endif
            return jwl->workers[pos].wpipe;
        }
        _M_SHM_FREE(msid->id->s);
        _M_SHM_FREE(msid->id);
        _M_SHM_FREE(msid);
    }

error:
    if(pos >= 0)
        lock_set_release(jwl->sems, pos);
    LM_DBG("cannot create a new entry for <%.*s>\n", jkey->id->len,
            jkey->id->s);
    return -1;
}
void handle_got_event(HANDLE event)
{
    struct handle *h;

    assert(handles_by_evtomain);
    h = find234(handles_by_evtomain, &event, handle_find_evtomain);
    if (!h) {
        /*
         * This isn't an error condition. If two or more event
         * objects were signalled during the same select operation,
         * and processing of the first caused the second handle to
         * be closed, then it will sometimes happen that we receive
         * an event notification here for a handle which is already
         * deceased. In that situation we simply do nothing.
         */
        return;
    }

    if (h->u.g.moribund) {
        /*
         * A moribund handle is already treated as dead from the
         * external user's point of view, so do nothing with the
         * actual event. Just signal the thread to die if
         * necessary, or destroy the handle if not.
         */
        if (h->u.g.done) {
            handle_destroy(h);
        } else {
            h->u.g.done = TRUE;
            h->u.g.busy = TRUE;
            SetEvent(h->u.g.ev_from_main);
        }
        return;
    }

    if (!h->output) {
        int backlog;

        h->u.i.busy = FALSE;

        /*
         * A signal on an input handle means data has arrived.
         */
        if (h->u.i.len == 0) {
            /*
             * EOF, or (nearly equivalently) read error.
             */
            h->u.i.gotdata(h, NULL, -h->u.i.readerr);
            h->u.i.defunct = TRUE;
        } else {
            backlog = h->u.i.gotdata(h, h->u.i.buffer, h->u.i.len);
            handle_throttle(&h->u.i, backlog);
        }
    } else {
        h->u.o.busy = FALSE;

        /*
         * A signal on an output handle means we have completed a
         * write. Call the callback to indicate that the output
         * buffer size has decreased, or to indicate an error.
         */
        if (h->u.o.writeerr) {
            /*
             * Write error. Send a negative value to the callback,
             * and mark the thread as defunct (because the output
             * thread is terminating by now).
             */
            h->u.o.sentdata(h, -h->u.o.writeerr);
            h->u.o.defunct = TRUE;
        } else {
            bufchain_consume(&h->u.o.queued_data, h->u.o.lenwritten);
            h->u.o.sentdata(h, bufchain_size(&h->u.o.queued_data));
            handle_try_output(&h->u.o);
        }
    }
}
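Both handle_got_event variants above locate the handle record by the event object that fired. A hedged sketch of a find-comparator in the shape of handle_find_evtomain, ordering entries by the numeric value of the event HANDLE they expose to the main thread; the flattened struct is an assumption (in the real code the field sits inside a union):

#include <windows.h>
#include <stdint.h>

struct handle_sketch {                 /* stand-in for the real struct handle */
    HANDLE ev_to_main;                 /* event signalled by the subthread */
    /* ... */
};

static int handle_find_evtomain_sketch(void *av, void *bv)
{
    HANDLE a = *(HANDLE *)av;          /* key: the event that was signalled */
    struct handle_sketch *b = (struct handle_sketch *)bv;
    if ((uintptr_t)a < (uintptr_t)b->ev_to_main) return -1;
    if ((uintptr_t)a > (uintptr_t)b->ev_to_main) return +1;
    return 0;
}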
/*
 * Back-end utility: find the indextag with a given name.
 */
indextag *index_findtag(indexdata *idx, wchar_t *name)
{
    return find234(idx->tags, name, compare_to_find_tag);
}
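For completeness, a hedged sketch of a find-comparator in the shape of compare_to_find_tag: the key is a bare wide-character name and the element is a whole tag. The struct and the use of wcscmp are assumptions; the real code may use its own wide-string comparison routine.

#include <wchar.h>

typedef struct indextag_sketch {       /* stand-in for Halibut's indextag */
    wchar_t *name;
    /* ... implicit/explicit word lists, etc. ... */
} indextag_sketch;

static int compare_to_find_tag_sketch(void *av, void *bv)
{
    wchar_t *a = (wchar_t *)av;        /* key: the tag name alone */
    indextag_sketch *b = (indextag_sketch *)bv;
    return wcscmp(a, b->name);
}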