/* precondition: there must be something for us to write. */
static int write_vec(xcb_connection_t *c, struct iovec **vector, int *count)
{
    int n;
    assert(!c->out.queue_len);

#ifdef _WIN32
    int i = 0;
    int ret = 0, err = 0;
    struct iovec *vec;
    n = 0;

    /* Could use the WSASend win32 function for scatter/gather i/o but
       setting up the WSABUF struct from an iovec would require more work
       and I'm not sure of the benefit....works for now */
    vec = *vector;
    while(i < *count)
    {
        ret = send(c->fd, (char *) vec->iov_base, vec->iov_len, 0);
        if(ret == SOCKET_ERROR)
        {
            err = WSAGetLastError();
            if(err == WSAEWOULDBLOCK)
                return 1;
            /* hard error: force n <= 0 so the shutdown path below runs
               (the original fell through and kept sending later iovecs) */
            n = -1;
            break;
        }
        n += ret;
        vec++;  /* was "*vec++"; the dereference was a no-op */
        i++;
    }
#else
    n = writev(c->fd, *vector, *count);
    if(n < 0 && errno == EAGAIN)
        return 1;
#endif /* _WIN32 */

    if(n <= 0)
    {
        _xcb_conn_shutdown(c, XCB_CONN_ERROR);
        return 0;
    }

    /* consume the bytes just written: shrink or discard iovec entries
       until all n bytes are accounted for */
    for(; *count; --*count, ++*vector)
    {
        int cur = (*vector)->iov_len;
        if(cur > n)
            cur = n;
        (*vector)->iov_len -= cur;
        (*vector)->iov_base = (char *) (*vector)->iov_base + cur;
        n -= cur;
        if((*vector)->iov_len)
            break;
    }
    if(!*count)
        *vector = 0;
    assert(n == 0);
    return 1;
}
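/* The comment above leaves WSASend as a possible improvement.  Below is a
 * minimal sketch, not part of libxcb, of what that scatter/gather variant
 * could look like: each iovec maps onto a WSABUF and the whole array goes
 * out in one call.  XCB_SKETCH_MAX_IOV is a hypothetical bound invented
 * for this sketch, and c->fd is assumed to hold a valid SOCKET value. */
#if 0
#define XCB_SKETCH_MAX_IOV 16

static int write_vec_wsasend(xcb_connection_t *c, struct iovec *vec, int count)
{
    WSABUF bufs[XCB_SKETCH_MAX_IOV];
    DWORD sent = 0;
    int i;

    if(count > XCB_SKETCH_MAX_IOV)
        count = XCB_SKETCH_MAX_IOV; /* the remainder goes out on the next call */
    for(i = 0; i < count; ++i)
    {
        bufs[i].buf = (CHAR *) vec[i].iov_base;
        bufs[i].len = (ULONG) vec[i].iov_len;
    }
    if(WSASend((SOCKET) c->fd, bufs, count, &sent, 0, NULL, NULL) == SOCKET_ERROR)
        return WSAGetLastError() == WSAEWOULDBLOCK ? 0 : -1;
    return (int) sent; /* caller consumes this many bytes from the iovecs */
}
#endif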
int _xcb_conn_wait(xcb_connection_t *c, pthread_cond_t *cond, struct iovec **vector, int *count)
{
    int ret;
#if USE_POLL
    struct pollfd fd;
#else
    fd_set rfds, wfds;
#endif

    /* If the thing I should be doing is already being done, wait for it. */
    if(count ? c->out.writing : c->in.reading)
    {
        pthread_cond_wait(cond, &c->iolock);
        return 1;
    }

#if USE_POLL
    memset(&fd, 0, sizeof(fd));
    fd.fd = c->fd;
    fd.events = POLLIN;
#else
    FD_ZERO(&rfds);
    FD_SET(c->fd, &rfds);
#endif
    ++c->in.reading;

#if USE_POLL
    if(count)
    {
        fd.events |= POLLOUT;
        ++c->out.writing;
    }
#else
    FD_ZERO(&wfds);
    if(count)
    {
        FD_SET(c->fd, &wfds);
        ++c->out.writing;
    }
#endif

    pthread_mutex_unlock(&c->iolock);
    do {
#if USE_POLL
        ret = poll(&fd, 1, -1);
#else
        ret = select(c->fd + 1, &rfds, &wfds, 0, 0);
#endif
    } while (ret == -1 && errno == EINTR);
    if(ret < 0)
    {
        /* use the two-argument shutdown, matching its other call sites */
        _xcb_conn_shutdown(c, XCB_CONN_ERROR);
        ret = 0;
    }
    pthread_mutex_lock(&c->iolock);

    if(ret)
    {
        /* if both directions are ready, service both; a failure in
           either one makes the whole wait fail */
#if USE_POLL
        if((fd.revents & POLLIN) == POLLIN)
#else
        if(FD_ISSET(c->fd, &rfds))
#endif
            ret = ret && _xcb_in_read(c);

#if USE_POLL
        if((fd.revents & POLLOUT) == POLLOUT)
#else
        if(FD_ISSET(c->fd, &wfds))
#endif
            ret = ret && write_vec(c, vector, count);
    }

    if(count)
        --c->out.writing;
    --c->in.reading;

    return ret;
}
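/* A minimal sketch (assumed, not the upstream caller) of how a writer
 * drives _xcb_conn_wait: loop until write_vec has consumed every iovec.
 * write_vec advances *vector and decrements *count as bytes go out, so
 * the loop ends when count reaches zero or the wait reports failure.
 * The caller must hold c->iolock, matching _xcb_conn_wait's contract. */
#if 0
static int send_all_sketch(xcb_connection_t *c, struct iovec *vector, int count)
{
    int ret = 1;
    while(ret && count)
        ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
    return ret;
}
#endif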
unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    uint64_t request;
    uint32_t prefix[2];
    int veclen = req->count;
    enum workarounds workaround = WORKAROUND_NONE;

    /* check the pointers before they get dereferenced */
    assert(c != 0);
    assert(vector != 0);
    assert(req->count > 0);

    if(c->has_error)
        return 0;

    if(!(flags & XCB_REQUEST_RAW))
    {
        static const char pad[3];
        unsigned int i;
        uint16_t shortlen = 0;
        size_t longlen = 0;
        assert(vector[0].iov_len >= 4);

        /* set the major opcode, and the minor opcode for extensions */
        if(req->ext)
        {
            const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
            if(!(extension && extension->present))
            {
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
                return 0;
            }
            ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
            ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
        }
        else
            ((uint8_t *) vector[0].iov_base)[0] = req->opcode;

        /* put together the length field, possibly using BIGREQUESTS */
        for(i = 0; i < req->count; ++i)
        {
            longlen += vector[i].iov_len;
            if(!vector[i].iov_base)
            {
                vector[i].iov_base = (char *) pad;
                assert(vector[i].iov_len <= sizeof(pad));
            }
        }
        assert((longlen & 3) == 0);
        longlen >>= 2;

        if(longlen <= c->setup->maximum_request_length)
        {
            /* we don't need BIGREQUESTS. */
            shortlen = longlen;
            longlen = 0;
        }
        else if(longlen > xcb_get_maximum_request_length(c))
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
            return 0; /* server can't take this; maybe need BIGREQUESTS? */
        }

        /* set the length field. */
        ((uint16_t *) vector[0].iov_base)[1] = shortlen;
        if(!shortlen)
        {
            /* BIG-REQUESTS: move the first word of the request into a
               two-word prefix (header plus 32-bit length) and prepend it.
               Stepping back with --vector is legal because callers leave
               spare iovec slots in front of the array they pass in. */
            prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
            prefix[1] = ++longlen;
            vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
            vector[0].iov_len -= sizeof(uint32_t);
            --vector, ++veclen;
            vector[0].iov_base = prefix;
            vector[0].iov_len = sizeof(prefix);
        }
    }
    flags &= ~XCB_REQUEST_RAW;

    /* do we need to work around the X server bug described in glx.xml? */
    /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
     * configuration, but that should be handled here anyway. */
    if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
       ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
        req->opcode == 21))
        workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;

    /* get a sequence number and arrange for delivery. */
    pthread_mutex_lock(&c->iolock);
    /* wait for other writing threads to get out of my way. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    get_socket_back(c);

    /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
     * a reply. */
    if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
        send_sync(c);
    /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
     * applications see sequence 0 as that is used to indicate
     * an error in sending the request */
    if((unsigned int) (c->out.request + 1) == 0)
        send_sync(c);

    /* The above send_sync calls could drop the I/O lock, but this
     * thread will still exclude any other thread that tries to write,
     * so the sequence number postconditions still hold. */
    send_request(c, req->isvoid, workaround, flags, vector, veclen);
    request = c->has_error ? 0 : c->out.request;
    pthread_mutex_unlock(&c->iolock);
    return request;
}
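/* A minimal sketch, modeled on libxcb's generated protocol stubs, of how
 * a caller invokes xcb_send_request.  The spare iovec slots in front of
 * the pointer that gets passed (xcb_parts + 2) are what make the
 * "--vector" step in the BIG-REQUESTS path above legal.  The opcode and
 * body here are hypothetical, not a real core request. */
#if 0
static unsigned int send_example_request(xcb_connection_t *c)
{
    static const xcb_protocol_request_t xcb_req = {
        1,    /* count: one iovec of request data */
        0,    /* ext: core protocol, no extension */
        127,  /* opcode: hypothetical, for illustration only */
        1     /* isvoid: no reply expected */
    };
    uint32_t body[2] = { 0, 0 }; /* opcode and length are filled in by xcb_send_request */
    struct iovec xcb_parts[3];

    xcb_parts[2].iov_base = body;
    xcb_parts[2].iov_len = sizeof(body);
    /* xcb_parts + 2 leaves room for xcb_send_request to prepend a prefix */
    return xcb_send_request(c, XCB_REQUEST_CHECKED, xcb_parts + 2, &xcb_req);
}
#endif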