/*
 * Terminate the current ICF (intermediate compression format) block and
 * generate its dynamic-Huffman deflate header.  If the output buffer has
 * room for a worst-case header, the header is written straight to
 * next_out; otherwise it is rendered into level_buf->deflate_hdr so it
 * can be copied out later from the ZSTATE_HDR state.
 */
static void create_icf_block_hdr(struct isal_zstream *stream)
{
	struct isal_zstate *state = &stream->internal_state;
	struct level_2_buf *level_buf = (struct level_2_buf *)stream->level_buf;
	struct BitBuf2 *write_buf = &state->bitbuf;
	struct BitBuf2 write_buf_tmp;
	uint32_t out_size = stream->avail_out;
	uint8_t *end_out = stream->next_out + out_size;

	/* Write EOB in icf_buf */
	state->hist.ll_hist[256] = 1;
	level_buf->icf_buf_next->lit_len = 0x100;	/* 256 = end-of-block symbol */
	level_buf->icf_buf_next->lit_dist = NULL_DIST_SYM;
	level_buf->icf_buf_next->dist_extra = 0;
	level_buf->icf_buf_next++;

	/* This is the final block only if the caller signalled end-of-stream
	 * and all input has been consumed. */
	state->has_eob_hdr = stream->end_of_stream && !stream->avail_in;

	if (end_out - stream->next_out >= ISAL_DEF_MAX_HDR_SIZE) {
		/* Determine whether this is the final block */
		if (stream->gzip_flag == IGZIP_GZIP)
			write_gzip_header_stateless(stream);
		set_buf(write_buf, stream->next_out, stream->avail_out);
		create_hufftables_icf(write_buf, &level_buf->encode_tables, &state->hist,
				      state->has_eob_hdr);
		state->state = ZSTATE_FLUSH_ICF_BUFFER;
		stream->next_out = buffer_ptr(write_buf);
		stream->total_out += buffer_used(write_buf);
		stream->avail_out -= buffer_used(write_buf);
	} else {
		/* Start writing into temporary buffer */
		write_buf_tmp.m_bits = write_buf->m_bits;
		write_buf_tmp.m_bit_count = write_buf->m_bit_count;
		/* Pending partial bits move with the header into the temporary
		 * buffer; clear them from the live bit buffer. */
		write_buf->m_bits = 0;
		write_buf->m_bit_count = 0;
		set_buf(&write_buf_tmp, level_buf->deflate_hdr, ISAL_DEF_MAX_HDR_SIZE);
		create_hufftables_icf(&write_buf_tmp, &level_buf->encode_tables, &state->hist,
				      state->has_eob_hdr);
		level_buf->deflate_hdr_count = buffer_used(&write_buf_tmp);
		level_buf->deflate_hdr_extra_bits = write_buf_tmp.m_bit_count;
		flush(&write_buf_tmp);
		state->state = ZSTATE_HDR;
	}
}
static int write_deflate_header_stateless(struct isal_zstream *stream) { struct isal_zstate *state = &stream->internal_state; struct isal_hufftables *hufftables = stream->hufftables; uint32_t count; if (hufftables->deflate_hdr_count + 8 >= stream->avail_out) return STATELESS_OVERFLOW; memcpy(stream->next_out, hufftables->deflate_hdr, hufftables->deflate_hdr_count); stream->avail_out -= hufftables->deflate_hdr_count; stream->total_out += hufftables->deflate_hdr_count; stream->next_out += hufftables->deflate_hdr_count; set_buf(&state->bitbuf, stream->next_out, stream->avail_out); write_bits(&state->bitbuf, hufftables->deflate_hdr[hufftables->deflate_hdr_count], hufftables->deflate_hdr_extra_bits); count = buffer_used(&state->bitbuf); stream->next_out = buffer_ptr(&state->bitbuf); stream->avail_out -= count; stream->total_out += count; return COMP_OK; }
/*
 * Encode buffered ICF symbols into the output stream using the block's
 * Huffman tables, resuming from the state->count symbols already
 * written on a previous (output-limited) pass.
 */
static void flush_icf_block(struct isal_zstream *stream)
{
	struct isal_zstate *state = &stream->internal_state;
	struct level_2_buf *level_buf = (struct level_2_buf *)stream->level_buf;
	struct BitBuf2 *write_buf = &state->bitbuf;
	struct deflate_icf *icf_buf_encoded_next;

	set_buf(write_buf, stream->next_out, stream->avail_out);

	/* NOTE(review): the second operand lacks defined() — when
	 * USE_BITBUF_ELSE is undefined it evaluates to 0 and only
	 * USE_BITBUF8 enables this flush; confirm that is intended. */
#if defined (USE_BITBUF8) || (USE_BITBUF_ELSE)
	if (!is_full(write_buf))
		flush_bits(write_buf);
#endif

	icf_buf_encoded_next = encode_deflate_icf(level_buf->icf_buf_start + state->count,
						  level_buf->icf_buf_next, write_buf,
						  &level_buf->encode_tables);
	/* Remember how far encoding got in case the output buffer filled up */
	state->count = icf_buf_encoded_next - level_buf->icf_buf_start;

	stream->next_out = buffer_ptr(write_buf);
	stream->total_out += buffer_used(write_buf);
	stream->avail_out -= buffer_used(write_buf);

	if (level_buf->icf_buf_next <= icf_buf_encoded_next) {
		/* Whole ICF buffer encoded: choose the next state */
		state->count = 0;
		if (stream->avail_in == 0 && stream->end_of_stream)
			state->state = ZSTATE_TRL;
		else if (stream->avail_in == 0 && stream->flush != NO_FLUSH)
			state->state = ZSTATE_SYNC_FLUSH;
		else
			state->state = ZSTATE_NEW_HDR;
	}
}
/* Thread worker function: repeatedly feeds the character code passed
 * via the (pointer-sized) thread argument into the shared buffer. */
void *thread_fun(void *arg)
{
	/* The character code is smuggled through the void * argument; go
	 * through intptr_t so the narrowing conversion is well defined on
	 * 64-bit platforms (a direct (int) cast of a pointer is not). */
	int ch = (int)(intptr_t)arg;

	for (;;)
		set_buf((char)ch);

	return NULL;	/* not reached */
}
/*
 * Rebuild the iterator's traversal cache for its current position: walk
 * from the root to the leaf containing current_pos, recording the
 * visited nodes and the left/right turns taken, then cache the tail of
 * that path and refresh the iterator's buffer via set_buf().
 */
void rope<ValueType, Policy>::iterator_base::set_cache(iterator_base &iter)
{
	size_type pos(iter.current_pos);

	/* Past-the-end position: mark the cached buffer as invalid. */
	if (pos >= iter.root->size) {
		iter.buf_cur = 0;
		return;
	}

	std::array<node_ptr, Policy::max_rope_depth + 1> path;
	/* Bit vector marking right turns in the path. */
	/* NOTE(review): relies on this type value-initializing to all-zero
	 * bits (e.g. std::bitset); confirm it is not a raw integer. */
	decltype(iter.path_directions) dirns;
	int cur_depth(-1);
	size_type cur_start_pos(0);
	auto cur_rope(iter.root);

	/* Descend through concat nodes toward the leaf covering pos. */
	while (true) {
		++cur_depth;
		path[cur_depth] = cur_rope;
		if (cur_rope->tag == rope_tag::concat) {
			auto c(node::concat::extra(cur_rope));
			size_type left_len(c->left->size);

			dirns <<= 1;
			if (pos >= cur_start_pos + left_len) {
				/* pos lies in the right subtree */
				dirns.set(0, true);
				cur_rope = c->right;
				cur_start_pos += left_len;
			} else
				cur_rope = c->left;
		} else {
			/* Reached the leaf; remember where it starts. */
			iter.leaf_pos = cur_start_pos;
			break;
		}
	}

	/* Keep only the last path_cache_len entries of the root-to-leaf path. */
	{
		int i(-1);
		int j(cur_depth + 1 - Policy::path_cache_len);

		if (j < 0)
			j = 0;
		while (j <= cur_depth)
			iter.path_end[++i] = path[j++];
		iter.path_index = i;
	}

	iter.path_directions = dirns;
	set_buf(iter);
}
static void flush_write_buffer(struct isal_zstream *stream) { struct isal_zstate *state = &stream->internal_state; int bytes = 0; if (stream->avail_out >= 8) { set_buf(&state->bitbuf, stream->next_out, stream->avail_out); flush(&state->bitbuf); stream->next_out = buffer_ptr(&state->bitbuf); bytes = buffer_used(&state->bitbuf); stream->avail_out -= bytes; stream->total_out += bytes; state->state = ZSTATE_NEW_HDR; } }
/*
 * Write a precomputed deflate header when the bit buffer is mid-byte
 * (unaligned), streaming the stored header through write_bits() instead
 * of memcpy.  Returns STATELESS_OVERFLOW when the output is too small.
 * NOTE(review): this definition appears truncated in this view; the
 * remainder of the body is not visible here.
 */
static int write_deflate_header_unaligned_stateless(struct isal_zstream *stream)
{
	struct isal_zstate *state = &stream->internal_state;
	struct isal_hufftables *hufftables = stream->hufftables;
	unsigned int count;
	uint64_t bit_count;
	uint64_t *header_next;
	uint64_t *header_end;
	uint64_t header_bits;

	/* Byte-aligned case is handled by the simpler memcpy path */
	if (state->bitbuf.m_bit_count == 0)
		return write_deflate_header_stateless(stream);

	if (hufftables->deflate_hdr_count + 16 >= stream->avail_out)
		return STATELESS_OVERFLOW;

	set_buf(&state->bitbuf, stream->next_out, stream->avail_out);

	/* NOTE(review): reading the byte header through uint64_t * assumes
	 * deflate_hdr is suitably aligned; confirm for strict-aliasing and
	 * alignment on the supported targets. */
	header_next = (uint64_t *) hufftables->deflate_hdr;
	header_end = header_next + hufftables->deflate_hdr_count / 8;

	header_bits = *header_next;
	/* Bit 0 of the stored header is BFINAL; clear it unless this really
	 * is the last block.  NOTE(review): assumes the stored header has
	 * BFINAL set — confirm against the header generator. */
	if (stream->end_of_stream == 0)
		header_bits--;
	else
		state->has_eob_hdr = 1;
	header_next++;

	/* Write out Complete Header bits */
	for (; header_next <= header_end; header_next++) {
		write_bits(&state->bitbuf, header_bits, 32);
		header_bits >>= 32;
		write_bits(&state->bitbuf, header_bits, 32);
		header_bits = *header_next;
	}

	/* Bits of the header that remain after the full 8-byte words */
	bit_count = (hufftables->deflate_hdr_count & 0x7) * 8 + hufftables->deflate_hdr_extra_bits;

	if (bit_count > MAX_BITBUF_BIT_WRITE) {
		write_bits(&state->bitbuf, header_bits, MAX_BITBUF_BIT_WRITE);
		header_bits >>= MAX_BITBUF_BIT_WRITE;
		bit_count -= MAX_BITBUF_BIT_WRITE;
	}
/*
 * Deflate sync flush: emit the end-of-block code (if the block does not
 * already have one) followed by the start of an empty stored block
 * (0xFFFF0000 marker) so the stream lands on a byte boundary.
 */
static void sync_flush(struct isal_zstream *stream)
{
	struct isal_zstate *state = &stream->internal_state;
	uint64_t bits_to_write = 0xFFFF0000, bits_len;
	uint64_t code = 0, len = 0, bytes;
	int flush_size;

	if (stream->avail_out >= 8) {
		set_buf(&state->bitbuf, stream->next_out, stream->avail_out);

		/* Block still open: fetch the EOB literal code (symbol 256) */
		if (!state->has_eob)
			get_lit_code(stream->hufftables, 256, &code, &len);

		/* Padding needed to reach a byte boundary after EOB plus the
		 * 3 empty-stored-block header bits (len is uint64_t, so the
		 * negation/modulo is done in unsigned arithmetic → 0..7). */
		flush_size = (-(state->bitbuf.m_bit_count + len + 3)) % 8;

		bits_to_write <<= flush_size + 3;
		bits_len = 32 + len + flush_size + 3;

#ifdef USE_BITBUFB		/* Write Bits Always */
		state->state = ZSTATE_NEW_HDR;
#else				/* Not Write Bits Always */
		state->state = ZSTATE_FLUSH_WRITE_BUFFER;
#endif
		state->has_eob = 0;

		/* EOB code goes in the low bits so it is emitted first */
		if (len > 0)
			bits_to_write = (bits_to_write << len) | code;

		write_bits(&state->bitbuf, bits_to_write, bits_len);

		bytes = buffer_used(&state->bitbuf);
		stream->next_out = buffer_ptr(&state->bitbuf);
		stream->avail_out -= bytes;
		stream->total_out += bytes;

		if (stream->flush == FULL_FLUSH) {
			/* Clear match history so there are no cross
			 * block length distance pairs */
			reset_match_history(stream);
		}
	}
}
static void sync_flush(struct isal_zstream *stream) { struct isal_zstate *state = &stream->internal_state; uint64_t bits_to_write = 0xFFFF0000, bits_len; uint64_t bytes; int flush_size; if (stream->avail_out >= 8) { set_buf(&state->bitbuf, stream->next_out, stream->avail_out); flush_size = (-(state->bitbuf.m_bit_count + 3)) % 8; bits_to_write <<= flush_size + 3; bits_len = 32 + flush_size + 3; #ifdef USE_BITBUFB /* Write Bits Always */ state->state = ZSTATE_NEW_HDR; #else /* Not Write Bits Always */ state->state = ZSTATE_FLUSH_WRITE_BUFFER; #endif state->has_eob = 0; write_bits(&state->bitbuf, bits_to_write, bits_len); bytes = buffer_used(&state->bitbuf); stream->next_out = buffer_ptr(&state->bitbuf); stream->avail_out -= bytes; stream->total_out += bytes; if (stream->flush == FULL_FLUSH) { /* Clear match history so there are no cross * block length distance pairs */ state->file_start -= state->b_bytes_processed; state->b_bytes_valid -= state->b_bytes_processed; state->b_bytes_processed = 0; reset_match_history(stream); } } }
/*
 * Copy a precomputed deflate header to the output, clearing the BFINAL
 * bit when more blocks will follow, then write the header's trailing
 * partial-byte bits.  Returns COMP_OK, or STATELESS_OVERFLOW when the
 * output buffer is too small.
 */
static int write_deflate_header_stateless(struct isal_zstream *stream)
{
	struct isal_zstate *state = &stream->internal_state;
	struct isal_hufftables *hufftables = stream->hufftables;
	uint64_t hdr_extra_bits = hufftables->deflate_hdr[hufftables->deflate_hdr_count];
	uint32_t count;

	if (hufftables->deflate_hdr_count + 8 >= stream->avail_out)
		return STATELESS_OVERFLOW;

	memcpy(stream->next_out, hufftables->deflate_hdr, hufftables->deflate_hdr_count);

	/* The first header bit is BFINAL; clear it (subtract 1 from the byte
	 * holding it) unless this is the last block.  When the header has no
	 * whole bytes, that bit lives in the extra-bits value instead.
	 * NOTE(review): assumes the stored header has BFINAL set — confirm
	 * against the header generator. */
	if (stream->end_of_stream == 0) {
		if (hufftables->deflate_hdr_count > 0)
			*stream->next_out -= 1;
		else
			hdr_extra_bits -= 1;
	} else
		state->has_eob_hdr = 1;

	stream->avail_out -= hufftables->deflate_hdr_count;
	stream->total_out += hufftables->deflate_hdr_count;
	stream->next_out += hufftables->deflate_hdr_count;

	set_buf(&state->bitbuf, stream->next_out, stream->avail_out);

	/* Trailing header bits that do not fill a whole byte */
	write_bits(&state->bitbuf, hdr_extra_bits, hufftables->deflate_hdr_extra_bits);

	count = buffer_used(&state->bitbuf);
	stream->next_out = buffer_ptr(&state->bitbuf);
	stream->avail_out -= count;
	stream->total_out += count;

	state->state = ZSTATE_BODY;

	return COMP_OK;
}
/*
 * Emit a compressed block describing repeated_length copies of a single
 * repeated byte (0 or 0xFF) using a canned header plus repeat codes,
 * consuming that input and updating the CRC when gzip framing is on.
 */
static void write_constant_compressed_stateless(struct isal_zstream *stream,
						uint32_t repeated_length)
{
	/* Assumes repeated_length is at least 1.
	 * Assumes the input end_of_stream is either 0 or 1. */
	struct isal_zstate *state = &stream->internal_state;
	uint32_t rep_bits = ((repeated_length - 1) / 258) * 2;	/* full 258-byte repeats, 2 bits each */
	uint32_t rep_bytes = rep_bits / 8;
	uint32_t rep_extra = (repeated_length - 1) % 258;	/* leftover repeats */
	uint32_t bytes;
	uint32_t repeated_char = *stream->next_in;
	uint8_t *start_in = stream->next_in;

	/* Guarantee there is enough space for the header even in the worst case */
	if (stream->avail_out < HEADER_LENGTH + MAX_FIXUP_CODE_LENGTH + rep_bytes + 8)
		return;

	/* Assumes the repeated char is either 0 or 0xFF. */
	memcpy(stream->next_out, repeated_char_header[repeated_char & 1], HEADER_LENGTH);

	/* Entire remaining input is this run and EOS was requested: mark the
	 * block final (BFINAL bit) and go straight to the trailer state. */
	if (stream->avail_in == repeated_length && stream->end_of_stream > 0) {
		stream->next_out[0] |= 1;
		state->has_eob_hdr = 1;
		state->has_eob = 1;
		state->state = ZSTATE_TRL;
	} else {
		state->state = ZSTATE_NEW_HDR;
	}

	/* A full 258-repeat code is all-zero bits, so whole bytes of them
	 * can be emitted with memset. */
	memset(stream->next_out + HEADER_LENGTH, 0, rep_bytes);
	stream->avail_out -= HEADER_LENGTH + rep_bytes;
	stream->next_out += HEADER_LENGTH + rep_bytes;
	stream->total_out += HEADER_LENGTH + rep_bytes;

	set_buf(&state->bitbuf, stream->next_out, stream->avail_out);

	/* These two lines are basically a modified version of init. */
	state->bitbuf.m_bits = 0;
	state->bitbuf.m_bit_count = rep_bits % 8;

	/* Add smaller repeat codes as necessary. Code280 can describe repeat
	 * lengths of 115-130 bits. Code10 can describe repeat lengths of 10
	 * bits. If more than 230 bits, fill code with two code280s. Else if
	 * more than 115 repeates, fill with code10s until one code280 can
	 * finish the rest of the repeats. Else, fill with code10s and
	 * literals */
	if (rep_extra > 115) {
		while (rep_extra > 130 && rep_extra < 230) {
			write_bits(&state->bitbuf, CODE_10, CODE_10_LENGTH);
			rep_extra -= 10;
		}

		if (rep_extra >= 230) {
			write_bits(&state->bitbuf,
				   CODE_280 | ((rep_extra / 2 - 115) << CODE_280_LENGTH),
				   CODE_280_TOTAL_LENGTH);
			rep_extra -= rep_extra / 2;
		}

		write_bits(&state->bitbuf,
			   CODE_280 | ((rep_extra - 115) << CODE_280_LENGTH),
			   CODE_280_TOTAL_LENGTH);
	} else {
		while (rep_extra >= 10) {
			write_bits(&state->bitbuf, CODE_10, CODE_10_LENGTH);
			rep_extra -= 10;
		}

		for (; rep_extra > 0; rep_extra--)
			write_bits(&state->bitbuf, CODE_LIT, CODE_LIT_LENGTH);
	}

	write_bits(&state->bitbuf, END_OF_BLOCK, END_OF_BLOCK_LEN);

	stream->next_in += repeated_length;
	stream->avail_in -= repeated_length;
	stream->total_in += repeated_length;

	bytes = buffer_used(&state->bitbuf);
	stream->next_out = buffer_ptr(&state->bitbuf);
	stream->avail_out -= bytes;
	stream->total_out += bytes;

	if (stream->gzip_flag)
		state->crc = crc32_gzip(state->crc, start_in, stream->next_in - start_in);

	return;
}
/* Evaluate a binding's bound value, then expose that value's buffer on
 * the binding expression itself. */
static void initialize_binding(expression * e)
{
	expression *val = e->data.bind.val;

	initialize(val);
	set_buf(e, val->buf);
}
/* Point a variable expression's buffer at the buffer of the value its
 * binding is bound to. */
static void initialize_variable(expression * e)
{
	expression *bound = e->data.var.bind->data.bind.val;

	set_buf(e, bound->buf);
}
int mount_nfs4(const char *source, const char *target, int mount_flags, const char *nfs_options) { typedef struct { const char *name; int *ptr; } num_opt_def_t; typedef struct { const char *name; int flag; } bool_opt_def_t; char p_options[MAX_LINE_LEN], *token, *saveptr, *opt_val, *endptr; long val; struct sockaddr_in server_addr = { 0 }; char ip_addr[16] = "127.0.0.1"; char hostname[MAX_LINE_LEN] = { 0 }; char mnt_path[MAX_LINE_LEN] = { 0 }; struct nfs4_mount_data data = { 0 }; int bg = 0, retry = -1; int auth_pseudoflavor = AUTH_UNIX; time_t timeout; int r; int dummy; int had_warning; num_opt_def_t num_opt_defs[] = { { "rsize", &data.rsize }, { "wsize", &data.wsize }, { "timeo", &data.timeo }, { "retrans", &data.retrans }, { "acregmin", &data.acregmin }, { "acregmax", &data.acregmax }, { "acdirmin", &data.acdirmin }, { "acdirmax", &data.acdirmax }, { "retry", &retry }, { "vers", &dummy }, { NULL, NULL } }; #define INVERTED 0x10000 bool_opt_def_t bool_opt_defs[] = { { "bg", 0 }, { "fg", INVERTED }, { "soft", NFS4_MOUNT_SOFT }, { "hard", NFS4_MOUNT_SOFT | INVERTED }, { "intr", NFS4_MOUNT_INTR }, { "cto", NFS4_MOUNT_NOCTO | INVERTED }, { "ac", NFS4_MOUNT_NOAC | INVERTED }, { "sharedcache", NFS4_MOUNT_UNSHARED | INVERTED }, { NULL, 0 } }; num_opt_def_t *num_opt_def; bool_opt_def_t *bool_opt_def; set_buf(p_options, MAX_LINE_LEN, nfs_options, NULL); data.retrans = 3; data.acregmin = 3; data.acregmax = 60; data.acdirmin = 30; data.acdirmax = 60; data.proto = IPPROTO_TCP; opt_val = strchr((char *)source, ':'); if (!opt_val) panic(0, "nfs mount: directory to mount not in host:dir format: ", source, NULL); strncpy(hostname, source, MIN(MAX_LINE_LEN - 1, opt_val - source)); set_buf(mnt_path, MAX_LINE_LEN, opt_val + 1, NULL); server_addr.sin_family = AF_INET; server_addr.sin_port = htons(NFS_PORT); if (!small_inet_aton(hostname, &server_addr.sin_addr)) panic(0, "nfs mount: only IP addresses supported for mounting NFS servers, got ", hostname, " instead.", NULL); for (token = 
strtok_r(p_options, ",", &saveptr); token != NULL; token = strtok_r(NULL, ",", &saveptr)) { opt_val = strchr(token, '='); if (opt_val) { *opt_val = '\0'; opt_val++; if (strcmp(token, "proto") == 0) { if (strcmp(opt_val, "tcp") == 0) data.proto = IPPROTO_TCP; else if (strcmp(opt_val, "udp") == 0) data.proto = IPPROTO_UDP; else panic(0, "nfs mount: invalid proto option specified (valid values are: tcp, udp)", NULL); continue; } else if (strcmp(token, "clientaddr") == 0) { /* FIXME */ panic(0, "nfs mount: clientaddr not supported yet", NULL); } else if (strcmp(token, "sec") == 0) { if (strcmp(opt_val + 1, "sys") != 0) panic(0, "nfs mount: only sec=sys is supported", NULL); continue; } if (!*opt_val) panic(0, "nfs mount: invalid empty option ", token, " specified", NULL); endptr = NULL; val = strtol(opt_val, &endptr, 10); if (!endptr || !*endptr) panic(0, "nfs mount: option ", token, " requires a number, got ", opt_val, " instead.", NULL); if (strcmp(token, "port") == 0) { server_addr.sin_port = htons((int)val); continue; } if (strcmp(token, "actimeo") == 0) { data.acregmin = data.acregmax = data.acdirmin = data.acdirmax = (int)val; continue; } for (num_opt_def = num_opt_defs; num_opt_def->name; num_opt_def++) { if (strcmp(token, num_opt_def->name) == 0) { *num_opt_def->ptr = (int)val; break; } } if (!num_opt_def->name) panic(0, "nfs mount: invalid option ", token, "=", opt_val, NULL); } else { val = 1; if (strncmp(token, "no", 2) == 0) { opt_val = token + 2; val = 0; } else { opt_val = token; } if (strcmp(opt_val, "bg") == 0) { bg = 1; } else if (strcmp(opt_val, "fg") == 0) { bg = 0; } else { for (bool_opt_def = bool_opt_defs; bool_opt_def->name; bool_opt_def++) { if (strcmp(opt_val, bool_opt_def->name) == 0) { /* != is logical XOR in C */ val = val != !!(bool_opt_def->flag & INVERTED); if (val) data.flags |= (bool_opt_def->flag & NFS4_MOUNT_FLAGMASK); else data.flags &= ~(bool_opt_def->flag & NFS4_MOUNT_FLAGMASK); break; } } if (!bool_opt_def->name) panic(0, "nfs 
mount: invalid option ", token, NULL); } } } if (bg) { warn("nfs mount: background mounts unsupported for / and /usr, defaulting to foreground", NULL); bg = 0; } if (retry == -1) retry = 2; data.auth_flavourlen = 1; data.auth_flavours = &auth_pseudoflavor; data.mnt_path.data = mnt_path; data.mnt_path.len = strlen(mnt_path); data.hostname.data = hostname; data.hostname.len = strlen(hostname); data.host_addr = (struct sockaddr *)&server_addr; data.host_addrlen = sizeof(server_addr); timeout = time(NULL) + 60 * retry; data.version = NFS4_MOUNT_VERSION; had_warning = 0; for (;;) { r = nfs4_ping(AF_INET, data.proto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, (struct sockaddr *)&server_addr, sizeof(server_addr), MOUNT_TIMEOUT, ip_addr, sizeof(ip_addr)); if (r == 0) break; if (time(NULL) >= timeout) { if (r < 0 && r != -ETIMEDOUT) panic(r, "nfs mount: failed to mount ", source, NULL); else panic(0, "nfs mount: timeout while trying to mount ", source, NULL); } if (!had_warning) { had_warning = 1; if (r >= 0) r = -ETIMEDOUT; warn("nfs mount: waiting for response from NFS server ", hostname, ": ", strerror(-r), NULL); } /* Wait a bit before retrying, otherwise we will flood the network... */ if (r < 0 && r != -ETIMEDOUT) sleep(1); } data.client_addr.data = ip_addr; data.client_addr.len = strlen(ip_addr); r = mount(source, target, "nfs4", mount_flags, &data); if (r < 0) return -errno; return r; }
/*
 * Send an RPC NULLPROC call to an NFSv4 server and wait (non-blocking,
 * via poll) for the matching reply.  On success, optionally writes the
 * local socket's IPv4 address into ip_addr.  Returns 0 on a matching
 * reply, a positive memcmp difference on a mismatched reply, or -errno
 * on socket/timeout errors.
 */
int nfs4_ping(int domain, int type, struct sockaddr *dest, socklen_t dest_len,
	      int timeout, char *ip_addr, size_t ip_addr_len)
{
	/* So we don't really want to implement the whole RPC protocol
	 * for NFSv4 (would be too much code), and since we need to do
	 * a NULLPROC only anyway, where we know how the request and
	 * response have to look like on a byte level, we just store
	 * the packets here. If the response match, everything
	 * succeeded.
	 *
	 * Also, we are going to blatantly assume that the NULLPROC
	 * requests/responses are always going to fit into a single
	 * RPC fragment. Otherwise, our code would get quite a bit
	 * more complicated. */
	char nullproc_request[] = {
		0x80, 0x00, 0x00, 0x28,	/* last fragment, fragment length: 40 */
		0x00, 0x00, 0x00, 0x00,	/* xid, will be overwritten */
		0x00, 0x00, 0x00, 0x00,	/* message type: call */
		0x00, 0x00, 0x00, 0x02,	/* RPC Version: 2 */
		0x00, 0x01, 0x86, 0xa3,	/* NFS */
		0x00, 0x00, 0x00, 0x04,	/* Version 4 */
		0x00, 0x00, 0x00, 0x00,	/* NULLPROC */
		0x00, 0x00, 0x00, 0x00,	/* NULL credentials */
		0x00, 0x00, 0x00, 0x00,	/* (length 0) */
		0x00, 0x00, 0x00, 0x00,	/* NULL verifier */
		0x00, 0x00, 0x00, 0x00	/* (length 0) */
	};
	char nullproc_expected_response[] = {
		0x80, 0x00, 0x00, 0x18,	/* last fragment, fragment length: 24 */
		0x00, 0x00, 0x00, 0x00,	/* xid, will be overwritten */
		0x00, 0x00, 0x00, 0x01,	/* message type: reply */
		0x00, 0x00, 0x00, 0x00,	/* reply state: accepted */
		0x00, 0x00, 0x00, 0x00,	/* NULL verifier */
		0x00, 0x00, 0x00, 0x00,	/* (length 0) */
		0x00, 0x00, 0x00, 0x00	/* accept state: RPC executed successfully */
	};
	char nullproc_response[sizeof(nullproc_expected_response)];
	int sock_fd, r;
	ssize_t bytes;
	enum { WAIT_FOR_CONNECT, WAIT_FOR_SEND, WAIT_FOR_RECEIVE, DONE } state = WAIT_FOR_CONNECT;
	struct pollfd poll_fd;
	int timeout_msec = timeout * 1000;
	int pos = 0;
	size_t msg_start;	/* offset skipping the TCP-only fragment header */
	socklen_t len;
	union {
		char buf[256];
		struct sockaddr_in in;
	} client_addr;
	socklen_t client_addr_len = sizeof(client_addr);

	/* get some random data for xid
	 * (we don't care about the entropy pool state,
	 * as we don't pretend that sec=sys NFSv4 is at
	 * all cryptographically safe) */
	{
		int urandom_fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
		if (urandom_fd < 0)
			return -errno;
		r = read(urandom_fd, nullproc_request + 4, 4);
		if (r != 4) {
			r = -errno;
			close(urandom_fd);
			return r;
		}
		close(urandom_fd);
		/* copy xid so we are sure that we get something matching
		 * back */
		memcpy(nullproc_expected_response + 4, nullproc_request + 4, 4);
	}

	sock_fd = socket(domain, type | SOCK_CLOEXEC, 0);
	if (sock_fd < 0)
		return -errno;
	/* Non-blocking so poll() controls all waiting below */
	r = fcntl(sock_fd, F_GETFL);
	if (r < 0)
		goto error_out;
	r = fcntl(sock_fd, F_SETFL, r | O_NONBLOCK);
	if (r < 0)
		goto error_out;

	if (type == SOCK_DGRAM) {
		/* UDP: no connect phase and no 4-byte fragment header */
		state = WAIT_FOR_SEND;
		msg_start = 4;
	} else {
		msg_start = 0;
		r = connect(sock_fd, dest, dest_len);
		if (r < 0 && errno != EINPROGRESS && errno != EWOULDBLOCK)
			goto error_out;
	}

	/* Connect → send → receive state machine driven by poll() */
	while (state != DONE) {
		poll_fd.fd = sock_fd;
		poll_fd.events = (state == WAIT_FOR_RECEIVE ? POLLIN : POLLOUT);
		poll_fd.revents = 0;
		r = poll(&poll_fd, 1, timeout_msec);
		if (r == 0) {
			errno = ETIMEDOUT;
			goto error_out;
		}
		switch (state) {
		case WAIT_FOR_CONNECT:
			/* Reads the async connect result directly into errno */
			len = sizeof(errno);
			r = getsockopt(sock_fd, SOL_SOCKET, SO_ERROR, &errno, &len);
			if (r < 0 || errno != 0)
				goto error_out;
			state = WAIT_FOR_SEND;
			break;
		case WAIT_FOR_SEND:
			/* UDP doesn't have fragment length */
			if (type == SOCK_DGRAM)
				bytes = sendto(sock_fd, nullproc_request + 4,
					       sizeof(nullproc_request) - 4, 0, dest, dest_len);
			else
				bytes = send(sock_fd, nullproc_request,
					     sizeof(nullproc_request), 0);
			if (bytes != (int)sizeof(nullproc_request) - (type == SOCK_DGRAM) * 4) {
				if (bytes >= 0)
					errno = EMSGSIZE;
				goto error_out;
			}
			state = WAIT_FOR_RECEIVE;
			pos = 0;
			break;
		case WAIT_FOR_RECEIVE:
			if (type == SOCK_DGRAM) {
				/* UDP doesn't have fragment length */
				bytes = recvfrom(sock_fd, nullproc_response + 4,
						 sizeof(nullproc_response) - 4, 0, dest, &dest_len);
				if (bytes != (int)sizeof(nullproc_response) - 4) {
					if (bytes >= 0)
						errno = -1;	/* unexpected response */
					goto error_out;
				}
				state = DONE;
			} else {
				/* TCP may deliver the reply in pieces */
				bytes = recv(sock_fd, &nullproc_response[pos],
					     sizeof(nullproc_response) - pos, 0);
				if (bytes <= 0) {
					if (bytes == 0)
						errno = -1;	/* unexpected response */
					goto error_out;
				}
				if (bytes < (int)sizeof(nullproc_response) - pos) {
					pos += bytes;
					continue;
				}
				state = DONE;
			}
			/* fallthrough */
		case DONE:
			break;
		}
	}

	/* We had a successful response from the server
	 * at this point */
	r = getsockname(sock_fd, (struct sockaddr *)&client_addr, &client_addr_len);
	if (r < 0)
		r = -errno;
	close(sock_fd);
	if (r < 0)
		return r;

	/* Compare the response to the expected response */
	r = memcmp(&nullproc_expected_response[msg_start],
		   &nullproc_response[msg_start],
		   sizeof(nullproc_response) - msg_start);
	if (r == 0 && ip_addr) {
		/* Write string representation of client address to ip_addr */
		*ip_addr = '\0';
		if (domain == AF_INET)
			set_buf(ip_addr, ip_addr_len, small_inet_ntoa(client_addr.in.sin_addr), NULL);
	}
	return r;

error_out:
	r = -errno;
	close(sock_fd);
	return r;
}
/* Thread worker function: repeatedly feeds the character code passed
 * via the (pointer-sized) thread argument into the shared buffer. */
void *thread_fun(void *arg)
{
	/* Go through intptr_t so the pointer-to-int narrowing is well
	 * defined on 64-bit platforms (a direct (int) cast is not). */
	int ch = (int)(intptr_t)arg;

	for (;;)
		set_buf((char)ch);

	/* Not reached; keeps the void * contract explicit and silences
	 * missing-return warnings. */
	return NULL;
}