// Wire one std stream of `this_runner` to its redirect target.
// The target is either the spawner's own std handles (options_class::std) or
// another runner's stream identified by pipe_index + name (options_class::pipe).
// Panics on any other redirect type, on an out-of-range runner index, or on a
// direction mismatch (e.g. piping one runner's stdin into another's stdin).
void spawner_new_c::setup_stream_(const options_class::redirect redirect, std_stream_type source_type, runner* this_runner) {
    auto source_pipe = this_runner->get_pipe(source_type);
    if (redirect.type == options_class::std) {
        // Connect to the spawner's own stdin/stdout/stderr. connect() is
        // called on the producing side, so direction depends on whether data
        // flows into (stdin) or out of (stdout/stderr) the runner.
        if (source_type == std_stream_input) {
            get_std(std_stream_input, redirect.flags)->connect(source_pipe);
        } else {
            source_pipe->connect(get_std(source_type, redirect.flags));
        }
        return;
    }
    PANIC_IF(redirect.type != options_class::pipe);
    auto index = redirect.pipe_index;
    auto stream = redirect.name;
    PANIC_IF(index < 0 || index >= runners.size());
    auto target_runner = runners[index];
    multipipe_ptr target_pipe;
    // Direction checks: a runner's output may only feed another runner's
    // stdin, and a runner's stdin may only be fed by another's stdout/stderr.
    if (stream == "stdin") {
        PANIC_IF(source_type == std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_input, redirect.flags);
        source_pipe->connect(target_pipe);
    } else if (stream == "stdout") {
        PANIC_IF(source_type != std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_output, redirect.flags);
        target_pipe->connect(source_pipe);
    } else if (stream == "stderr") {
        PANIC_IF(source_type != std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_error, redirect.flags);
        target_pipe->connect(source_pipe);
    } else {
        PANIC("invalid stream name");
    }
    if (control_mode_enabled) {
        // In control mode the controller observes stdout traffic; hook the
        // pipe on whichever side of the connection produces output.
        if (source_type == std_stream_output) {
            setup_stream_in_control_mode_(this_runner, source_pipe);
        } else if (stream == "stdout") {
            setup_stream_in_control_mode_(target_runner, target_pipe);
        }
    }
}
/* Report the elapsed duration of a test through the message pipe. */
void send_duration_info(int duration)
{
    DurationMsg msg;

    msg.duration = duration;
    ppack(get_pipe(), CK_MSG_DURATION, (CheckMsg *) &msg);
}
/* Report a test failure message through the message pipe (fd variant).
 * The message text is passed through without copying. */
void send_failure_info(const char *msg)
{
    FailMsg failure;

    failure.msg = (char *) msg;
    ppack(fileno(get_pipe()), CK_MSG_FAIL, (CheckMsg *) &failure);
}
/*
 * Read back the result of one test from the message pipe, then tear the pipe
 * down and set up a fresh one for the next test.
 *
 * waserror is forwarded to construct_test_result() to flag an abnormal exit.
 * NOTE(review): the __LINE__ - N offsets in the eprintf calls are meant to
 * point at the failing call above; they assume this exact line layout.
 * NOTE(review): execution continues after eprintf — presumably eprintf
 * terminates the process on error; confirm, otherwise fp/rmsg may be NULL.
 */
TestResult *
receive_test_result (int waserror)
{
  FILE *fp;
  RcvMsg *rmsg;
  TestResult *result;

  fp = get_pipe ();
  if (fp == NULL) {
    eprintf ("Error in call to get_pipe", __FILE__, __LINE__ - 2);
  }

  /* The pipe was written by the forked test; rewind to read it from the start. */
  rewind (fp);
  rmsg = punpack (fp);
  if (rmsg == NULL) {
    eprintf ("Error in call to punpack", __FILE__, __LINE__ - 4);
  }

  /* Recycle the pipe so the next test starts with an empty one. */
  teardown_pipe ();
  setup_pipe ();

  result = construct_test_result (rmsg, waserror);
  rcvmsg_free (rmsg);
  return result;
}
/* Announce which context (setup/test/teardown) later messages belong to. */
void send_ctx_info(enum ck_result_ctx ctx)
{
    CtxMsg packet;

    packet.ctx = ctx;
    ppack(fileno(get_pipe()), CK_MSG_CTX, (CheckMsg *) &packet);
}
/* Report a test failure message through the message pipe.
 * The text is duplicated for packing and released before returning. */
void send_failure_info(const char *msg)
{
    FailMsg failure;

    failure.msg = strdup(msg);
    ppack(get_pipe(), CK_MSG_FAIL, (CheckMsg *) &failure);
    free(failure.msg);
}
/* Record the source location of the current check (fd variant).
 * The file name is passed through without copying. */
void send_loc_info(const char * file, int line)
{
    LocMsg loc;

    loc.file = (char *) file;
    loc.line = line;
    ppack(fileno(get_pipe()), CK_MSG_LOC, (CheckMsg *) &loc);
}
/* Record the source location of the current check.
 * The file name is duplicated for packing and released before returning. */
void send_loc_info(const char *file, int line)
{
    LocMsg loc;

    loc.file = strdup(file);
    loc.line = line;
    ppack(get_pipe(), CK_MSG_LOC, (CheckMsg *) &loc);
    free(loc.file);
}
// Read the whole HTTP body into `out`, optionally converting its charset.
//
// @param out        receives the (possibly charset-converted) body
// @param to_charset target charset, or NULL for no conversion
// @return false if the header was not read yet or the body length is invalid;
//         true otherwise (note: a mid-stream read error still returns true
//         with whatever data was collected, matching the original contract).
bool http_response::get_body(string& out, const char* to_charset /* = NULL */)
{
	if (header_ok_ == false) {
		logger_error("header not read yet");
		return false;
	} else if (client_->body_length() == 0)
		return true;
	else if (client_->body_length() < 0) {
		// GET/CONNECT requests legitimately carry no body.
		const char* method = client_->request_method();
		if (method && (strcmp(method, "GET") == 0
			|| strcmp(method, "CONNECT") == 0)) {
			return true;
		}
		logger_error("client request body length(%d) invalid",
			(int) client_->body_length());
		return false;
	}

	http_pipe* hp = get_pipe(to_charset);
	// FIX: ps must outlive the read loop below. It was previously declared
	// inside the `if (hp)` block, so hp kept a dangling pointer to a
	// destroyed pipe_string and wrote through it in hp->update()/update_end().
	pipe_string ps(out);
	if (hp)
		hp->append(&ps);

	string buf;
	int ret;

	// Read the HTTP request body.
	while (true) {
		ret = client_->read_body(buf);
		if (ret < 0) {
			close();
			break;
		} else if (ret == 0)
			break;
		if (hp)
			hp->update(buf.c_str(), ret);
		else
			out.append(buf);
	}
	if (hp) {
		hp->update_end();
		delete hp;
	}
	return true;
}
/* pubid : recver; user = sender */
/* Like get_pipe(), but a channel pipe is only returned when the sender
 * is actually a member of that channel. */
void *get_pipe_strict(char *pubid, USERS *user, acetables *g_ape)
{
	transpipe *found = get_pipe(pubid, g_ape);

	if (found == NULL) {
		return NULL;
	}
	if (found->type == CHANNEL_PIPE && !isonchannel(user, found->pipe)) {
		return NULL;
	}
	return found;
}
/* to manage subuser use post_to_pipe() instead */
/* Deliver a RAW to the pipe named by `pipe`: broadcast for channel pipes,
 * direct delivery otherwise. Unknown pipe ids are silently ignored. */
void post_raw_pipe(RAW *raw, char *pipe, acetables *g_ape)
{
	transpipe *dest = get_pipe(pipe, g_ape);

	if (dest == NULL) {
		return;
	}
	if (dest->type == CHANNEL_PIPE) {
		post_raw_channel(raw, dest->pipe, g_ape);
	} else {
		post_raw(raw, dest->pipe, g_ape);
	}
}
/* Resolve a public id to its user, or NULL when the id is unknown
 * or does not name a user pipe. */
USERS *seek_user_simple(const char *pubid, acetables *g_ape)
{
	transpipe *found = get_pipe(pubid, g_ape);

	return (found != NULL && found->type == USER_PIPE) ? found->pipe : NULL;
}
/* Resolve a public id to its channel, or NULL when the id is unknown
 * or does not name a channel pipe. */
CHANNEL *getchanbypubid(const char *pubid, acetables *g_ape)
{
	transpipe *found = get_pipe(pubid, g_ape);

	return (found != NULL && found->type == CHANNEL_PIPE) ? found->pipe : NULL;
}
void spawner_new_c::process_agent_message_(const std::string& message, int agent_index) { std::string mod_message = std::to_string(agent_index) + "#" + message; auto runner = runners[agent_to_runner_index_(agent_index)]; runner->get_pipe(std_stream_output)->write(mod_message.c_str(), mod_message.size()); wait_agent_mutex_.lock(); if (awaited_agents_[agent_index - 1]) { awaited_agents_[agent_index - 1] = false; runner->suspend(); static_cast<secure_runner*>(runner)->prolong_time_limits(); } else { // it hasn't been waited for, but sent a message. what do? } wait_agent_mutex_.unlock(); }
/* Post raw to a proxy and propagate it to all of it's attached users */
void proxy_post_raw(RAW *raw, ape_proxy *proxy, acetables *g_ape)
{
	ape_proxy_pipe *attached;
	transpipe *dest;

	for (attached = proxy->to; attached != NULL; attached = attached->next) {
		dest = get_pipe(attached->pipe, g_ape);
		if (dest != NULL && dest->type == USER_PIPE) {
			post_raw(raw, dest->pipe, g_ape);
		}
		/* anything that is not a user pipe is skipped */
	}
}
/* Return the proxy behind pubid_proxy if pubid is among its attached pipes,
 * NULL otherwise (also when pubid_proxy is not a proxy pipe). */
ape_proxy *proxy_are_linked(char *pubid, char *pubid_proxy, acetables *g_ape)
{
	transpipe *pipe = get_pipe(pubid_proxy, g_ape);
	ape_proxy *proxy;
	struct _ape_proxy_pipe *ppipe;

	if (pipe == NULL || pipe->type != PROXY_PIPE) {
		return NULL;
	}
	proxy = (ape_proxy *) pipe->pipe;
	for (ppipe = proxy->to; ppipe != NULL; ppipe = ppipe->next) {
		if (strcmp(pubid, ppipe->pipe) == 0) {
			return proxy;
		}
	}
	return NULL;
}
/* Attach the pipe named by `pipe` to the proxy's list of targets and link the
 * two so that destroying the pipe detaches it from the proxy. No-op when the
 * proxy is NULL or the pipe id is unknown. */
void proxy_attach(ape_proxy *proxy, char *pipe, int allow_write, acetables *g_ape)
{
	transpipe *target;
	ape_proxy_pipe *entry;

	if (proxy == NULL) {
		return;
	}
	target = get_pipe(pipe, g_ape);
	if (target == NULL) {
		return;
	}

	entry = xmalloc(sizeof(*entry));
	memcpy(entry->pipe, target->pubid, strlen(target->pubid) + 1);
	entry->allow_write = allow_write;

	/* push onto the head of the proxy's target list */
	entry->next = proxy->to;
	proxy->to = entry;
	proxy->nlink++;

	link_pipe(target, proxy->pipe, proxy_detach);
}
/* Returns :
 *   -1 if splice is not possible or not possible anymore and we must switch to
 *      user-land copy (eg: to_forward reached)
 *    0 when we know that polling is required to get more data (EAGAIN)
 *    1 for all other cases (we can safely try again, or if an activity has been
 *      detected (DATA/NULL/ERR))
 * Sets :
 *   BF_READ_NULL
 *   BF_READ_PARTIAL
 *   BF_WRITE_PARTIAL (during copy)
 *   BF_OUT_EMPTY (during copy)
 *   SI_FL_ERR
 *   SI_FL_WAIT_ROOM
 *   (SI_FL_WAIT_RECV)
 *
 * This function automatically allocates a pipe from the pipe pool. It also
 * carefully ensures to clear b->pipe whenever it leaves the pipe empty.
 */
static int stream_sock_splice_in(struct buffer *b, struct stream_interface *si)
{
	/* remembers, process-wide, whether the running kernel reports a
	 * closed connection via splice() returning 0 (kernels >= 2.6.27.13) */
	static int splice_detects_close;
	int fd = si->fd;
	int ret;
	unsigned long max;
	int retval = 1;

	if (!b->to_forward)
		return -1;

	if (!(b->flags & BF_KERN_SPLICING))
		return -1;

	if (b->l) {
		/* We're embarrassed, there are already data pending in
		 * the buffer and we don't want to have them at two
		 * locations at a time. Let's indicate we need some
		 * place and ask the consumer to hurry.
		 */
		si->flags |= SI_FL_WAIT_ROOM;
		EV_FD_CLR(fd, DIR_RD);
		b->rex = TICK_ETERNITY;
		b->cons->chk_snd(b->cons);
		return 1;
	}

	if (unlikely(b->pipe == NULL)) {
		if (pipes_used >= global.maxpipes || !(b->pipe = get_pipe())) {
			/* no pipe available: fall back to user-land copy for good */
			b->flags &= ~BF_KERN_SPLICING;
			return -1;
		}
	}

	/* At this point, b->pipe is valid */

	while (1) {
		if (b->to_forward == BUF_INFINITE_FORWARD)
			max = MAX_SPLICE_AT_ONCE;
		else
			max = b->to_forward;

		if (!max) {
			/* It looks like the buffer + the pipe already contain
			 * the maximum amount of data to be transferred. Try to
			 * send those data immediately on the other side if it
			 * is currently waiting.
			 */
			retval = -1; /* end of forwarding */
			break;
		}

		ret = splice(fd, NULL, b->pipe->prod, NULL, max, SPLICE_F_MOVE|SPLICE_F_NONBLOCK);

		if (ret <= 0) {
			if (ret == 0) {
				/* connection closed. This is only detected by
				 * recent kernels (>= 2.6.27.13). If we notice
				 * it works, we store the info for later use.
				 */
				splice_detects_close = 1;
				b->flags |= BF_READ_NULL;
				retval = 1; /* no need for further polling */
				break;
			}

			if (errno == EAGAIN) {
				/* there are two reasons for EAGAIN :
				 *   - nothing in the socket buffer (standard)
				 *   - pipe is full
				 *   - the connection is closed (kernel < 2.6.27.13)
				 * Since we don't know if pipe is full, we'll
				 * stop if the pipe is not empty. Anyway, we
				 * will almost always fill/empty the pipe.
				 */
				if (b->pipe->data) {
					si->flags |= SI_FL_WAIT_ROOM;
					retval = 1;
					break;
				}

				/* We don't know if the connection was closed,
				 * but if we know splice detects close, then we
				 * know it for sure.
				 * But if we're called upon POLLIN with an empty
				 * pipe and get EAGAIN, it is suspect enough to
				 * try to fall back to the normal recv scheme
				 * which will be able to deal with the situation.
				 */
				if (splice_detects_close)
					retval = 0; /* we know for sure that it's EAGAIN */
				else
					retval = -1;
				break;
			}

			if (errno == ENOSYS || errno == EINVAL) {
				/* splice not supported on this end, disable it */
				b->flags &= ~BF_KERN_SPLICING;
				si->flags &= ~SI_FL_CAP_SPLICE;
				put_pipe(b->pipe);
				b->pipe = NULL;
				return -1;
			}

			/* here we have another error */
			si->flags |= SI_FL_ERR;
			retval = 1;
			break;
		} /* ret <= 0 */

		/* account for the forwarded bytes */
		if (b->to_forward != BUF_INFINITE_FORWARD)
			b->to_forward -= ret;
		b->total += ret;
		b->pipe->data += ret;
		b->flags |= BF_READ_PARTIAL;
		b->flags &= ~BF_OUT_EMPTY;

		if (b->pipe->data >= SPLICE_FULL_HINT || ret >= global.tune.recv_enough) {
			/* We've read enough of it for this time. */
			retval = 1;
			break;
		}
	} /* while */

	/* never keep an empty pipe allocated: return it to the pool */
	if (unlikely(!b->pipe->data)) {
		put_pipe(b->pipe);
		b->pipe = NULL;
	}

	return retval;
}
// Convenience accessor: the runner's stdin pipe, downcast to its concrete type.
std::shared_ptr<input_pipe_c> runner::get_input_pipe()
{
    auto pipe = get_pipe(STD_INPUT_PIPE);
    return std::static_pointer_cast<input_pipe_c>(pipe);
}
/*
 * Find a file on file_list. Outputs return a FILE*, while inputs return FIN*.
 */
PTR
file_find(STRING * sval, int type)
{
    PTR result = 0;
    FILE_NODE *p;
    FILE_NODE *q;			/* trails p for the move-to-front unlink */
    char *name = sval->str;

    TRACE(("file_find(%s, %d)\n", name, type));
    for (q = 0, p = file_list; p != 0; q = p, p = p->link) {
	/* search is by name and type */
	if (strcmp(name, p->name->str) == 0 &&
	    (p->type == type ||
	/* no distinction between F_APPEND and F_TRUNC here */
	     (p->type >= F_APPEND && type >= F_APPEND))) {

	    if (q != 0) {
		/* delete from list for move to front */
		q->link = p->link;
	    }
	    break;		/* while loop */
	}
    }

    if (!p) {
	/* open a new one */
	p = alloc_filenode();

	switch (p->type = (short) type) {
	case F_TRUNC:
	    if (!(p->ptr = (PTR) tfopen(name, BinMode2("wb", "w"))))
		output_failed(name);
	    break;

	case F_APPEND:
	    if (!(p->ptr = (PTR) tfopen(name, BinMode2("ab", "a"))))
		output_failed(name);
	    break;

	case F_IN:
	    /* input open failure is not fatal: p->ptr stays 0 and the
	     * node is released below */
	    p->ptr = (PTR) FINopen(name, 0);
	    break;

	case PIPE_OUT:
	case PIPE_IN:
#if defined(HAVE_REAL_PIPES) || defined(HAVE_FAKE_PIPES)
	    if (!(p->ptr = get_pipe(name, type, &p->pid))) {
		if (type == PIPE_OUT)
		    output_failed(name);
	    }
#else
	    rt_error("pipes not supported");
#endif
	    break;

#ifdef DEBUG
	default:
	    bozo("bad file type");
#endif
	}
    } else if (p->ptr == 0 && type == F_IN) {
	/* node exists from an earlier failed open; retry the open */
	p->ptr = (PTR) FINopen(name, 0);
    }

    /* put p at the front of the list */
    if (p->ptr == 0) {
	free_filenode(p);
    } else {
	if (p != file_list) {
	    p->link = file_list;
	    file_list = p;
	}
	/* successful open */
	p->name = sval;
	sval->ref_cnt++;
	TRACE(("-> %p\n", p->ptr));
	result = p->ptr;
    }
    return result;
}
// Read the whole HTTP body, streaming it into the xml parser `out`,
// optionally converting its charset first.
//
// @param out        xml parser that receives the body incrementally
// @param to_charset target charset, or NULL for no conversion
// @return false when the header was not read, the body length is invalid,
//         or a read error occurs; true otherwise.
bool http_response::get_body(xml& out, const char* to_charset /* = NULL */)
{
	if (header_ok_ == false) {
		logger_error("header not read yet");
		return false;
	} else if (client_->body_length() == 0)
		return true;
	else if (client_->body_length() < 0) {
		// GET/CONNECT requests legitimately carry no body.
		const char* method = client_->request_method();
		if (method && (strcmp(method, "GET") == 0
			|| strcmp(method, "CONNECT") == 0)) {
			return true;
		}
		logger_error("client request body length(%d) invalid",
			(int) client_->body_length());
		return false;
	}

	if (debug_)
		client_->print_header("----request---");

	// Optional charset-conversion pipe; when present it feeds `out`.
	http_pipe* hp = get_pipe(to_charset);
	if (hp)
		hp->append(&out);

	string buf;
	int ret;

	// Loop reading the client request body.
	while (true) {
		ret = client_->read_body(buf);
		if (ret == 0)
			break;
		if (ret < 0) {
			logger_error("read client body error");
			close();
			return false;
		}
		// Stream-parse the xml-formatted body.
		if (hp)
			hp->update(buf.c_str(), ret);
		else
			out.update(buf.c_str());
		if (debug_)
			printf("%s", buf.c_str());
	}
	if (hp) {
		hp->update_end();
		delete hp;
	}
	return true;
}
/*
 * Handle a USBIP_CMD_SUBMIT pdu received from the client: allocate and fill
 * a urb mirroring the request, receive its transfer/iso buffers from the
 * socket, and submit it to the local device.
 *
 * NOTE(review): the error paths below return without freeing priv/urb/
 * transfer_buffer directly — presumably the queued SDEV_EVENT_ERROR_* event
 * triggers teardown that reclaims them; confirm against the event handler.
 */
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = interface_to_usbdev(sdev->interface);
	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		/* isochronous urbs need one descriptor per packet */
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		uerr("malloc urb\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set priv->urb->transfer_buffer */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			uerr("malloc x_buff\n");
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* set priv->urb->setup_packet (control requests carry 8 setup bytes) */
	priv->urb->setup_packet = kzalloc(8, GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		uerr("allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}
	memcpy(priv->urb->setup_packet, &pdu->u.cmd_submit.setup, 8);

	/* set other members from the base header of pdu */
	priv->urb->context  = (void *) priv;
	priv->urb->dev      = udev;
	priv->urb->pipe     = pipe;
	priv->urb->complete = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

	/* receive the out-of-band transfer and iso buffers from the socket */
	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/* no need to submit an intercepted request, but harmless? */
	tweak_special_requests(priv->urb);

	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);

	if (ret == 0)
		dbg_stub_rx("submit urb ok, seqnum %u\n", pdu->base.seqnum);
	else {
		uerr("submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	dbg_stub_rx("Leave\n");
	return;
}
std::shared_ptr<output_pipe_c> base_runner::get_output_pipe() { return std::static_pointer_cast<output_pipe_c>(get_pipe(STD_INPUT_PIPE)); }
/* Push the parent's read end of the process's stderr pipe onto the Lua stack. */
static int proc_err_get(lua_State *L)
{
    lua_apr_proc *process;

    process = proc_check(L, 1);
    return get_pipe(L, process->handle.err, "err_parent");
}
/* Draw every pipe currently tracked by the game state. */
void render_all_pipes(GameData* data, DrawConfig* config)
{
    int index;

    for (index = 0; index < get_num_pipes(data); index++) {
        Pipe current = get_pipe(data, index);
        render_pipe(data, config, &current);
    }
}
/*
 * This is the callback which is called by the connection layer to receive data
 * into the buffer from the connection. It iterates over the transport layer's
 * rcv_buf function.
 */
static void si_conn_recv_cb(struct connection *conn)
{
	struct stream_interface *si = conn->owner;
	struct channel *chn = si->ib;
	int ret, max, cur_read;
	int read_poll = MAX_READ_POLL_LOOPS;

	/* stop immediately on errors. Note that we DON'T want to stop on
	 * POLL_ERR, as the poller might report a write error while there
	 * are still data available in the recv buffer. This typically
	 * happens when we send too large a request to a backend server
	 * which rejects it before reading it all.
	 */
	if (conn->flags & CO_FL_ERROR)
		return;

	/* stop here if we reached the end of data */
	if (conn_data_read0_pending(conn))
		goto out_shutdown_r;

	/* maybe we were called immediately after an asynchronous shutr */
	if (chn->flags & CF_SHUTR)
		return;

	cur_read = 0;

	if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !chn->buf->o &&
	    global.tune.idle_timer &&
	    (unsigned short)(now_ms - chn->last_read) >= global.tune.idle_timer) {
		/* The buffer was empty and nothing was transferred for more
		 * than one second. This was caused by a pause and not by
		 * congestion. Reset any streaming mode to reduce latency.
		 */
		chn->xfer_small = 0;
		chn->xfer_large = 0;
		chn->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
	}

	/* First, let's see if we may splice data across the channel without
	 * using a buffer.
	 */
	if (conn->xprt->rcv_pipe &&
	    (chn->pipe || chn->to_forward >= MIN_SPLICE_FORWARD) &&
	    chn->flags & CF_KERN_SPLICING) {
		if (buffer_not_empty(chn->buf)) {
			/* We're embarrassed, there are already data pending in
			 * the buffer and we don't want to have them at two
			 * locations at a time. Let's indicate we need some
			 * place and ask the consumer to hurry.
			 */
			goto abort_splice;
		}

		if (unlikely(chn->pipe == NULL)) {
			if (pipes_used >= global.maxpipes || !(chn->pipe = get_pipe())) {
				/* no pipe available: fall back to buffered copy */
				chn->flags &= ~CF_KERN_SPLICING;
				goto abort_splice;
			}
		}

		ret = conn->xprt->rcv_pipe(conn, chn->pipe, chn->to_forward);
		if (ret < 0) {
			/* splice not supported on this end, let's disable it */
			chn->flags &= ~CF_KERN_SPLICING;
			goto abort_splice;
		}

		if (ret > 0) {
			if (chn->to_forward != CHN_INFINITE_FORWARD)
				chn->to_forward -= ret;
			chn->total += ret;
			cur_read += ret;
			chn->flags |= CF_READ_PARTIAL;
		}

		if (conn_data_read0_pending(conn))
			goto out_shutdown_r;

		if (conn->flags & CO_FL_ERROR)
			return;

		if (conn->flags & CO_FL_WAIT_ROOM) {
			/* the pipe is full or we have read enough data that it
			 * could soon be full. Let's stop before needing to poll.
			 */
			si->flags |= SI_FL_WAIT_ROOM;
			__conn_data_stop_recv(conn);
		}

		/* splice not possible (anymore), let's go on on standard copy */
	}

 abort_splice:
	/* never keep an empty pipe allocated: return it to the pool */
	if (chn->pipe && unlikely(!chn->pipe->data)) {
		put_pipe(chn->pipe);
		chn->pipe = NULL;
	}

	/* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
	 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
	 * that if such an event is not handled above in splice, it will be handled here by
	 * recv().
	 */
	while (!(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_DATA_RD_SH | CO_FL_WAIT_ROOM | CO_FL_HANDSHAKE))) {
		max = bi_avail(chn);

		if (!max) {
			si->flags |= SI_FL_WAIT_ROOM;
			break;
		}

		ret = conn->xprt->rcv_buf(conn, chn->buf, max);
		if (ret <= 0)
			break;

		cur_read += ret;

		/* if we're allowed to directly forward data, we must update ->o */
		if (chn->to_forward && !(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
			unsigned long fwd = ret;
			if (chn->to_forward != CHN_INFINITE_FORWARD) {
				if (fwd > chn->to_forward)
					fwd = chn->to_forward;
				chn->to_forward -= fwd;
			}
			b_adv(chn->buf, fwd);
		}

		chn->flags |= CF_READ_PARTIAL;
		chn->total += ret;

		if (channel_full(chn)) {
			si->flags |= SI_FL_WAIT_ROOM;
			break;
		}

		if ((chn->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
			si->flags |= SI_FL_WAIT_ROOM;
			__conn_data_stop_recv(conn);
			break;
		}

		/* if too many bytes were missing from last read, it means that
		 * it's pointless trying to read again because the system does
		 * not have them in buffers.
		 */
		if (ret < max) {
			/* if a streamer has read few data, it may be because we
			 * have exhausted system buffers. It's not worth trying
			 * again.
			 */
			if (chn->flags & CF_STREAMER)
				break;

			/* if we read a large block smaller than what we requested,
			 * it's almost certain we'll never get anything more.
			 */
			if (ret >= global.tune.recv_enough)
				break;
		}
	} /* while !flags */

	if (conn->flags & CO_FL_ERROR)
		return;

	if (cur_read) {
		/* heuristics below classify the channel as a (fast) streamer
		 * based on how full each read pass leaves the buffer */
		if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
		    (cur_read <= chn->buf->size / 2)) {
			chn->xfer_large = 0;
			chn->xfer_small++;
			if (chn->xfer_small >= 3) {
				/* we have read less than half of the buffer in
				 * one pass, and this happened at least 3 times.
				 * This is definitely not a streamer.
				 */
				chn->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
			}
			else if (chn->xfer_small >= 2) {
				/* if the buffer has been at least half full twice,
				 * we receive faster than we send, so at least it
				 * is not a "fast streamer".
				 */
				chn->flags &= ~CF_STREAMER_FAST;
			}
		}
		else if (!(chn->flags & CF_STREAMER_FAST) &&
			 (cur_read >= chn->buf->size - global.tune.maxrewrite)) {
			/* we read a full buffer at once */
			chn->xfer_small = 0;
			chn->xfer_large++;
			if (chn->xfer_large >= 3) {
				/* we call this buffer a fast streamer if it manages
				 * to be filled in one call 3 consecutive times.
				 */
				chn->flags |= (CF_STREAMER | CF_STREAMER_FAST);
			}
		}
		else {
			chn->xfer_small = 0;
			chn->xfer_large = 0;
		}
		chn->last_read = now_ms;
	}

	if (conn_data_read0_pending(conn))
		/* connection closed */
		goto out_shutdown_r;

	return;

 out_shutdown_r:
	/* we received a shutdown */
	chn->flags |= CF_READ_NULL;
	if (chn->flags & CF_AUTO_CLOSE)
		channel_shutw_now(chn);
	stream_sock_read0(si);
	conn_data_read0(conn);
	return;
}