int context_continue(Context * ctx) { int signal = 0; assert(is_dispatch_thread()); assert(ctx->stopped); assert(!ctx->pending_intercept); assert(!EXT(ctx)->pending_step); assert(!ctx->exited); if (skip_breakpoint(ctx, 0)) return 0; if (!EXT(ctx)->ptrace_event) { while (ctx->pending_signals != 0) { while ((ctx->pending_signals & (1 << signal)) == 0) signal++; if (ctx->sig_dont_pass & (1 << signal)) { ctx->pending_signals &= ~(1 << signal); signal = 0; } else { break; } } assert(signal != SIGSTOP); assert(signal != SIGTRAP); } trace(LOG_CONTEXT, "context: resuming ctx %#lx, id %s, with signal %d", ctx, ctx->id, signal); if (EXT(ctx)->regs_dirty) { if (ptrace(PTRACE_SETREGS, EXT(ctx)->pid, 0, (int)EXT(ctx)->regs) < 0) { int err = errno; if (err == ESRCH) { EXT(ctx)->regs_dirty = 0; send_context_started_event(ctx); return 0; } trace(LOG_ALWAYS, "error: ptrace(PTRACE_SETREGS) failed: ctx %#lx, id %s, error %d %s", ctx, ctx->id, err, errno_to_str(err)); errno = err; return -1; } EXT(ctx)->regs_dirty = 0; } if (ptrace(PTRACE_CONT, EXT(ctx)->pid, 0, signal) < 0) { int err = errno; if (err == ESRCH) { send_context_started_event(ctx); return 0; } trace(LOG_ALWAYS, "error: ptrace(PTRACE_CONT, ...) failed: ctx %#lx, id %s, error %d %s", ctx, ctx->id, err, errno_to_str(err)); errno = err; return -1; } ctx->pending_signals &= ~(1 << signal); send_context_started_event(ctx); return 0; }
void check_error(int error) { if (error == 0) return; #if ENABLE_Trace trace(LOG_ALWAYS, "Fatal error %d: %s", error, errno_to_str(error)); trace(LOG_ALWAYS, " Exiting agent..."); if (log_file == stderr) exit(1); #endif fprintf(stderr, "Fatal error %d: %s\n", error, errno_to_str(error)); fprintf(stderr, " Exiting agent...\n"); exit(1); }
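A minimal usage sketch (an assumption, not taken from the source): check_error() is meant to wrap calls that return an errno-style code, as async_req_post() below does with the pthread primitives. The mutex and function names here are hypothetical.

#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void example_locked_update(void) {
    /* Hypothetical example: any non-zero return code is treated as fatal. */
    check_error(pthread_mutex_lock(&example_lock));
    /* ... update state shared with other threads ... */
    check_error(pthread_mutex_unlock(&example_lock));
}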
static void run_cache_client(int retry) { Trap trap; unsigned i; unsigned id = current_client.id; void * args_copy = NULL; assert(id != 0); current_cache = NULL; cache_miss_cnt = 0; def_channel = NULL; if (current_client.args_copy) args_copy = current_client.args; for (i = 0; i < listeners_cnt; i++) listeners[i](retry ? CTLE_RETRY : CTLE_START); if (set_trap(&trap)) { current_client.client(current_client.args); clear_trap(&trap); assert(current_client.id == 0); assert(cache_miss_cnt == 0); } else if (id != current_client.id) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %s", errno_to_str(trap.error)); assert(current_client.id == 0); assert(cache_miss_cnt == 0); } else { if (get_error_code(trap.error) != ERR_CACHE_MISS || cache_miss_cnt == 0 || current_cache == NULL) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %s", errno_to_str(trap.error)); for (i = 0; i < listeners_cnt; i++) listeners[i](CTLE_COMMIT); } else { AbstractCache * cache = current_cache; if (cache->wait_list_cnt >= cache->wait_list_max) { cache->wait_list_max += 8; cache->wait_list_buf = (WaitingCacheClient *)loc_realloc(cache->wait_list_buf, cache->wait_list_max * sizeof(WaitingCacheClient)); } if (current_client.args != NULL && !current_client.args_copy) { void * mem = loc_alloc(current_client.args_size); memcpy(mem, current_client.args, current_client.args_size); current_client.args = mem; current_client.args_copy = 1; } if (cache->wait_list_cnt == 0) list_add_last(&cache->link, &cache_list); if (current_client.channel != NULL) channel_lock_with_msg(current_client.channel, channel_lock_msg); cache->wait_list_buf[cache->wait_list_cnt++] = current_client; for (i = 0; i < listeners_cnt; i++) listeners[i](CTLE_ABORT); args_copy = NULL; } memset(&current_client, 0, sizeof(current_client)); current_cache = NULL; cache_miss_cnt = 0; def_channel = NULL; } if (args_copy != NULL) loc_free(args_copy); }
void check_error_debug(const char * file, int line, int error) { if (error == 0) return; #if ENABLE_Trace trace(LOG_ALWAYS, "Fatal error %d: %s", error, errno_to_str(error)); trace(LOG_ALWAYS, " At %s:%d", file, line); trace(LOG_ALWAYS, " Exiting agent..."); if (log_file == stderr) exit(1); #endif fprintf(stderr, "Fatal error %d: %s\n", error, errno_to_str(error)); fprintf(stderr, " At %s:%d\n", file, line); fprintf(stderr, " Exiting agent...\n"); exit(1); }
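check_error() and check_error_debug() differ only in the file/line context they report; a plausible way to route call sites through the debug variant, shown purely as an illustration (this macro is an assumption, not taken from the source), is:

/* Hypothetical wiring: make every check_error() call site report its location. */
#define check_error(error) check_error_debug(__FILE__, __LINE__, error)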
static void np_channel_read_done(void * x) { AsyncReqInfo * req = (AsyncReqInfo *)x; ChannelNP * c = (ChannelNP *)req->client_data; ssize_t len = 0; assert(is_dispatch_thread()); assert(c->magic == CHANNEL_MAGIC); assert(c->read_pending != 0); assert(c->lock_cnt > 0); loc_free(req->u.user.data); c->read_pending = 0; /* some data is available, retrieve it */ { len = c->rd_req.u.user.rval; if (req->error) { if (c->chan.state != ChannelStateDisconnected) { trace(LOG_ALWAYS, "Can't read from socket: %s", errno_to_str(req->error)); } len = 0; /* Treat error as EOF */ } } if (c->chan.state != ChannelStateDisconnected) { ibuf_read_done(&c->ibuf, len); } else if (len > 0) { np_post_read(&c->ibuf, c->ibuf.buf, c->ibuf.buf_size); } else { np_unlock(&c->chan); } }
static int certificate_verify_callback(int preverify_ok, X509_STORE_CTX * ctx) { char fnm[FILE_PATH_SIZE]; DIR * dir = NULL; int err = 0; int found = 0; snprintf(fnm, sizeof(fnm), "%s/ssl", tcf_dir); if (!err && (dir = opendir(fnm)) == NULL) err = errno; while (!err && !found) { int l = 0; X509 * cert = NULL; FILE * fp = NULL; struct dirent * ent = readdir(dir); if (ent == NULL) break; l = strlen(ent->d_name); if (l < 5 || strcmp(ent->d_name + l - 5, ".cert") != 0) continue; snprintf(fnm, sizeof(fnm), "%s/ssl/%s", tcf_dir, ent->d_name); if (!err && (fp = fopen(fnm, "r")) == NULL) err = errno; if (!err && (cert = PEM_read_X509(fp, NULL, NULL, NULL)) == NULL) err = set_ssl_errno(); if (!err && fclose(fp) != 0) err = errno; if (!err && X509_cmp(X509_STORE_CTX_get_current_cert(ctx), cert) == 0) found = 1; } if (dir != NULL && closedir(dir) < 0 && !err) err = errno; if (err) trace(LOG_ALWAYS, "Cannot read certificate %s: %s", fnm, errno_to_str(err)); else if (!found) trace(LOG_ALWAYS, "Authentication failure: invalid certificate"); return err == 0 && found; }
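A verify callback with this signature is normally installed on the OpenSSL context with SSL_CTX_set_verify(); the sketch below is an assumption about how this code is wired up, and the ssl_ctx parameter name is hypothetical.

#include <openssl/ssl.h>

/* Hedged sketch: request peer certificates and let the callback above accept or reject them. */
static void example_enable_peer_verification(SSL_CTX * ssl_ctx) {
    SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, certificate_verify_callback);
}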
void virtual_stream_create(const char * type, const char * context_id, size_t buf_len, unsigned access, VirtualStreamCallBack * callback, void * callback_args, VirtualStream ** res) { LINK * l; VirtualStream * stream = loc_alloc_zero(sizeof(VirtualStream)); buf_len++; list_init(&stream->clients); strncpy(stream->type, type, sizeof(stream->type) - 1); stream->magic = STREAM_MAGIC; stream->id = id_cnt++; stream->access = access; stream->callback = callback; stream->callback_args = callback_args; stream->ref_cnt = 1; stream->buf = loc_alloc(buf_len); stream->buf_len = buf_len; for (l = subscriptions.next; l != &subscriptions; l = l->next) { Subscription * h = all2subscription(l); if (strcmp(type, h->type) == 0) { Trap trap; create_client(stream, h->channel); if (set_trap(&trap)) { send_event_stream_created(&h->channel->out, stream, context_id); clear_trap(&trap); } else { trace(LOG_ALWAYS, "Exception sending stream created event: %d %s", trap.error, errno_to_str(trap.error)); } } } list_add_first(&stream->link_all, &streams); *res = stream; }
int context_read_mem(Context * ctx, ContextAddress address, void * buf, size_t size) { ContextAddress word_addr; unsigned word_size = context_word_size(ctx); assert(is_dispatch_thread()); assert(!ctx->exited); trace(LOG_CONTEXT, "context: read memory ctx %#lx, id %s, address %#lx, size %zu", ctx, ctx->id, address, size); assert(word_size <= sizeof(unsigned long)); for (word_addr = address & ~((ContextAddress)word_size - 1); word_addr < address + size; word_addr += word_size) { unsigned long word = 0; errno = 0; word = ptrace(PTRACE_PEEKDATA, EXT(ctx)->pid, (char *)word_addr, 0); if (errno != 0) { int err = errno; trace(LOG_CONTEXT, "error: ptrace(PTRACE_PEEKDATA, ...) failed: ctx %#lx, id %s, addr %#lx, error %d %s", ctx, ctx->id, word_addr, err, errno_to_str(err)); errno = err; return -1; } if (word_addr < address || word_addr + word_size > address + size) { size_t i; for (i = 0; i < word_size; i++) { if (word_addr + i >= address && word_addr + i < address + size) { ((char *)buf)[word_addr + i - address] = ((char *)&word)[i]; } } } else { memcpy((char *)buf + (word_addr - address), &word, word_size); } } return check_breakpoints_on_memory_read(ctx, address, buf, size); }
int context_attach(pid_t pid, ContextAttachCallBack * done, void * data, int mode) { Context * ctx = NULL; assert(done != NULL); trace(LOG_CONTEXT, "context: attaching pid %d", pid); if ((mode & CONTEXT_ATTACH_SELF) == 0 && ptrace(PT_ATTACH, pid, 0, 0) < 0) { int err = errno; trace(LOG_ALWAYS, "error: ptrace(PT_ATTACH) failed: pid %d, error %d %s", pid, err, errno_to_str(err)); errno = err; return -1; } add_waitpid_process(pid); ctx = create_context(pid2id(pid, 0)); ctx->mem = ctx; ctx->mem_access |= MEM_ACCESS_INSTRUCTION; ctx->mem_access |= MEM_ACCESS_DATA; ctx->mem_access |= MEM_ACCESS_USER; ctx->big_endian = big_endian_host(); EXT(ctx)->pid = pid; EXT(ctx)->attach_callback = done; EXT(ctx)->attach_data = data; list_add_first(&ctx->ctxl, &pending_list); /* TODO: context_attach works only for main task in a process */ return 0; }
static void refresh_peer_server(int sock, PeerServer * ps) { unsigned i; const char * transport = peer_server_getprop(ps, "TransportName", NULL); assert(transport != NULL); if (strcmp(transport, "UNIX") == 0) { char str_id[64]; PeerServer * ps2 = peer_server_alloc(); ps2->flags = ps->flags | PS_FLAG_LOCAL | PS_FLAG_DISCOVERABLE; for (i = 0; i < ps->ind; i++) { peer_server_addprop(ps2, loc_strdup(ps->list[i].name), loc_strdup(ps->list[i].value)); } snprintf(str_id, sizeof(str_id), "%s:%s", transport, peer_server_getprop(ps, "Host", "")); for (i = 0; str_id[i]; i++) { /* Character '/' is prohibited in a peer ID string */ if (str_id[i] == '/') str_id[i] = '|'; } peer_server_addprop(ps2, loc_strdup("ID"), loc_strdup(str_id)); peer_server_add(ps2, PEER_DATA_RETENTION_PERIOD * 2); } else { struct sockaddr_in sin; #if defined(_WRS_KERNEL) int sinlen; #else socklen_t sinlen; #endif const char *str_port = peer_server_getprop(ps, "Port", NULL); int ifcind; struct in_addr src_addr; ip_ifc_info ifclist[MAX_IFC]; sinlen = sizeof sin; if (getsockname(sock, (struct sockaddr *)&sin, &sinlen) != 0) { trace(LOG_ALWAYS, "refresh_peer_server: getsockname error: %s", errno_to_str(errno)); return; } ifcind = build_ifclist(sock, MAX_IFC, ifclist); while (ifcind-- > 0) { char str_host[64]; char str_id[64]; PeerServer * ps2; if (sin.sin_addr.s_addr != INADDR_ANY && (ifclist[ifcind].addr & ifclist[ifcind].mask) != (sin.sin_addr.s_addr & ifclist[ifcind].mask)) { continue; } src_addr.s_addr = ifclist[ifcind].addr; ps2 = peer_server_alloc(); ps2->flags = ps->flags | PS_FLAG_LOCAL | PS_FLAG_DISCOVERABLE; for (i = 0; i < ps->ind; i++) { peer_server_addprop(ps2, loc_strdup(ps->list[i].name), loc_strdup(ps->list[i].value)); } inet_ntop(AF_INET, &src_addr, str_host, sizeof(str_host)); snprintf(str_id, sizeof(str_id), "%s:%s:%s", transport, str_host, str_port); peer_server_addprop(ps2, loc_strdup("ID"), loc_strdup(str_id)); peer_server_addprop(ps2, loc_strdup("Host"), loc_strdup(str_host)); peer_server_addprop(ps2, loc_strdup("Port"), loc_strdup(str_port)); peer_server_add(ps2, PEER_DATA_RETENTION_PERIOD * 2); } } }
static void channel_send_command_cb(Channel * c, void * client_data, int error) { struct channel_extra *ce = (struct channel_extra *)c->client_data; struct command_extra *cmd = (struct command_extra *)client_data; lua_State *L = ce->L; InputStream * inp = &c->inp; luaL_Buffer msg; int ch; lua_rawgeti(L, LUA_REGISTRYINDEX, cmd->result_cbrefp->ref); if(!error) { luaL_buffinit(L, &msg); while((ch = read_stream(inp)) >= 0) { luaL_addchar(&msg, ch); } luaL_pushresult(&msg); lua_pushnil(L); trace(LOG_LUA, "lua_channel_send_command_reply %p %d %s", c, cmd->result_cbrefp->ref, lua_tostring(L, -2)); } else { lua_pushnil(L); lua_pushstring(L, errno_to_str(error)); trace(LOG_LUA, "lua_channel_send_command_reply %p %d error %d", c, cmd->result_cbrefp->ref, error); } if(lua_pcall(L, 2, 0, 0) != 0) { fprintf(stderr, "%s\n", lua_tostring(L,1)); exit(1); } luaref_owner_free(L, cmd); }
static void lua_channel_connect_cb(void * client_data, int error, Channel * c) { struct channel_extra *ce = (struct channel_extra *)client_data; lua_State *L = ce->L; trace(LOG_LUA, "lua_channel_connect_cb %p %d", c, error); assert(ce->connect_cbrefp != NULL); lua_rawgeti(L, LUA_REGISTRYINDEX, ce->connect_cbrefp->ref); if(!error) { assert(c != NULL); ce->c = c; c->client_data = ce; c->protocol = ce->pe->p; c->connecting = channel_connecting; c->connected = channel_connected; c->receive = channel_receive; c->disconnected = channel_disconnected; lua_rawgeti(L, LUA_REGISTRYINDEX, ce->self_refp->ref); lua_pushnil(L); } else { assert(c == NULL); luaref_owner_free(L, ce); lua_pushnil(L); lua_pushstring(L, errno_to_str(error)); } if(lua_pcall(L, 2, 0, 0) != 0) { fprintf(stderr, "%s\n", lua_tostring(L,1)); exit(1); } }
static void write_process_input_done(void * x) { AsyncReqInfo * req = x; ProcessInput * inp = (ProcessInput *)req->client_data; inp->req_posted = 0; if (inp->prs == NULL) { /* Process has exited */ virtual_stream_delete(inp->vstream); loc_free(inp); } else { int wr = inp->req.u.fio.rval; if (wr < 0) { int err = inp->req.error; trace(LOG_ALWAYS, "Can't write process input stream: %d %s", err, errno_to_str(err)); inp->buf_pos = inp->buf_len = 0; } else { inp->buf_pos += wr; } process_input_streams_callback(inp->vstream, 0, inp); } }
static void handle_channel_msg(void * x) { Trap trap; ChannelNP * c = (ChannelNP *)x; int has_msg; assert(is_dispatch_thread()); assert(c->magic == CHANNEL_MAGIC); assert(c->ibuf.handling_msg == HandleMsgTriggered); assert(c->ibuf.message_count); has_msg = ibuf_start_message(&c->ibuf); if (has_msg <= 0) { if (has_msg < 0 && c->chan.state != ChannelStateDisconnected) { trace(LOG_PROTOCOL, "Socket is shutdown by remote peer, channel %#lx %s", c, c->chan.peer_name); channel_close(&c->chan); } } else if (set_trap(&trap)) { if (c->chan.receive) { c->chan.receive(&c->chan); } else { handle_protocol_message(&c->chan); assert(c->out_bin_block == NULL); } clear_trap(&trap); } else { trace(LOG_ALWAYS, "Exception in message handler: %s", errno_to_str(trap.error)); send_eof_and_close(&c->chan, trap.error); } }
static void tcp_flush_with_flags(ChannelTCP * c, int flags) { unsigned char * p = c->obuf; assert(is_dispatch_thread()); assert(c->magic == CHANNEL_MAGIC); assert(c->chan.out.end == p + sizeof(c->obuf)); assert(c->out_bin_block == NULL); assert(c->chan.out.cur >= p); assert(c->chan.out.cur <= p + sizeof(c->obuf)); if (c->chan.out.cur == p) return; if (c->chan.state != ChannelStateDisconnected && c->out_errno == 0) { #if ENABLE_OutputQueue c->out_queue.post_io_request = post_write_request; output_queue_add(&c->out_queue, p, c->chan.out.cur - p); #else assert(c->ssl == NULL); while (p < c->chan.out.cur) { size_t sz = c->chan.out.cur - p; ssize_t wr = send(c->socket, p, sz, flags); if (wr < 0) { int err = errno; trace(LOG_PROTOCOL, "Can't send() on channel %#lx: %s", c, errno_to_str(err)); c->out_errno = err; c->chan.out.cur = c->obuf; return; } p += wr; } assert(p == c->chan.out.cur); #endif } c->chan.out.cur = c->obuf; }
uint64_t timestamp_ms() { timespec ts; if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1) { roc_panic("clock_gettime(CLOCK_MONOTONIC): %s", errno_to_str().c_str()); } return uint64_t(ts.tv_sec) * 1000 + uint64_t(ts.tv_nsec) / 1000000; }
static void tcp_flush_with_flags(OutputStream * out, int flags) { int cnt = 0; ChannelTCP * c = channel2tcp(out2channel(out)); assert(is_dispatch_thread()); assert(c->magic == CHANNEL_MAGIC); assert(c->obuf_inp <= BUF_SIZE); if (c->obuf_inp == 0) return; if (c->socket < 0 || c->out_errno) { c->obuf_inp = 0; return; } while (cnt < c->obuf_inp) { int wr = 0; if (c->ssl) { #if ENABLE_SSL wr = SSL_write(c->ssl, c->obuf + cnt, c->obuf_inp - cnt); if (wr <= 0) { int err = SSL_get_error(c->ssl, wr); if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) { struct timeval tv; fd_set readfds; fd_set writefds; fd_set errorfds; FD_ZERO(&readfds); FD_ZERO(&writefds); FD_ZERO(&errorfds); if (err == SSL_ERROR_WANT_READ) FD_SET(c->socket, &readfds); if (err == SSL_ERROR_WANT_WRITE) FD_SET(c->socket, &writefds); FD_SET(c->socket, &errorfds); tv.tv_sec = 10L; tv.tv_usec = 0; if (select(c->socket + 1, &readfds, &writefds, &errorfds, &tv) >= 0) continue; } trace(LOG_PROTOCOL, "Can't SSL_write() on channel %#lx: %s", c, ERR_error_string(ERR_get_error(), NULL)); c->out_errno = EIO; c->obuf_inp = 0; return; } #else assert(0); #endif } else { wr = send(c->socket, c->obuf + cnt, c->obuf_inp - cnt, flags); if (wr < 0) { int err = errno; trace(LOG_PROTOCOL, "Can't send() on channel %#lx: %d %s", c, err, errno_to_str(err)); c->out_errno = err; c->obuf_inp = 0; return; } } cnt += wr; } assert(cnt == c->obuf_inp); c->obuf_inp = 0; }
int context_attach_self(void) { if (ptrace(PT_TRACE_ME, 0, 0, 0) < 0) { int err = errno; trace(LOG_ALWAYS, "error: ptrace(PT_TRACE_ME) failed: pid %d, error %d %s", getpid(), err, errno_to_str(err)); errno = err; return -1; } return 0; }
static void read_stream_done(Channel *c, void *client_data, int error) { PortConnection * conn = ((PortReadInfo *) client_data)->conn; int idx = ((PortReadInfo *) client_data)->idx; size_t read_size = 0; conn->pending_read_request &= ~(1 << idx); if (error) { trace(LOG_ALWAYS, "Reply error %d: %s\n", error, errno_to_str(error)); read_packet_callback(conn, error, idx, 0); } else { int end; InputStream *inp = &conn->server->channel->inp; int ch = peek_stream(inp); if (ch == 'n') { (void) read_stream(inp); if (read_stream(inp) != 'u') goto err_json_syntax; if (read_stream(inp) != 'l') goto err_json_syntax; if (read_stream(inp) != 'l') goto err_json_syntax; } else { JsonReadBinaryState state; json_read_binary_start(&state, inp); for (;;) { size_t rd = json_read_binary_data(&state, conn->read_buffer[idx] + read_size, sizeof conn->read_buffer[idx] - read_size); if (rd == 0) break; read_size += rd; } assert(state.size_start <= 0 || read_size == state.size_start); json_read_binary_end(&state); } json_test_char(&c->inp, MARKER_EOA); error = read_errno(inp); (void)json_read_long(inp); if (read_stream(inp) != 0) goto err_json_syntax; end = json_read_boolean(inp); json_test_char(&c->inp, MARKER_EOA); json_test_char(&c->inp, MARKER_EOM); #if 0 if (read_stream(inp) != 0 || read_stream(inp) != MARKER_EOM) goto err_json_syntax; #endif if (end) read_packet_callback(conn, 0, idx, 0); else read_packet_callback(conn, 0, idx, read_size); } return; err_json_syntax: return; }
void async_req_post(AsyncReqInfo * req) { WorkerThread * wt; trace(LOG_ASYNCREQ, "async_req_post: req %p, type %d", req, req->type); #if ENABLE_AIO { int res = 0; switch (req->type) { case AsyncReqSeekRead: case AsyncReqSeekWrite: memset(&req->u.fio.aio, 0, sizeof(req->u.fio.aio)); req->u.fio.aio.aio_fildes = req->u.fio.fd; req->u.fio.aio.aio_offset = req->u.fio.offset; req->u.fio.aio.aio_buf = req->u.fio.bufp; req->u.fio.aio.aio_nbytes = req->u.fio.bufsz; req->u.fio.aio.aio_sigevent.sigev_notify = SIGEV_THREAD; req->u.fio.aio.aio_sigevent.sigev_notify_function = aio_done; req->u.fio.aio.aio_sigevent.sigev_value.sival_ptr = req; res = req->type == AsyncReqSeekWrite ? aio_write(&req->u.fio.aio) : aio_read(&req->u.fio.aio); if (res < 0) { req->u.fio.rval = -1; req->error = errno; post_event(req->done, req); } return; } } #endif check_error(pthread_mutex_lock(&wtlock)); if (list_is_empty(&wtlist)) { int error; wt = loc_alloc_zero(sizeof *wt); check_error(pthread_cond_init(&wt->cond, NULL)); wt->req = req; error = pthread_create(&wt->thread, &pthread_create_attr, worker_thread_handler, wt); if (error) { trace(LOG_ALWAYS, "Can't create a worker thread: %d %s", error, errno_to_str(error)); loc_free(wt); req->error = error; post_event(req->done, req); } } else { wt = wtlink2wt(wtlist.next); list_remove(&wt->wtlink); assert(wt->req == NULL); wt->req = req; check_error(pthread_cond_signal(&wt->cond)); } check_error(pthread_mutex_unlock(&wtlock)); }
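A hedged sketch of the calling convention implied by async_req_post() and the done-callbacks earlier in this listing (np_channel_read_done, write_process_input_done): fill in an AsyncReqInfo, post it, then read rval/error when the done callback runs on the dispatch thread. The ExampleSender type and function names are hypothetical.

typedef struct ExampleSender { AsyncReqInfo req; } ExampleSender;

static void example_send_done(void * x) {
    AsyncReqInfo * req = (AsyncReqInfo *)x;
    ExampleSender * s = (ExampleSender *)req->client_data;
    /* rval/error follow the pattern used by the callbacks in this listing. */
    if (req->u.sio.rval < 0) trace(LOG_ALWAYS, "Send failed: %s", errno_to_str(req->error));
    loc_free(s);
}

static void example_post_send(int sock, void * buf, size_t size) {
    ExampleSender * s = (ExampleSender *)loc_alloc_zero(sizeof(ExampleSender));
    s->req.type = AsyncReqSend;
    s->req.done = example_send_done;
    s->req.client_data = s;
    s->req.u.sio.sock = sock;
    s->req.u.sio.bufp = buf;
    s->req.u.sio.bufsz = size;
    s->req.u.sio.flags = 0;
    async_req_post(&s->req);
}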
int context_single_step(Context * ctx) { assert(is_dispatch_thread()); assert(context_has_state(ctx)); assert(ctx->stopped); assert(!ctx->exited); assert(!EXT(ctx)->pending_step); if (skip_breakpoint(ctx, 1)) return 0; trace(LOG_CONTEXT, "context: single step ctx %#lx, id %s", ctx, ctx->id); if (EXT(ctx)->regs_dirty) { if (ptrace(PTRACE_SETREGS, EXT(ctx)->pid, 0, (int)EXT(ctx)->regs) < 0) { int err = errno; if (err == ESRCH) { EXT(ctx)->regs_dirty = 0; EXT(ctx)->pending_step = 1; send_context_started_event(ctx); return 0; } trace(LOG_ALWAYS, "error: ptrace(PTRACE_SETREGS) failed: ctx %#lx, id %s, error %d %s", ctx, ctx->id, err, errno_to_str(err)); errno = err; return -1; } EXT(ctx)->regs_dirty = 0; } if (ptrace(PTRACE_SINGLESTEP, EXT(ctx)->pid, 0, 0) < 0) { int err = errno; if (err == ESRCH) { EXT(ctx)->pending_step = 1; send_context_started_event(ctx); return 0; } trace(LOG_ALWAYS, "error: ptrace(PTRACE_SINGLESTEP, ...) failed: ctx %#lx, id %s, error %d %s", ctx, ctx->id, err, errno_to_str(err)); errno = err; return -1; } EXT(ctx)->pending_step = 1; send_context_started_event(ctx); return 0; }
void exception(int error) { assert(is_dispatch_thread()); assert(error != 0); if (chain == NULL) { trace(LOG_ALWAYS, "Unhandled exception %d: %s.", error, errno_to_str(error)); exit(error); } errno = chain->error = error; longjmp(chain->env, 1); }
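For reference, the catching side of this mechanism as used by callers elsewhere in this listing (run_cache_client, handle_channel_msg, virtual_stream_create): a Trap frame armed with set_trap() and released with clear_trap(), with exception()/str_exception() unwinding to it. The sketch below is illustrative only; example_do_work() is hypothetical.

#include <errno.h>

static void example_do_work(void) {
    /* Hypothetical worker: report failure by unwinding to the nearest trap. */
    exception(EINVAL);
}

static void example_guarded_call(void) {
    Trap trap;
    if (set_trap(&trap)) {
        example_do_work();
        clear_trap(&trap);
    }
    else {
        trace(LOG_ALWAYS, "Recovered from exception %d: %s", trap.error, errno_to_str(trap.error));
    }
}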
void sleep_for_ms(uint64_t ms) { timespec ts; ts.tv_sec = ms / 1000; ts.tv_nsec = (ms % 1000) * 1000000; while (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL) == -1) { if (errno != EINTR) { roc_panic("clock_nanosleep(CLOCK_MONOTONIC): %s", errno_to_str().c_str()); } } }
static void post_write_request(OutputBuffer * bf) { ChannelTCP * c = obuf2tcp(bf->queue); assert(c->socket >= 0); c->wr_req.client_data = c; c->wr_req.done = done_write_request; #if ENABLE_SSL if (c->ssl) { int wr = SSL_write(c->ssl, bf->buf + bf->buf_pos, bf->buf_len - bf->buf_pos); if (wr <= 0) { int err = SSL_get_error(c->ssl, wr); if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) { c->wr_req.type = AsyncReqSelect; c->wr_req.u.select.nfds = c->socket + 1; FD_ZERO(&c->wr_req.u.select.readfds); FD_ZERO(&c->wr_req.u.select.writefds); FD_ZERO(&c->wr_req.u.select.errorfds); if (err == SSL_ERROR_WANT_WRITE) FD_SETX(c->socket, &c->wr_req.u.select.writefds); if (err == SSL_ERROR_WANT_READ) FD_SETX(c->socket, &c->wr_req.u.select.readfds); FD_SETX(c->socket, &c->wr_req.u.select.errorfds); c->wr_req.u.select.timeout.tv_sec = 10; async_req_post(&c->wr_req); } else { int error = set_ssl_errno(); c->chan.out.cur = c->obuf; trace(LOG_PROTOCOL, "Can't SSL_write() on channel %#lx: %s", c, errno_to_str(error)); c->wr_req.type = AsyncReqSend; c->wr_req.error = error; c->wr_req.u.sio.rval = -1; post_event(done_write_request, &c->wr_req); } } else { c->wr_req.type = AsyncReqSend; c->wr_req.error = 0; c->wr_req.u.sio.rval = wr; post_event(done_write_request, &c->wr_req); } } else #endif { c->wr_req.type = AsyncReqSend; c->wr_req.u.sio.sock = c->socket; c->wr_req.u.sio.bufp = bf->buf + bf->buf_pos; c->wr_req.u.sio.bufsz = bf->buf_len - bf->buf_pos; c->wr_req.u.sio.flags = c->out_queue.queue.next == c->out_queue.queue.prev ? 0 : MSG_MORE; async_req_post(&c->wr_req); } tcp_lock(&c->chan); }
static void set_socket_options(int sock) { int snd_buf = OUT_BUF_SIZE; int rcv_buf = IN_BUF_SIZE; struct linger optval; int i = 1; /* Set SO_LINGER & SO_REUSEADDR socket options so that connections are closed gracefully when required. */ optval.l_onoff = 1; optval.l_linger = 0; if (setsockopt(sock, SOL_SOCKET, SO_LINGER, (void *) &optval, sizeof(optval)) != 0) { int error = errno; trace(LOG_ALWAYS, "Unable to set SO_LINGER socket option: %s", errno_to_str(error)); } #if !(defined(_WIN32) || defined(__CYGWIN__)) { const int i = 1; if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *) &i, sizeof(i)) < 0) { int error = errno; trace(LOG_ALWAYS, "Unable to set SO_REUSEADDR socket option: %s", errno_to_str(error)); } } #endif /* Set TCP_NODELAY socket option to optimize communication */ i = 1; if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &i, sizeof(i)) < 0) { int error = errno; trace(LOG_ALWAYS, "Can't set TCP_NODELAY option on a socket: %s", errno_to_str(error)); } i = 1; if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &i, sizeof(i)) < 0) { int error = errno; trace(LOG_ALWAYS, "Can't set SO_KEEPALIVE option on a socket: %s", errno_to_str(error)); } if (setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char *) &snd_buf, sizeof(snd_buf)) < 0) { trace(LOG_ALWAYS, "setsockopt(SOL_SOCKET,SO_SNDBUF,%d) error: %s", snd_buf, errno_to_str(errno)); } if (setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (char *) &rcv_buf, sizeof(rcv_buf)) < 0) { trace(LOG_ALWAYS, "setsockopt(SOL_SOCKET,SO_RCVBUF,%d) error: %s", rcv_buf, errno_to_str(errno)); } }
void str_exception(int error, char * msg) { assert(is_dispatch_thread()); assert(error != 0); if (chain == NULL) { trace(LOG_ALWAYS, "Unhandled exception %d: %s:\n %s", error, errno_to_str(error), msg); exit(error); } strncpy(chain->msg, msg, sizeof(chain->msg) - 1); chain->msg[sizeof(chain->msg) - 1] = 0; set_exception_errno(error, msg); longjmp(chain->env, error); }
static void np_server_accept_done(void * x) { AsyncReqInfo * req = (AsyncReqInfo *)x; ServerNP * si = (ServerNP *)req->client_data; if (si->sock < 0) { /* Server closed. */ loc_free(si); return; } if (req->error) { trace(LOG_ALWAYS, "Socket accept failed: %s", errno_to_str(req->error)); } else { ChannelNP * c = create_channel(si->np_sock, si->is_ssl, 1); if (c == NULL) { trace(LOG_ALWAYS, "Cannot create channel for accepted connection: %s", errno_to_str(errno)); closesocket(req->u.acc.rval); } else { struct sockaddr * addr_buf; /* Socket remote address */ socklen_t addr_len; #if defined(SOCK_MAXADDRLEN) addr_len = SOCK_MAXADDRLEN; #else addr_len = 0x1000; #endif addr_buf = (struct sockaddr *)loc_alloc(addr_len); if (getpeername(nopoll_conn_socket (si->np_sock), addr_buf, &addr_len) < 0) { trace(LOG_ALWAYS, "Unable to get peer remote name: %s", errno_to_str(errno)); closesocket(req->u.acc.rval); } else { set_peer_addr(c, addr_buf, addr_len); si->serv.new_conn(&si->serv, &c->chan); } loc_free(addr_buf); } } async_req_post(req); }
int context_stop(Context * ctx) { ContextExtensionVxWorks * ext = EXT(ctx); struct event_info * info; VXDBG_CTX vxdbg_ctx; assert(is_dispatch_thread()); assert(ctx->parent != NULL); assert(!ctx->stopped); assert(!ctx->exited); assert(!ext->regs_dirty); if (ctx->pending_intercept) { trace(LOG_CONTEXT, "context: stop ctx %#lx, id %#x", ctx, ext->pid); } else { trace(LOG_CONTEXT, "context: temporary stop ctx %#lx, id %#x", ctx, ext->pid); } taskLock(); if (taskIsStopped(ext->pid)) { /* Workaround for situation when a task was stopped without notifying TCF agent */ int n = 0; SPIN_LOCK_ISR_TAKE(&events_lock); n = events_cnt; SPIN_LOCK_ISR_GIVE(&events_lock); if (n > 0) { trace(LOG_CONTEXT, "context: already stopped ctx %#lx, id %#x", ctx, ext->pid); taskUnlock(); return 0; } } else { vxdbg_ctx.ctxId = ext->pid; vxdbg_ctx.ctxType = VXDBG_CTX_TASK; if (vxdbgStop(vxdbg_clnt_id, &vxdbg_ctx) != OK) { int error = errno; taskUnlock(); if (error == S_vxdbgLib_INVALID_CTX) return 0; trace(LOG_ALWAYS, "context: can't stop ctx %#lx, id %#x: %s", ctx, ext->pid, errno_to_str(error)); return -1; } } assert(taskIsStopped(ext->pid)); info = event_info_alloc(EVENT_HOOK_STOP); if (info != NULL) { info->stopped_ctx.ctxId = ext->pid; event_info_post(info); } taskUnlock(); return 0; }
/** * @brief Initialize the CPU set with the processors found on a Solaris system. * * @param cpu_set The CPU set to fill in; on allocation or processor_info() failure an exception is thrown rather than an error code returned. */ static void cpu_set_init_Solaris(cpu_set_p cpu_set) { int i; int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); /* allocate cpus */ cpu_t cpus = (cpu_t)malloc( num_cpus * sizeof(struct cpu_s) ); if ( cpus == NULL ) THROW(BadAlloc); for (i = 0; i < num_cpus; i++) { /* initialize fields */ memset( &cpus[i], 0, sizeof(struct cpu_s) ); } /* find the CPUs on the system */ int num_found = 0; int cpu_num; for (cpu_num = 0; ; cpu_num++) { int status = p_online(cpu_num, P_STATUS); if ( (status == -1) && (errno == EINVAL) ) continue; /* found a new CPU */ cpus[num_found].cpu_unique_id = cpu_num; cpus[num_found].cpu_id = cpu_num; if ( processor_info( cpu_num, &cpus[num_found].cpu_proc_info ) ) { free(cpus); THROW4(ThreadException, "processor_info() failed with %s on CPU %d/%d\n", errno_to_str().data(), cpu_num+1, num_cpus); } num_found++; if ( num_found == num_cpus ) break; } /* return parameters */ cpu_set->cpuset_num_cpus = num_cpus; cpu_set->cpuset_cpus = cpus; }
int context_continue(Context * ctx) { ContextExtensionVxWorks * ext = EXT(ctx); VXDBG_CTX vxdbg_ctx; assert(is_dispatch_thread()); assert(ctx->parent != NULL); assert(ctx->stopped); assert(!ctx->pending_intercept); assert(!ctx->exited); assert(taskIsStopped(ext->pid)); trace(LOG_CONTEXT, "context: continue ctx %#lx, id %#x", ctx, ext->pid); if (ext->regs_dirty) { if (taskRegsSet(ext->pid, ext->regs) != OK) { int error = errno; trace(LOG_ALWAYS, "context: can't set regs ctx %#lx, id %#x: %s", ctx, ext->pid, errno_to_str(error)); return -1; } ext->regs_dirty = 0; } vxdbg_ctx.ctxId = ext->pid; vxdbg_ctx.ctxType = VXDBG_CTX_TASK; taskLock(); if (vxdbgCont(vxdbg_clnt_id, &vxdbg_ctx) != OK) { int error = errno; taskUnlock(); trace(LOG_ALWAYS, "context: can't continue ctx %#lx, id %#x: %s", ctx, ext->pid, errno_to_str(error)); return -1; } assert(!taskIsStopped(ext->pid)); taskUnlock(); send_context_started_event(ctx); return 0; }