/*
 * Begin reading incoming data for `client`.
 *
 * TLS clients are serviced through a uv_poll watcher on the raw fd (so the
 * TLS layer can read from the socket itself); plaintext clients use libuv's
 * stream read API directly.  On any setup failure the client is torn down
 * via client_free().  A NULL client is ignored.
 */
void connection_start_read(Client *client)
{
    if (client == NULL) {
        return;
    }

    if (client->TlsContext != NULL) {
        uv_os_fd_t fd;
        uv_poll_t *handle = Malloc(sizeof(uv_poll_t));
        handle->data = client;

        /* fix: uv_fileno()/uv_poll_init() results were previously ignored,
         * so a failure fell through to uv_poll_start() on an uninitialized
         * handle with an indeterminate fd. */
        if (uv_fileno((uv_handle_t *)client->handle, &fd) < 0 ||
            uv_poll_init(serverstate_get_event_loop(), handle, fd) < 0) {
            client_free(client);
            return;
        }

        if (uv_poll_start(handle, UV_READABLE, connection_poll_callback) < 0) {
            /* NOTE(review): `handle` is not reclaimed on the failure paths
             * (same as the original code) — confirm whether client_free()
             * or a uv_close() elsewhere is expected to own it. */
            client_free(client);
        }
    } else {
        if (uv_read_start((uv_stream_t *)client->handle,
                          connection_allocate_buffer_callback,
                          connection_on_read_callback) < 0) {
            client_free(client);
        }
    }
}
/* uv_work_t worker (runs on the thread pool): open and configure the UART
 * device, then arm a poll watcher on its fd so incoming bytes trigger
 * iotjs_uart_read_cb on the event loop.  Reports success/failure through
 * req_data->result. */
void iotjs_uart_open_worker(uv_work_t* work_req) {
  UART_WORKER_INIT;
  IOTJS_VALIDATED_STRUCT_METHOD(iotjs_uart_t, uart);

  /* Non-blocking open; O_NOCTTY prevents the device from becoming the
   * controlling terminal of this process. */
  int fd = open(iotjs_string_data(&_this->device_path),
                O_RDWR | O_NOCTTY | O_NDELAY);
  if (fd < 0) {
    req_data->result = false;
    return;
  }

  struct termios options;
  tcgetattr(fd, &options);
  /* c_cflag is overwritten (not OR-ed), then baud and data-bit constants are
   * merged in; input/output/local flags are reset for raw byte I/O. */
  options.c_cflag = CLOCAL | CREAD;
  options.c_cflag |= baud_to_constant(_this->baud_rate);
  options.c_cflag |= databits_to_constant(_this->data_bits);
  options.c_iflag = IGNPAR;
  options.c_oflag = 0;
  options.c_lflag = 0;
  /* NOTE(review): tcgetattr/tcsetattr return values are not checked —
   * a configuration failure is silently ignored; confirm acceptable. */
  tcflush(fd, TCIFLUSH);
  tcsetattr(fd, TCSANOW, &options);

  _this->device_fd = fd;
  uv_poll_t* poll_handle = &_this->poll_handle;
  uv_loop_t* loop = iotjs_environment_loop(iotjs_environment_get());
  uv_poll_init(loop, poll_handle, fd);
  poll_handle->data = uart;  /* callback recovers the uart object from here */
  uv_poll_start(poll_handle, UV_READABLE, iotjs_uart_read_cb);

  req_data->result = true;
}
int main(int argc, const char* const* argv) { printf("GIMME YOUR INPUTS!\n"); // set stdin/out to nonblocking set_non_blocking(STDIN_FILENO); set_non_blocking(STDOUT_FILENO); loop = uv_default_loop(); uv_poll_init(loop, &stdin_watcher, STDIN_FILENO); uv_poll_init(loop, &stdout_watcher, STDOUT_FILENO); uv_poll_start(&stdin_watcher, UV_READABLE, stdin_cb); uv_run(loop, 0); return 0; }
/* Arm a libuv poll watcher for the given fd.  `action` is the UV_READABLE /
 * UV_WRITABLE mask; events are delivered through io_cb_wrap, which recovers
 * the watcher object from h.data.  (Name is parenthesized to defeat any
 * like-named macro.) */
static void (io_start)(void *io, int fd, int action)
{
    struct LwqqAsyncIo_ *watcher = (struct LwqqAsyncIo_ *)io;

    watcher->h.data = io;
    uv_poll_init(loop, &watcher->h, fd);
    uv_poll_start(&watcher->h, action, io_cb_wrap);
}
/* Schedule an eventloop event on the wrapped libuv loop: a separate poll
 * watcher is armed for the read and/or write callback, and a one-shot timer
 * for the timeout callback.  The allocated poll_timer is stored in el_ev->ev
 * so getdns can later clear it.  Returns GETDNS_RETURN_MEMORY_ERROR on
 * allocation failure, otherwise GETDNS_RETURN_GOOD. */
static getdns_return_t
getdns_libuv_schedule(getdns_eventloop *loop, int fd, uint64_t timeout,
    getdns_eventloop_event *el_ev)
{
	getdns_libuv *ext = (getdns_libuv *)loop;
	poll_timer   *my_ev;
	uv_poll_t    *my_poll;
	uv_timer_t   *my_timer;

	/* An fd is only required when I/O callbacks are present, and at least
	 * one callback of some kind must be given. */
	assert(el_ev);
	assert(!(el_ev->read_cb || el_ev->write_cb) || fd >= 0);
	assert(el_ev->read_cb || el_ev->write_cb || el_ev->timeout_cb);

	DEBUG_UV("enter libuv_schedule(el_ev = %p, el_ev->ev = %p)\n"
	    , el_ev, el_ev->ev);

	if (!(my_ev = GETDNS_MALLOC(ext->mf, poll_timer)))
		return GETDNS_RETURN_MEMORY_ERROR;

	my_ev->to_close = 0;
	my_ev->mf = ext->mf;  /* remember allocator for later free */
	el_ev->ev = my_ev;

	if (el_ev->read_cb) {
		my_poll = &my_ev->read;
		my_poll->data = el_ev;  /* callback recovers el_ev from here */
		uv_poll_init(ext->loop, my_poll, fd);
		uv_poll_start(my_poll, UV_READABLE, getdns_libuv_read_cb);
	}
	if (el_ev->write_cb) {
		my_poll = &my_ev->write;
		my_poll->data = el_ev;
		uv_poll_init(ext->loop, my_poll, fd);
		uv_poll_start(my_poll, UV_WRITABLE, getdns_libuv_write_cb);
	}
	if (el_ev->timeout_cb) {
		my_timer = &my_ev->timer;
		my_timer->data = el_ev;
		uv_timer_init(ext->loop, my_timer);
		/* repeat = 0: fires once after `timeout` ms */
		uv_timer_start(my_timer, getdns_libuv_timeout_cb, timeout, 0);
	}
	DEBUG_UV("exit libuv_schedule(el_ev = %p, el_ev->ev = %p)\n"
	    , el_ev, el_ev->ev);
	return GETDNS_RETURN_GOOD;
}
/* Set up keyboard input: create a termkey instance for decoding raw terminal
 * bytes, then watch fd 0 (stdin) for readability so input_check runs whenever
 * keystrokes arrive. */
void input_init(void)
{
  log_msg("INIT", "INPUT");

  /* termkey turns raw bytes into key events; UTF-8 aware, canonical DEL/BS */
  tk = termkey_new(0, 0);
  termkey_set_flags(tk, TERMKEY_FLAG_UTF8 | TERMKEY_CANON_DELBS);

  /* fd 0 == stdin */
  uv_poll_init(eventloop(), &poll_handle, 0);
  uv_poll_start(&poll_handle, UV_READABLE, input_check);
}
/* uv_poll callback fired when the listening socket is readable: accept the
 * pending connection, wrap it in a Client, and start polling the client's
 * socket for data.
 *
 * Error handling:
 *  - `fail`: we could not even build a Client, but the connection is still
 *    pending on the listener — accept it into a throwaway context and close
 *    it immediately so the listener does not stay readable forever.
 *  - `fail_poll_setup`: the connection was accepted but polling could not be
 *    set up — free the socket and the Client. */
static void broker_server_new_client(uv_poll_t *poll, int status, int events) {
    (void) status;
    (void) events;

    Server *server = poll->data;
    Client *client = dslink_calloc(1, sizeof(Client));
    if (!client) {
        goto fail;
    }

    client->server = server;
    client->sock = dslink_socket_init(0);
    if (!client->sock) {
        dslink_free(client);
        goto fail;
    }

    if (mbedtls_net_accept(&server->srv, &client->sock->socket_ctx,
                           NULL, 0, NULL) != 0) {
        log_warn("Failed to accept a client connection\n");
        goto fail_poll_setup;
    }

    uv_poll_t *clientPoll = dslink_malloc(sizeof(uv_poll_t));
    if (!clientPoll) {
        goto fail_poll_setup;
    }

    uv_loop_t *loop = poll->loop;
    if (uv_poll_init(loop, clientPoll, client->sock->socket_ctx.fd) != 0) {
        dslink_free(clientPoll);
        goto fail_poll_setup;
    }

    clientPoll->data = client;  /* ready-callback recovers the Client */
    client->poll = clientPoll;
    /* NOTE(review): uv_poll_start result is not checked here — on failure
     * the client would never become readable; confirm acceptable. */
    uv_poll_start(clientPoll, UV_READABLE, broker_server_client_ready);

    log_debug("Accepted a client connection\n");
    return;
fail:
    {
        /* Drain the pending connection so the listener stops polling hot. */
        mbedtls_net_context tmp;
        mbedtls_net_init(&tmp);
        mbedtls_net_accept(&server->srv, &tmp, NULL, 0, NULL);
        mbedtls_net_free(&tmp);
    }
    return;
fail_poll_setup:
    dslink_socket_free(client->sock);
    dslink_free(client);
}
/* Lua binding: create a uv_poll_t userdata bound to the fd given as the
 * first Lua argument.  On success the userdata is left on the Lua stack
 * (returns 1 value); on failure the userdata is discarded and a Lua error
 * is raised via luv_error(). */
static int luv_new_poll(lua_State *L)
{
  int fd = luaL_checkinteger(L, 1);
  uv_poll_t *handle = (uv_poll_t *)luv_newuserdata(L, sizeof(*handle));

  int rc = uv_poll_init(luv_loop(L), handle, fd);
  if (rc < 0) {
    lua_pop(L, 1);  /* drop the half-constructed userdata */
    return luv_error(L, rc);
  }

  /* attach the Lua-side bookkeeping object to the handle */
  handle->data = luv_setup_handle(L);
  return 1;
}
// Construct a Poller bound to `fd`: heap-allocate and zero a uv_poll_t,
// point its data slot back at this object, and initialize it on the default
// loop.  On init failure a JS exception is thrown and uv_poll_init_success
// stays false so later methods can refuse to start polling.
Poller::Poller(int fd) {
  Nan::HandleScope scope;
  this->fd = fd;
  this->poll_handle = new uv_poll_t();
  memset(this->poll_handle, 0, sizeof(uv_poll_t));
  poll_handle->data = this;
  int status = uv_poll_init(uv_default_loop(), poll_handle, fd);
  if (0 != status) {
    Nan::ThrowError(uv_strerror(status));
    // NOTE(review): poll_handle is not deleted on this path — presumably the
    // destructor reclaims it; confirm against ~Poller().
    return;
  }
  uv_poll_init_success = true;
}
/* Register a freshly accepted fd with the per-thread libuv loop by
 * initializing the wsi's read watcher on it.  No-op when the context is not
 * using the libuv event backend. */
void lws_libuv_accept(struct lws *wsi, int accept_fd)
{
	struct lws_context *context = lws_get_context(wsi);

	if (!LWS_LIBUV_ENABLED(context))
		return;

	lwsl_debug("%s: new wsi %p\n", __func__, wsi);

	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];

	wsi->w_read.context = context;
	uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher, accept_fd);
}
/*
 * Bind the broker's HTTP listener to host:port and start polling it for
 * incoming connections (dispatched to broker_server_new_client).
 * Returns 1 on success, 0 on failure.
 */
static int start_http_server(Server *server,
                             const char *host,
                             const char *port,
                             uv_loop_t *loop,
                             uv_poll_t *poll) {
    if (mbedtls_net_bind(&server->srv, host, port,
                         MBEDTLS_NET_PROTO_TCP) != 0) {
        log_fatal("Failed to bind to %s:%s\n", host, port);
        return 0;
    }
    log_info("HTTP server bound to %s:%s\n", host, port);

    /* fix: uv_poll_init/uv_poll_start results were previously ignored, so a
     * failure silently produced a server that never accepts connections. */
    if (uv_poll_init(loop, poll, server->srv.fd) != 0) {
        log_fatal("Failed to initialize polling on the server socket\n");
        return 0;
    }
    poll->data = server;  /* accept-callback recovers the Server */
    if (uv_poll_start(poll, UV_READABLE, broker_server_new_client) != 0) {
        log_fatal("Failed to start polling on the server socket\n");
        return 0;
    }
    return 1;
}
// Constructor Impl( int fileDescriptor, CallbackBase* callback, uv_poll_event eventsToMonitor ) : mFileDescriptor( fileDescriptor ), mEventsToMonitor( eventsToMonitor ), mCallback( callback ), pollHandle( NULL ) { // heap allocate a handle as it will be alive after the FileDescriptorMonitor::Impl object is deleted. pollHandle = new uv_poll_t; // Node.JS uses uv_default_loop uv_poll_init( uv_default_loop(), pollHandle, fileDescriptor); pollHandle->data = this; uv_poll_start( pollHandle, mEventsToMonitor, PollCabllack); }
/* Poll `fd` for the event mask in *events, suspending the calling async
 * thread until one of them fires (poll_cb resumes it).  On return, *events
 * is overwritten with the events that actually occurred.  Returns 0 on
 * success or a negative libuv error code. */
int async_poll_fd(int const fd, int *const events) {
  assert(events);
  struct poll_state state[1];
  state->thread = async_active();  /* thread to resume from poll_cb */
  state->status = 0;
  state->events = 0;
  uv_poll_t poll[1];
  poll->data = state;
  int rc = uv_poll_init(async_loop, poll, fd);
  if(rc < 0) return rc;
  rc = uv_poll_start(poll, *events, poll_cb);
  /* NOTE(review): on start failure the initialized handle is abandoned
   * without async_close() — confirm whether libuv requires closing an
   * initialized-but-unstarted poll handle here. */
  if(rc < 0) return rc;
  async_yield();  /* suspended until poll_cb fires */
  async_close((uv_handle_t *)poll);
  *events = state->events;  /* report which events fired */
  return state->status;
}
/* Register an accepted descriptor with the per-thread libuv loop.  Raw file
 * descriptors and event pipes are initialized with uv_poll_init (plain fd);
 * everything else is a socket and goes through uv_poll_init_socket.  No-op
 * when the context is not using the libuv backend. */
void lws_libuv_accept(struct lws *wsi, lws_sock_file_fd_type desc)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt;
	int is_file;

	if (!LWS_LIBUV_ENABLED(context))
		return;

	pt = &context->pt[(int)wsi->tsi];
	wsi->w_read.context = context;

	is_file = (wsi->mode == LWSCM_RAW_FILEDESC || wsi->event_pipe);
	if (is_file)
		uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
			     (int)(long long)desc.filefd);
	else
		uv_poll_init_socket(pt->io_loop_uv, &wsi->w_read.uv_watcher,
				    desc.sockfd);
}
/*
 * Attach a libuv event loop to per-thread slot `tsi` of the context, creating
 * one if `loop` is NULL (the foreign-loop flag records ownership so teardown
 * knows whether to destroy it).  Installs signal watchers, the listening
 * socket's read watcher, and the 1s service timer.
 * Returns 0 on success, -1 on allocation failure.
 */
LWS_VISIBLE int
lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws *wsi = wsi_from_fd(context, pt->lserv_fd);
	int status = 0, n;

	if (!loop) {
		loop = lws_malloc(sizeof(*loop));
		/* fix: allocation result was previously passed straight to
		 * uv_loop_init(), dereferencing NULL on OOM */
		if (!loop)
			return -1;
		uv_loop_init(loop);
		pt->ev_loop_foreign = 0;  /* we own it and must destroy it */
	} else
		pt->ev_loop_foreign = 1;  /* caller owns the loop */

	pt->io_loop_uv = loop;

	if (pt->context->use_ev_sigint) {
		assert(ARRAY_SIZE(sigs) <= ARRAY_SIZE(pt->signals));
		for (n = 0; n < ARRAY_SIZE(sigs); n++) {
			uv_signal_init(loop, &pt->signals[n]);
			pt->signals[n].data = pt->context;
			uv_signal_start(&pt->signals[n],
					context->lws_uv_sigint_cb, sigs[n]);
		}
	}

	/*
	 * Initialize the accept wsi read watcher with the listening socket
	 * and register a callback for read operations
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	if (wsi) {
		wsi->w_read.context = context;
		uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
			     pt->lserv_fd);
		uv_poll_start(&wsi->w_read.uv_watcher, UV_READABLE, lws_io_cb);
	}

	/* periodic 1000ms timeout service */
	uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
	uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb, 1000, 1000);

	return status;
}
// Watch file descriptor `fd` for `event` (read/write/both) and invoke
// `callback` when it fires.  A Watch is created and owned by impl->watchPoll
// on first registration for an fd; subsequent calls reuse it, replacing the
// callback and event mask.  Throws std::runtime_error on libuv failure or an
// unknown event value.  Must be called on the loop's owning thread.
void RunLoop::addWatch(int fd, Event event, std::function<void(int, Event)>&& callback) {
    MBGL_VERIFY_THREAD(tid);

    Watch *watch = nullptr;
    auto watchPollIter = impl->watchPoll.find(fd);
    if (watchPollIter == impl->watchPoll.end()) {
        // first registration for this fd: create and register the Watch
        std::unique_ptr<Watch> watchPtr = std::make_unique<Watch>();
        watch = watchPtr.get();
        impl->watchPoll[fd] = std::move(watchPtr);
        if (uv_poll_init(impl->loop, &watch->poll, fd)) {
            throw std::runtime_error("Failed to init poll on file descriptor.");
        }
    } else {
        watch = watchPollIter->second.get();
    }

    watch->poll.data = watch;  // Watch::onEvent recovers the Watch from here
    watch->fd = fd;
    watch->eventCallback = std::move(callback);

    // translate the portable Event enum into libuv's poll mask
    int pollEvent = 0;
    switch (event) {
    case Event::Read:
        pollEvent = UV_READABLE;
        break;
    case Event::Write:
        pollEvent = UV_WRITABLE;
        break;
    case Event::ReadWrite:
        pollEvent = UV_READABLE | UV_WRITABLE;
        break;
    default:
        throw std::runtime_error("Unhandled event.");
    }

    // uv_poll_start on an already-active handle just updates the mask/callback
    if (uv_poll_start(&watch->poll, pollEvent, &Watch::onEvent)) {
        throw std::runtime_error("Failed to start poll on file descriptor.");
    }
}
int tun_start(struct tundev *tun) { uv_loop_t *loop = uv_default_loop(); struct tundev_context *ctx = tun->contexts; tcp_client_start(ctx, loop); uv_poll_init(loop, &ctx->watcher, ctx->tunfd); uv_poll_start(&ctx->watcher, UV_READABLE, poll_cb); #ifndef ANDROID signal_install(loop, signal_cb, tun); #endif uv_run(loop, UV_RUN_DEFAULT); loop_close(loop); return 0; }
/* Schedule an eventloop event on the wrapped libuv loop: read and write
 * callbacks share a single poll watcher (mask built from which callbacks are
 * present), and the timeout callback gets a one-shot timer.  The allocated
 * poll_timer is stored in el_ev->ev for later cleanup.  Returns
 * GETDNS_RETURN_MEMORY_ERROR on allocation failure, else GETDNS_RETURN_GOOD. */
static getdns_return_t
getdns_libuv_schedule(getdns_eventloop *loop, int fd, uint64_t timeout,
    getdns_eventloop_event *el_ev)
{
	getdns_libuv *ext = (getdns_libuv *)loop;
	poll_timer   *my_ev;
	uv_poll_t    *my_poll;
	uv_timer_t   *my_timer;
	int           poll_events;

	/* An fd is only required when I/O callbacks are present, and at least
	 * one callback of some kind must be given. */
	assert(el_ev);
	assert(!(el_ev->read_cb || el_ev->write_cb) || fd >= 0);
	assert(el_ev->read_cb || el_ev->write_cb || el_ev->timeout_cb);

	my_ev = (poll_timer*)malloc(sizeof(poll_timer));
	if (!my_ev)
		return GETDNS_RETURN_MEMORY_ERROR;

	my_ev->to_close = 0;
	el_ev->ev = my_ev;

	if (el_ev->read_cb || el_ev->write_cb) {
		my_poll = &my_ev->poll_h;
		my_poll->data = el_ev;  /* callback recovers el_ev from here */
		uv_poll_init(ext->loop, my_poll, fd);
		/* build the event mask from whichever callbacks exist */
		poll_events = 0;
		if (el_ev->read_cb)
			poll_events |= UV_READABLE;
		if (el_ev->write_cb)
			poll_events |= UV_WRITABLE;
		uv_poll_start(my_poll, poll_events, getdns_libuv_poll_cb);
	}
	if (el_ev->timeout_cb) {
		my_timer = &my_ev->timer;
		my_timer->data = el_ev;
		uv_timer_init(ext->loop, my_timer);
		/* repeat = 0: fires once after `timeout` ms */
		uv_timer_start(my_timer, getdns_libuv_timeout_cb, timeout, 0);
	}
	return GETDNS_RETURN_GOOD;
}
/* * getdns_extension_set_libuv_loop * */ getdns_return_t getdns_extension_set_libuv_loop(struct getdns_context *context, struct uv_loop_s *uv_loop) { RETURN_IF_NULL(context, GETDNS_RETURN_INVALID_PARAMETER); RETURN_IF_NULL(uv_loop, GETDNS_RETURN_INVALID_PARAMETER); /* TODO: cleanup current extension base */ getdns_return_t r = getdns_extension_detach_eventloop(context); if (r != GETDNS_RETURN_GOOD) { return r; } struct getdns_libuv_data* uv_data = (struct getdns_libuv_data*) malloc(sizeof(struct getdns_libuv_data)); if (!uv_data) { return GETDNS_RETURN_MEMORY_ERROR; } int fd = getdns_context_fd(context); uv_data->poll_handle = (uv_poll_t*) malloc(sizeof(uv_poll_t)); uv_poll_init(uv_loop, uv_data->poll_handle, fd); uv_data->poll_handle->data = context; uv_data->loop = uv_loop; uv_data->polling = 0; return getdns_extension_set_eventloop(context, &LIBUV_EXT, uv_data); } /* getdns_extension_set_libuv_loop */
static int elops_accept_uv(struct lws *wsi) { struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi]; wsi->w_read.context = wsi->context; wsi->w_read.uv.pwatcher = lws_malloc(sizeof(*wsi->w_read.uv.pwatcher), "uvh"); if (!wsi->w_read.uv.pwatcher) return -1; if (wsi->role_ops->file_handle) uv_poll_init(pt->uv.io_loop, wsi->w_read.uv.pwatcher, (int)(long long)wsi->desc.filefd); else uv_poll_init_socket(pt->uv.io_loop, wsi->w_read.uv.pwatcher, wsi->desc.sockfd); ((uv_handle_t *)wsi->w_read.uv.pwatcher)->data = (void *)wsi; return 0; }
/* Open an i915_oa perf event for `query`, mmap its sample ring buffer, and
 * wrap everything in a gputop_perf_stream whose fd is watched on the gputop
 * UI loop (ready_cb fires when samples are available).  Returns NULL on
 * failure with *error set to a malloc'd message. */
struct gputop_perf_stream *
gputop_perf_open_i915_oa_query(struct gputop_perf_query *query,
                               int period_exponent,
                               size_t perf_buffer_size,
                               void (*ready_cb)(uv_poll_t *poll, int status, int events),
                               bool overwrite,
                               char **error)
{
    struct gputop_perf_stream *stream;
    i915_oa_attr_t oa_attr;
    struct perf_event_attr attr;
    int event_fd;
    uint8_t *mmap_base;
    int expected_max_samples;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = lookup_i915_oa_id();  /* dynamic PMU type for i915_oa */

    attr.sample_type = PERF_SAMPLE_RAW;
    attr.sample_period = 1;

    /* wake the poll watcher once a quarter of the buffer is filled */
    attr.watermark = true;
    attr.wakeup_watermark = perf_buffer_size / 4;

    memset(&oa_attr, 0, sizeof(oa_attr));
    oa_attr.size = sizeof(oa_attr);

    oa_attr.format = query->perf_oa_format;
    oa_attr.metrics_set = query->perf_oa_metrics_set;
    oa_attr.timer_exponent = period_exponent;

    /* the i915_oa PMU interprets attr.config as a pointer to its own
     * attribute struct */
    attr.config = (uint64_t)&oa_attr;

    event_fd = perf_event_open(&attr,
                               -1, /* pid */
                               0, /* cpu */
                               -1, /* group fd */
                               PERF_FLAG_FD_CLOEXEC); /* flags */
    if (event_fd == -1) {
        asprintf(error, "Error opening i915_oa perf event: %m\n");
        return NULL;
    }

    /* NB: A read-write mapping ensures the kernel will stop writing data when
     * the buffer is full, and will report samples as lost. */
    mmap_base = mmap(NULL, perf_buffer_size + page_size,
                     PROT_READ | PROT_WRITE, MAP_SHARED, event_fd, 0);
    if (mmap_base == MAP_FAILED) {
        asprintf(error, "Error mapping circular buffer, %m\n");
        close (event_fd);
        return NULL;
    }

    stream = xmalloc0(sizeof(*stream));
    stream->type = GPUTOP_STREAM_PERF;
    stream->ref_count = 1;
    stream->query = query;
    stream->fd = event_fd;
    /* first page is the perf mmap metadata page; samples start after it */
    stream->perf.buffer = mmap_base + page_size;
    stream->perf.buffer_size = perf_buffer_size;
    stream->perf.mmap_page = (void *)mmap_base;

    /* 1.2x headroom over the theoretical maximum sample count */
    expected_max_samples =
        (stream->perf.buffer_size / MAX_CORE_PERF_OA_SAMPLE_SIZE) * 1.2;

    memset(&stream->perf.header_buf, 0, sizeof(stream->perf.header_buf));

    stream->overwrite = overwrite;
    if (overwrite) {
        stream->perf.header_buf.len = expected_max_samples;
        stream->perf.header_buf.offsets =
            xmalloc(sizeof(uint32_t) * expected_max_samples);
    }

    stream->fd_poll.data = stream;  /* ready_cb recovers the stream */
    uv_poll_init(gputop_ui_loop, &stream->fd_poll, stream->fd);
    uv_poll_start(&stream->fd_poll, UV_READABLE, ready_cb);

    return stream;
}
/* u2_term_io_init(): initialize terminal.
**
** In daemon ("dem") mode, stdout (fd 1) is wrapped in a uv pipe.  Otherwise
** stdin (fd 0) is poll-watched for read/write, terminfo is loaded, the
** current termios state is saved for restore, a raw-mode configuration is
** built, and finally raw non-blocking input is switched on.  The new utty is
** linked into u2_Host as terminal 1.
** NOTE(review): the malloc() result is used unchecked — confirm whether an
** OOM abort policy applies here.
*/
void
u2_term_io_init()
{
  u2_utty* uty_u = malloc(sizeof(u2_utty));

  if ( u2_yes == u2_Host.ops_u.dem ) {
    uty_u->fid_i = 1;

    uv_pipe_init(u2L, &(uty_u->pop_u), uty_u->fid_i);
    uv_pipe_open(&(uty_u->pop_u), uty_u->fid_i);
  }
  else {
    //  Initialize event processing.  Rawdog it.
    //
    {
      uty_u->fid_i = 0;                       //  stdin, yes we write to it...

      uv_poll_init(u2L, &(uty_u->wax_u), uty_u->fid_i);
      uv_poll_start(&(uty_u->wax_u),
                    UV_READABLE | UV_WRITABLE,
                    _term_poll_cb);
    }

    //  Configure horrible stateful terminfo api.
    //
    {
      if ( 0 != setupterm(0, 2, 0) ) {
        c3_assert(!"init-setupterm");
      }
    }

    //  Load terminfo strings.
    //
    {
      c3_w len_w;

      //  _utfo(): fetch one terminfo capability string, asserting presence.
#   define _utfo(way, nam) \
      { \
        uty_u->ufo_u.way.nam##_y = (const c3_y *) tigetstr(#nam); \
        c3_assert(uty_u->ufo_u.way.nam##_y); \
      }

      uty_u->ufo_u.inn.max_w = 0;

#if 1
      _utfo(inn, kcuu1);
      _utfo(inn, kcud1);
      _utfo(inn, kcub1);
      _utfo(inn, kcuf1);

      _utfo(out, clear);
      _utfo(out, el);
      // _utfo(out, el1);
      _utfo(out, ed);
      _utfo(out, bel);
      _utfo(out, cub1);
      _utfo(out, cuf1);
      _utfo(out, cuu1);
      _utfo(out, cud1);
      // _utfo(out, cub);
      // _utfo(out, cuf);
#else
      //  libuv hardcodes an ansi terminal - which doesn't seem to work...
      //
      uty_u->ufo_u.out.clear_y = "\033[H\033[J";
      uty_u->ufo_u.out.el_y = "\033[K";
      uty_u->ufo_u.out.ed_y = "\033[J";
      uty_u->ufo_u.out.bel_y = "\007";
      uty_u->ufo_u.out.cub1_y = "\010";
      uty_u->ufo_u.out.cud1_y = "\033[B";
      uty_u->ufo_u.out.cuu1_y = "\033[A";
      uty_u->ufo_u.out.cuf1_y = "\033[C";
#endif

      //  Terminfo chronically reports the wrong sequence for arrow
      //  keys on xterms.  Drastic fix for ridiculous unacceptable bug.
      //  Yes, we could fix this with smkx/rmkx, but this is retarded as well.
      //
      {
        uty_u->ufo_u.inn.kcuu1_y = (const c3_y*)"\033[A";
        uty_u->ufo_u.inn.kcud1_y = (const c3_y*)"\033[B";
        uty_u->ufo_u.inn.kcuf1_y = (const c3_y*)"\033[C";
        uty_u->ufo_u.inn.kcub1_y = (const c3_y*)"\033[D";
      }

      //  max_w = longest input escape sequence, for the input parser.
      uty_u->ufo_u.inn.max_w = 0;
      if ( (len_w = strlen((c3_c*)uty_u->ufo_u.inn.kcuu1_y)) >
            uty_u->ufo_u.inn.max_w )
      {
        uty_u->ufo_u.inn.max_w = len_w;
      }
      if ( (len_w = strlen((c3_c*)uty_u->ufo_u.inn.kcud1_y)) >
            uty_u->ufo_u.inn.max_w )
      {
        uty_u->ufo_u.inn.max_w = len_w;
      }
      if ( (len_w = strlen((c3_c*)uty_u->ufo_u.inn.kcub1_y)) >
            uty_u->ufo_u.inn.max_w )
      {
        uty_u->ufo_u.inn.max_w = len_w;
      }
      if ( (len_w = strlen((c3_c*)uty_u->ufo_u.inn.kcuf1_y)) >
            uty_u->ufo_u.inn.max_w )
      {
        uty_u->ufo_u.inn.max_w = len_w;
      }
    }

    //  Load old terminal state to restore.
    //
#if 1
    {
      if ( 0 != tcgetattr(uty_u->fid_i, &uty_u->bak_u) ) {
        c3_assert(!"init-tcgetattr");
      }
      if ( -1 == fcntl(uty_u->fid_i, F_GETFL, &uty_u->cug_i) ) {
        c3_assert(!"init-fcntl");
      }
      uty_u->cug_i &= ~O_NONBLOCK;                //  could fix?
      uty_u->nob_i = uty_u->cug_i | O_NONBLOCK;   //  O_NDELAY on older unix
    }

    //  Construct raw termios configuration.
    //
    {
      uty_u->raw_u = uty_u->bak_u;

      uty_u->raw_u.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
      uty_u->raw_u.c_iflag &= ~(ICRNL | INPCK | ISTRIP);
      uty_u->raw_u.c_cflag &= ~(CSIZE | PARENB);
      uty_u->raw_u.c_cflag |= CS8;
      uty_u->raw_u.c_oflag &= ~(OPOST);
      uty_u->raw_u.c_cc[VMIN] = 0;
      uty_u->raw_u.c_cc[VTIME] = 0;
    }
#endif

    //  Initialize mirror and accumulator state.
    //
    {
      uty_u->tat_u.mir.lin_w = 0;
      uty_u->tat_u.mir.len_w = 0;
      uty_u->tat_u.mir.cus_w = 0;

      uty_u->tat_u.esc.ape = u2_no;
      uty_u->tat_u.esc.bra = u2_no;

      uty_u->tat_u.fut.len_w = 0;
      uty_u->tat_u.fut.wid_w = 0;
    }
  }

  //  This is terminal 1, linked in host.
  //
  {
    uty_u->tid_l = 1;
    uty_u->out_u = 0;
    uty_u->tou_u = 0;

    uty_u->nex_u = u2_Host.uty_u;
    u2_Host.uty_u = uty_u;
    u2_Host.tem_u = uty_u;
  }

  if ( u2_no == u2_Host.ops_u.dem ) {
    //  Start raw input.
    //
    {
      if ( 0 != tcsetattr(uty_u->fid_i, TCSADRAIN, &uty_u->raw_u) ) {
        c3_assert(!"init-tcsetattr");
      }
      if ( -1 == fcntl(uty_u->fid_i, F_SETFL, uty_u->nob_i) ) {
        c3_assert(!"init-fcntl");
      }
    }
  }
}
/*
 * Open an i915 perf OA query via the dedicated I915_IOCTL_PERF_OPEN ioctl
 * (rather than the core perf syscall) and wrap the resulting fd in a
 * gputop_perf_stream whose fd is watched on the gputop UI loop (ready_cb
 * fires when samples are available).  Returns NULL on failure with *error
 * set to a malloc'd message.
 *
 * fix: the two "&param" references had been mangled into the mojibake
 * "¶m" (an HTML-entity casualty of "&para"), which does not compile.
 */
struct gputop_perf_stream *
gputop_open_i915_perf_oa_query(struct gputop_perf_query *query,
                               int period_exponent,
                               size_t perf_buffer_size,
                               void (*ready_cb)(uv_poll_t *poll, int status, int events),
                               bool overwrite,
                               char **error)
{
    struct gputop_perf_stream *stream;
    struct i915_perf_open_param param;
    struct i915_perf_oa_attr oa_attr;
    int ret;

    memset(&param, 0, sizeof(param));
    memset(&oa_attr, 0, sizeof(oa_attr));

    param.type = I915_PERF_OA_EVENT;

    param.flags = 0;
    param.flags |= I915_PERF_FLAG_FD_CLOEXEC;
    param.flags |= I915_PERF_FLAG_FD_NONBLOCK;

    param.sample_flags = I915_PERF_SAMPLE_OA_REPORT;

    oa_attr.size = sizeof(oa_attr);
    oa_attr.flags |= I915_OA_FLAG_PERIODIC;
    oa_attr.oa_format = query->perf_oa_format;
    oa_attr.metrics_set = query->perf_oa_metrics_set;
    oa_attr.oa_timer_exponent = period_exponent;
    param.attr = (uintptr_t)&oa_attr;

    ret = perf_ioctl(drm_fd, I915_IOCTL_PERF_OPEN, &param);
    if (ret == -1) {
        asprintf(error, "Error opening i915_oa perf event: %m\n");
        return NULL;
    }

    stream = xmalloc0(sizeof(*stream));
    stream->type = GPUTOP_STREAM_I915_PERF;
    stream->ref_count = 1;
    stream->query = query;
    stream->fd = param.fd;

    /* We double buffer the samples we read from the kernel so
     * we can maintain a stream->last pointer for calculating
     * counter deltas */
    stream->oa.buf_sizes = MAX_I915_PERF_OA_SAMPLE_SIZE * 100;
    stream->oa.bufs[0] = xmalloc0(stream->oa.buf_sizes);
    stream->oa.bufs[1] = xmalloc0(stream->oa.buf_sizes);

    stream->overwrite = overwrite;
    if (overwrite) {
#warning "TODO: support flight-recorder mode"
        assert(0);
    }

    stream->fd_poll.data = stream;  /* ready_cb recovers the stream */
    uv_poll_init(gputop_ui_loop, &stream->fd_poll, stream->fd);
    uv_poll_start(&stream->fd_poll, UV_READABLE, ready_cb);

    return stream;
}
/*
 * Open a Linux tracepoint (`system`:`event`) as a perf event stream: resolve
 * the tracepoint id from debugfs, open and mmap the perf ring buffer, and
 * watch the fd on the gputop UI loop (ready_cb fires when samples arrive).
 * `trace_struct_size` is the size of the tracepoint's record payload, used to
 * estimate the sample capacity for flight-recorder mode.  Returns NULL on
 * failure with *error set to a malloc'd message.
 */
struct gputop_perf_stream *
gputop_perf_open_trace(int pid,
                       int cpu,
                       const char *system,
                       const char *event,
                       size_t trace_struct_size,
                       size_t perf_buffer_size,
                       void (*ready_cb)(uv_poll_t *poll, int status, int events),
                       bool overwrite,
                       char **error)
{
    struct gputop_perf_stream *stream;
    struct perf_event_attr attr;
    int event_fd;
    uint8_t *mmap_base;
    int expected_max_samples;
    char *filename = NULL;
    int id = 0;
    size_t sample_size = 0;

    asprintf(&filename, "/sys/kernel/debug/tracing/events/%s/%s/id",
             system, event);
    if (filename) {
        struct stat st;

        if (stat(filename, &st) < 0) {
            int err = errno;

            free(filename);
            filename = NULL;

            if (err == EPERM) {
                asprintf(error, "Permission denied to open tracepoint %s:%s"
                         " (Linux tracepoints require root privileges)",
                         system, event);
                return NULL;
            } else {
                asprintf(error, "Failed to open tracepoint %s:%s: %s",
                         system, event, strerror(err));
                return NULL;
            }
        }
    }

    id = read_file_uint64(filename);
    free(filename);
    filename = NULL;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_TRACEPOINT;
    attr.config = id;
    attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME;
    attr.sample_period = 1;

    /* wake the poll watcher once a quarter of the buffer is filled */
    attr.watermark = true;
    attr.wakeup_watermark = perf_buffer_size / 4;

    event_fd = perf_event_open(&attr,
                               pid,
                               cpu,
                               -1, /* group fd */
                               PERF_FLAG_FD_CLOEXEC); /* flags */
    if (event_fd == -1) {
        /* fix: message previously said "i915_oa perf event" — copy-paste
         * from the OA open path; this opens a tracepoint. */
        asprintf(error, "Error opening tracepoint perf event: %m\n");
        return NULL;
    }

    /* NB: A read-write mapping ensures the kernel will stop writing data when
     * the buffer is full, and will report samples as lost. */
    mmap_base = mmap(NULL, perf_buffer_size + page_size,
                     PROT_READ | PROT_WRITE, MAP_SHARED, event_fd, 0);
    if (mmap_base == MAP_FAILED) {
        asprintf(error, "Error mapping circular buffer, %m\n");
        close (event_fd);
        return NULL;
    }

    stream = xmalloc0(sizeof(*stream));
    stream->type = GPUTOP_STREAM_PERF;
    stream->ref_count = 1;
    stream->fd = event_fd;
    /* first page is the perf mmap metadata page; samples start after it */
    stream->perf.buffer = mmap_base + page_size;
    stream->perf.buffer_size = perf_buffer_size;
    stream->perf.mmap_page = (void *)mmap_base;

    sample_size =
        sizeof(struct perf_event_header) +
        8 /* _TIME */ +
        trace_struct_size; /* _RAW */

    /* 1.2x headroom over the theoretical maximum sample count */
    expected_max_samples = (stream->perf.buffer_size / sample_size) * 1.2;

    memset(&stream->perf.header_buf, 0, sizeof(stream->perf.header_buf));

    stream->overwrite = overwrite;
    if (overwrite) {
        stream->perf.header_buf.len = expected_max_samples;
        stream->perf.header_buf.offsets =
            xmalloc(sizeof(uint32_t) * expected_max_samples);
    }

    stream->fd_poll.data = stream;  /* ready_cb recovers the stream */
    uv_poll_init(gputop_ui_loop, &stream->fd_poll, stream->fd);
    uv_poll_start(&stream->fd_poll, UV_READABLE, ready_cb);

    return stream;
}
/*
 * Open an arbitrary perf counter (`type`/`config` as for perf_event_attr),
 * mmap its sample ring buffer, and watch the fd on the gputop UI loop
 * (ready_cb fires when samples are available).  Returns NULL on failure with
 * *error set to a malloc'd message.
 */
struct gputop_perf_stream *
gputop_perf_open_generic_counter(int pid,
                                 int cpu,
                                 uint64_t type,
                                 uint64_t config,
                                 size_t perf_buffer_size,
                                 void (*ready_cb)(uv_poll_t *poll, int status, int events),
                                 bool overwrite,
                                 char **error)
{
    struct gputop_perf_stream *stream;
    struct perf_event_attr attr;
    int event_fd;
    uint8_t *mmap_base;
    int expected_max_samples;
    size_t sample_size = 0;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = type;
    attr.config = config;
    attr.sample_type = PERF_SAMPLE_READ | PERF_SAMPLE_TIME;
    attr.sample_period = 1;

    /* wake the poll watcher once a quarter of the buffer is filled */
    attr.watermark = true;
    attr.wakeup_watermark = perf_buffer_size / 4;

    event_fd = perf_event_open(&attr,
                               pid,
                               cpu,
                               -1, /* group fd */
                               PERF_FLAG_FD_CLOEXEC); /* flags */
    if (event_fd == -1) {
        /* fix: message previously said "i915_oa perf event" — copy-paste
         * from the OA open path; this opens a generic counter. */
        asprintf(error, "Error opening generic perf event: %m\n");
        return NULL;
    }

    /* NB: A read-write mapping ensures the kernel will stop writing data when
     * the buffer is full, and will report samples as lost. */
    mmap_base = mmap(NULL, perf_buffer_size + page_size,
                     PROT_READ | PROT_WRITE, MAP_SHARED, event_fd, 0);
    if (mmap_base == MAP_FAILED) {
        asprintf(error, "Error mapping circular buffer, %m\n");
        close (event_fd);
        return NULL;
    }

    stream = xmalloc0(sizeof(*stream));
    stream->type = GPUTOP_STREAM_PERF;
    stream->ref_count = 1;
    stream->fd = event_fd;
    /* first page is the perf mmap metadata page; samples start after it */
    stream->perf.buffer = mmap_base + page_size;
    stream->perf.buffer_size = perf_buffer_size;
    stream->perf.mmap_page = (void *)mmap_base;

    sample_size =
        sizeof(struct perf_event_header) +
        8; /* _TIME */

    /* 1.2x headroom over the theoretical maximum sample count */
    expected_max_samples = (stream->perf.buffer_size / sample_size) * 1.2;

    memset(&stream->perf.header_buf, 0, sizeof(stream->perf.header_buf));

    stream->overwrite = overwrite;
    if (overwrite) {
        stream->perf.header_buf.len = expected_max_samples;
        stream->perf.header_buf.offsets =
            xmalloc(sizeof(uint32_t) * expected_max_samples);
    }

    stream->fd_poll.data = stream;  /* ready_cb recovers the stream */
    uv_poll_init(gputop_ui_loop, &stream->fd_poll, stream->fd);
    uv_poll_start(&stream->fd_poll, UV_READABLE, ready_cb);

    return stream;
}
/* Socket flavor of poll-handle initialization.  On this platform a socket
 * is just a file descriptor, so this simply delegates to uv_poll_init()
 * (the distinction only matters on Windows, where sockets are not fds). */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  return uv_poll_init(loop, handle, socket);
}