// Dispatches an incoming request to the appropriate registered handler.
//
// Priority order:
//   1. A handler registered for the request's specific HTTP method.
//   2. Built-in OPTIONS / TRACE handling.
//   3. The catch-all handler registered for all requests.
//   4. Otherwise reply 405 Method Not Allowed with an "Allow" header
//      listing the supported methods.
void details::http_listener_impl::handle_request(http_request msg)
{
    // Specific method handler takes priority over general.
    const method &mtd = msg.method();

    // Single lookup instead of count() followed by operator[].
    auto handler = m_supported_methods.find(mtd);
    if (handler != m_supported_methods.end())
    {
        handler->second(msg);
    }
    else if (mtd == methods::OPTIONS)
    {
        handle_options(msg);
    }
    else if (mtd == methods::TRCE)
    {
        handle_trace(msg);
    }
    else if (m_all_requests != nullptr)
    {
        m_all_requests(msg);
    }
    else
    {
        // Method is not supported.
        // Send back a list of supported methods to the client.
        http_response response(status_codes::MethodNotAllowed);
        response.headers().add(U("Allow"), get_supported_methods());
        msg.reply(response);
    }
}
bool win32_windowed_app::setup(const win32_windowed_app_setup_params& params) { set_assert_failed_handler([&](const char* file_name, int line_number, const char* expression) { return handle_assert_failed(file_name, line_number, expression); }); set_alert_handler([&](const char* file_name, int line_number, const char* message) { return handle_alert(file_name, line_number, message); }); set_trace_handler([&](const char* file_name, int line_number, bool add_newline, const char* message) { handle_trace(file_name, line_number, add_newline, message); }); TRACE("win32_windowed_app setup..."); params.trace_dump(); ASSERT(s_wnd_proc_context == nullptr); s_wnd_proc_context = this; ASSERT(params.get_hinstance() != nullptr); ASSERT(!params.get_window_class_name().empty()); ASSERT(!params.get_window_caption().empty()); ASSERT(params.get_idle_proc() != nullptr); _setup_params = params; WNDCLASSEX wc = build_wnd_class_ex(params); if (!::RegisterClassEx(&wc)) { ALERT("RegisterClassEx failed : {0}", get_last_win32_error_message()); return false; } DWORD style = WS_OVERLAPPEDWINDOW; if (params.get_is_visible_by_default()) { style |= WS_VISIBLE; } _hwnd = ::CreateWindow( params.get_window_class_name().c_str(), params.get_window_caption().c_str(), style, 0, 0, 0, 0, nullptr, nullptr, params.get_hinstance(), nullptr); if (_hwnd == nullptr) { ALERT("CreateWindow failed : {0}", get_last_win32_error_message()); return false; } _error_dialog.setup(params.get_error_dialog_resources(), _hwnd, params.get_hinstance()); _process_uid = make_date_time_as_utc_now().to_local_time_string("%m_%d_%H_%M_%S"); return true; }
/*
 * Trace hook for a function return. Pops one depth level, then (unless we
 * are inside a no-trace context) records the return value as the single
 * argument of a RETURN record.
 */
void handle_return(PyFrameObject *frame, PyObject *value)
{
    decrement_depth();

    if (in_no_trace_context()) {
        /* Leave the no-trace context once we are back at its entry depth. */
        if (should_exit_no_trace_context())
            exit_no_trace_context();
        return;
    }

    /* A NULL return value is recorded as None. */
    if (value == NULL)
        value = Py_None;

    set_string(&arguments[0]->name, "return value");
    set_string(&arguments[0]->type, value->ob_type->tp_name);
    set_string(&arguments[0]->value, pyobj_to_cstr(value));

    handle_trace(frame, RECORD__RECORD_TYPE__RETURN, 1);
}
void win32_cli_app::setup(const win32_cli_app_setup_params& params) { _setup_params = params; _error_handle = ::GetStdHandle(STD_ERROR_HANDLE); _output_handle = ::GetStdHandle(STD_OUTPUT_HANDLE); set_assert_failed_handler([&](const char* file_name, int line_number, const char* expression) { return handle_assert_failed(file_name, line_number, expression); }); set_alert_handler([&](const char* file_name, int line_number, const char* message) { return handle_alert(file_name, line_number, message); }); set_trace_handler([&](const char* file_name, int line_number, bool add_newline, const char* message) { handle_trace(file_name, line_number, add_newline, message); }); }
/*
 * Trace hook for a function call. Pushes one depth level, decides whether
 * the frame should be traced, and if so captures up to MAX_ARGS of the
 * callee's arguments (name/type/value) before emitting a CALL record.
 */
void handle_call(PyFrameObject *frame)
{
    PyObject *arg_name, *arg_value;
    int idx, total_args, captured = 0;

    increment_depth();

    /* Already inside a frame we decided not to trace. */
    if (in_no_trace_context()) {
        return;
    }

    if (should_trace_frame(frame) == FALSE) {
        enter_no_trace_context();
        return;
    }

    /* Count declared args plus the *args / **kwargs slots, if present. */
    total_args = frame->f_code->co_argcount;
    if (frame->f_code->co_flags & CO_VARARGS) {
        total_args++;
    }
    if (frame->f_code->co_flags & CO_VARKEYWORDS) {
        total_args++;
    }

    for (idx = 0; idx < min(total_args, MAX_ARGS); idx++) {
        arg_name = PyTuple_GetItem(frame->f_code->co_varnames, idx);
        if (frame->f_locals == NULL) {
            arg_value = frame->f_localsplus[idx];
        } else {
            arg_value = PyDict_GetItem(frame->f_locals, arg_name);
        }

        /* A missing value happens when exec is used; skip that slot. */
        if (arg_value != NULL) {
            set_string(&arguments[idx]->name, PYSTR_TO_CHAR(arg_name));
            set_string(&arguments[idx]->type, arg_value->ob_type->tp_name);
            set_string(&arguments[idx]->value, pyobj_to_cstr(arg_value));
            captured++;
        }
    }

    handle_trace(frame, RECORD__RECORD_TYPE__CALL, captured);
}
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 *
 * Returns 0 on success, 1 on failure (bad file, bad magic/version, or no
 * replayable ios found). Side effects: configures td->o (size, data
 * direction, max block sizes, odirect, iodepth) from the probed trace.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth, depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	/* NOTE(review): arrays are DDIR_RWDIR_CNT wide but only the first two
	 * slots are zeroed here — trim counters rely on handle_trace/later
	 * code; verify. */
	ios[0] = ios[1] = 0;
	rw_bs[0] = rw_bs[1] = 0;
	skipped_writes = 0;
	this_depth = depth = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		/* Validate the per-entry magic and trace format version. */
		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		/* Skip the variable-length payload that follows the header. */
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			/* Track queue depth: count QUEUEs until a COMPLETE,
			 * then fold the peak into 'depth'. */
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				this_depth++;
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
				depth = max(depth, this_depth);
				this_depth = 0;
			}

			/* Honor read_only by not replaying writes. */
			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	if (!depth || depth > 1024)
		depth = 1024;

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	/* Derive the job's data direction and max block sizes from what the
	 * trace actually contained. */
	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	/*
	 * we don't know if this option was set or not. it defaults to 1,
	 * so we'll just guess that we should override it if it's still 1
	 *
	 * NOTE(review): the comment says "if it's still 1" but the test
	 * below is != 1 — verify the intended condition.
	 */
	if (td->o.iodepth != 1)
		td->o.iodepth = depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
/* * Load a blktrace file by reading all the blk_io_trace entries, and storing * them as io_pieces like the fio text version would do. */ int load_blktrace(struct thread_data *td, const char *filename, int need_swap) { unsigned long long ttime, delay; struct blk_io_trace t; unsigned long ios[2], skipped_writes; unsigned int cpu; unsigned int rw_bs[2]; struct fifo *fifo; int fd, i, old_state; struct fio_file *f; fd = open(filename, O_RDONLY); if (fd < 0) { td_verror(td, errno, "open blktrace file"); return 1; } fifo = fifo_alloc(TRACE_FIFO_SIZE); old_state = td_bump_runstate(td, TD_SETTING_UP); td->o.size = 0; cpu = 0; ttime = 0; ios[0] = ios[1] = 0; rw_bs[0] = rw_bs[1] = 0; skipped_writes = 0; do { int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t)); if (ret < 0) goto err; else if (!ret) break; else if (ret < (int) sizeof(t)) { log_err("fio: short fifo get\n"); break; } if (need_swap) byteswap_trace(&t); if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) { log_err("fio: bad magic in blktrace data: %x\n", t.magic); goto err; } if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) { log_err("fio: bad blktrace version %d\n", t.magic & 0xff); goto err; } ret = discard_pdu(td, fifo, fd, &t); if (ret < 0) { td_verror(td, ret, "blktrace lseek"); goto err; } else if (t.pdu_len != ret) { log_err("fio: discarded %d of %d\n", ret, t.pdu_len); goto err; } if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) { if (!ttime) { ttime = t.time; cpu = t.cpu; } delay = 0; if (cpu == t.cpu) delay = t.time - ttime; if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only) skipped_writes++; else { /* * set delay to zero if no_stall enabled for * fast replay */ if (td->o.no_stall) delay = 0; handle_trace(td, &t, delay, ios, rw_bs); } ttime = t.time; cpu = t.cpu; } else { delay = 0; handle_trace(td, &t, delay, ios, rw_bs); } } while (1); for (i = 0; i < td->files_index; i++) { f = td->files[i]; trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE); } fifo_free(fifo); close(fd); 
td_restore_runstate(td, old_state); if (!td->files_index) { log_err("fio: did not find replay device(s)\n"); return 1; } if (skipped_writes) log_err("fio: %s skips replay of %lu writes due to read-only\n", td->o.name, skipped_writes); if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) { log_err("fio: found no ios in blktrace data\n"); return 1; } else if (ios[DDIR_READ] && !ios[DDIR_READ]) { td->o.td_ddir = TD_DDIR_READ; td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ]; } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) { td->o.td_ddir = TD_DDIR_WRITE; td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE]; } else { td->o.td_ddir = TD_DDIR_RW; td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ]; td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE]; } /* * We need to do direct/raw ios to the device, to avoid getting * read-ahead in our way. */ td->o.odirect = 1; return 0; err: close(fd); fifo_free(fifo); return 1; }
/*
 * Trace hook for a raised exception. Records the exception type and value
 * (taken from the (type, value, traceback) tuple in exc_info) as the single
 * argument of an EXCEPTION record.
 */
void handle_exception(PyFrameObject *frame, PyObject *exc_info)
{
    PyObject *exc_type = PyTuple_GET_ITEM(exc_info, 0);
    PyObject *exc_value = PyTuple_GET_ITEM(exc_info, 1);

    set_string(&arguments[0]->name, "exception");
    set_string(&arguments[0]->type, ((PyTypeObject *) exc_type)->tp_name);
    set_string(&arguments[0]->value, pyobj_to_cstr(exc_value));

    handle_trace(frame, RECORD__RECORD_TYPE__EXCEPTION, 1);
}
/** * \brief Process the user interface * * This function processes user touch events and updates the display. * * The first registered touch is used to control the cursor. Its presence is * indicated with a red frame around the display. When the cursor touch moves, * its movement is visualized with a trace on the display. * * The second registered touch is used to control mouse buttons, and is * indicated with an inner frame of which the color depends on which side the * touch is for: blue for left and green for right button click. The frame is * shown until the touch is released. * To detect which side click it is, the position relative to the cursor touch * is used, i.e., a touch to the left of the first touch will cause a left * mouse click. * * \param framenumber Current USB frame number * * \note This function should be called every millisecond, e.g., for each USB * frame. */ void ui_process(uint16_t framenumber) { static bool process_traces = false; static bool register_trace = false; static bool left_press = true; static gfx_coord_t old_x; static gfx_coord_t old_y; uint16_t blink_frame; uint8_t new_x_pos; uint8_t new_y_pos; /* Blink LED1 at 1 Hz (toggle every 500 frames) */ blink_frame = framenumber % 1000; if (blink_frame == 0) { LED_On(LED1_GPIO); } else if (blink_frame == 500) { LED_Off(LED1_GPIO); } /* Interleave processing of traces and touch events so they are each * handled every 2 milliseconds. */ if (process_traces) { process_traces = false; /* Is there a new trace to register? */ if (register_trace) { handle_trace(old_x, old_y); /* No new trace, clear an old one */ } else { handle_trace(0, 0); } /* Return now, don't process touches */ return; } /* Process traces in next iteration */ process_traces = true; register_trace = false; /* Process new touch, if it was flagged */ if (ui_new_touch_event) { ui_new_touch_event = false; /* Is event for first finger on display? 
*/ if (ui_touch_event.id == 0) { /* Upon new press, draw red frame */ if (ui_touch_event.status & MXT_PRESS_EVENT) { old_x = ui_touch_event.x; old_y = ui_touch_event.y; draw_frame(0, 2, GFX_COLOR_RED); /* Upon release, erase red frame */ } else if (ui_touch_event.status & MXT_RELEASE_EVENT) { draw_frame(0, 2, COLOR_BACKGROUND); /* Upon move, update mouse position and display trace */ } else if (ui_touch_event.status & MXT_DETECT_EVENT) { /* Scale the relative position down to get a * sensible cursor speed. */ new_x_pos = (ui_touch_event.x - old_x) >> 2; new_y_pos = (ui_touch_event.y - old_y) >> 2; udi_hid_mouse_moveX(new_x_pos); udi_hid_mouse_moveY(new_y_pos); old_x = ui_touch_event.x; old_y = ui_touch_event.y; register_trace = true; } } /* Is event for second finger on display? */ if (ui_touch_event.id == 1) { /* For new press, signal a left or right mouse button * press depending on which side of the cursor it's on */ if (ui_touch_event.status & MXT_PRESS_EVENT) { /* Left click? */ if (ui_touch_event.x <= old_x) { left_press = true; udi_hid_mouse_btnleft(HID_MOUSE_BTN_DOWN); /* Draw a blue inner-frame */ draw_frame(2, 2, GFX_COLOR_BLUE); /* Must otherwise be a right click. */ } else { left_press = false; udi_hid_mouse_btnright(HID_MOUSE_BTN_DOWN); /* Draw a green inner-frame */ draw_frame(2, 2, GFX_COLOR_GREEN); } } /* For release, signal release of the mouse button */ if (ui_touch_event.status & MXT_RELEASE_EVENT) { if (left_press) { udi_hid_mouse_btnleft(HID_MOUSE_BTN_UP); } else { udi_hid_mouse_btnright(HID_MOUSE_BTN_UP); } /* Erase blue/green inner-frame */ draw_frame(2, 2, COLOR_BACKGROUND); } } }
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 *
 * Returns 0 on success, 1 on failure (bad file, bad magic/version, or no
 * replayable ios found). Side effects: configures td->o (size, data
 * direction, max block sizes, odirect, iodepth) from the probed trace,
 * respecting options the user set explicitly (fio_option_is_set).
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	/* Per-data-direction running depth and observed peak depth. */
	int this_depth[DDIR_RWDIR_CNT], depth[DDIR_RWDIR_CNT], max_depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		ios[i] = 0;
		rw_bs[i] = 0;
		this_depth[i] = 0;
		depth[i] = 0;
	}

	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		/* Validate the per-entry magic and trace format version. */
		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		/* Skip the variable-length payload that follows the header. */
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			/* Track per-direction queue depth: QUEUE raises it,
			 * merges lower it, COMPLETE folds the peak into
			 * depth[]. */
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			/* Honor read_only by not replaying writes. */
			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	/* Derive the job's data direction and max block sizes from what the
	 * trace actually contained. */
	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way. But only do so if the minimum block size
	 * is a multiple of 4k, otherwise we don't know if it's safe to do so.
	 */
	if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
		td->o.odirect = 1;

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}