/**
 * Block the calling thread until the socket-close event is signalled
 * on the stub's event channel.
 */
void socket_stub_wait_closed()
{
    uint32_t mask = 0x1;

    event_read(&closed_events, &mask, sizeof(mask));
}
/**
 * Poll every mouse device and fold all pending input events into the
 * cached button and axe state.
 */
void mouseb_event_poll(void)
{
	unsigned i;
	int type, code, value;

	log_debug(("mouseb:event: mouseb_event_poll()\n"));

	for (i = 0; i < event_state.mac; ++i) {
		struct mouse_item_context* item = &event_state.map[i];

		/* Drain every event queued on this device. */
		while (event_read(item->f, &type, &code, &value) == 0) {
			unsigned j;

			if (type == EV_KEY) {
				/* Button transition: latch pressed/released. */
				for (j = 0; j < item->button_mac; ++j) {
					if (item->button_map[j].code == code) {
						item->button_map[j].state = (value != 0);
						break;
					}
				}
			} else if (type == EV_REL) {
				/* Relative motion: accumulate the delta. */
				for (j = 0; j < item->axe_mac; ++j) {
					if (item->axe_map[j].code == code) {
						item->axe_map[j].value += value;
						break;
					}
				}
			}
		}
	}
}
static struct http_server_connection_t * allocate_connection(struct http_server_t *self_p) { uint32_t mask; struct http_server_connection_t *connection_p; while (1) { sys_lock(); connection_p = self_p->connections_p; while (connection_p->thrd.stack.buf_p != NULL) { if (connection_p->state == http_server_connection_state_free_t) { connection_p->state = http_server_connection_state_allocated_t; break; } connection_p++; } sys_unlock(); /* Connection available. */ if (connection_p->thrd.name_p != NULL) { break; } mask = 0x1; event_read(&self_p->events, &mask, sizeof(mask)); } return (connection_p); }
/*
 * Tests that the L3 bank handling is correct. We fixed it in commit e9aaac1.
 */
static int l3_bank_test(void)
{
	struct event event;
	char *p;
	int i;

	p = malloc(MALLOC_SIZE);
	FAIL_IF(!p);

	/* Raw event code selecting an L3 bank counter. */
	event_init(&event, 0x84918F);
	FAIL_IF(event_open(&event));

	/* Touch one byte per 64K chunk to generate memory traffic. */
	for (i = 0; i < MALLOC_SIZE; i += 0x10000)
		p[i] = i;

	/* Check the read result like the sibling tests do; the original
	 * ignored a failed read and then inspected stale result fields. */
	FAIL_IF(event_read(&event));
	event_report(&event);

	FAIL_IF(event.result.running == 0);
	FAIL_IF(event.result.enabled == 0);

	event_close(&event);
	free(p);

	return 0;
}
/** * The connection thread serves a client for the duration of the * socket lifetime. */ static void *connection_main(void *arg_p) { struct http_server_connection_t *connection_p = arg_p; struct http_server_t *self_p = connection_p->self_p; uint32_t mask; /* thrd_init_env(buf, sizeof(buf)); */ /* thrd_set_env("CWD", self_p->root_path_p); */ thrd_set_name(connection_p->thrd.name_p); /* Wait for a connection from the listener. */ while (1) { log_object_print(NULL, LOG_DEBUG, FSTR("Connection thread '%s' waiting for a new connection.\r\n"), thrd_get_name()); mask = 0x1; event_read(&connection_p->events, &mask, sizeof(mask)); if (mask & 0x1) { handle_request(self_p, connection_p); socket_close(&connection_p->socket); /* Add thread to the free list. */ sys_lock(); connection_p->state = http_server_connection_state_free_t; sys_unlock(); mask = 0x1; event_write(&self_p->events, &mask, sizeof(mask)); } } return (NULL); }
/**
 * Poll every joystick device and fold all pending input events into
 * the cached button, relative-axis and stick state.
 */
void joystickb_event_poll(void)
{
	unsigned i;
	int type, code, value;

	log_debug(("joystickb:event: joystickb_event_poll()\n"));

	/* Global poll counter, also used to timestamp the ACTLABS hack. */
	++event_state.counter;

	for(i=0;i<event_state.mac;++i) {
		struct joystick_item_context* item = event_state.map + i;

		/* Drain every event queued on this device. */
		while (event_read(item->f, &type, &code, &value) == 0) {
			if (type == EV_KEY) {
				/* Button transition: latch pressed/released. */
				unsigned j;
				for(j=0;j<item->button_mac;++j) {
					if (code == item->button_map[j].code) {
						item->button_map[j].state = value != 0;
						break;
					}
				}
#ifdef USE_ACTLABS_HACK
				/* recogize the special button and enable the hack */
				if (item->vendor_id == ACTLABS_VENDOR
					&& (item->device_id == ACTLABS_DEVICE_1
						|| item->device_id == ACTLABS_DEVICE_2)
					&& code == ACTLABS_BUTTON) {
					if (value) {
						/* Remember when the hack was armed. */
						item->actlabs_hack_enable = 1;
						item->actlabs_hack_counter = event_state.counter;
					} else {
						item->actlabs_hack_enable = 0;
					}
				}
#endif
			} else if (type == EV_REL) {
				/* Relative motion: accumulate the delta. */
				unsigned j;
				for(j=0;j<item->rel_mac;++j) {
					if (code == item->rel_map[j].code) {
						item->rel_map[j].value += value;
						break;
					}
				}
			} else if (type == EV_ABS) {
				/* Absolute axis: find the matching axe on any
				   stick and update it. Note: no break here, so a
				   code shared by several sticks updates all of
				   them. */
				unsigned j;
				for(j=0;j<item->stick_mac;++j) {
					unsigned k;
					struct joystick_stick_context* stick = item->stick_map + j;
					for(k=0;k<stick->axe_mac;++k) {
						struct joystick_axe_context* axe = stick->axe_map + k;
						if (code == axe->code)
							joystickb_event_axe_set(axe, value);
					}
				}
			}
		}
	}
}
/**
 * Report whether the event object is currently signaled.
 *
 * @return VK_EVENT_SET or VK_EVENT_RESET on success, otherwise the
 *         error code returned by event_read().
 */
VkResult intel_event_get_status(struct intel_event *event)
{
    uint32_t val;
    VkResult ret = event_read(event, &val);

    if (ret != VK_SUCCESS)
        return ret;

    return (val != 0) ? VK_EVENT_SET : VK_EVENT_RESET;
}
/**
 * Test a cpu-bound perf event competing with a child's EBB event on
 * the same CPU. The cpu event is expected to win: the child should
 * fail to read its event (exit code 2) while the cpu event runs.
 */
int cpu_event_pinned_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;

	SKIP_IF(!ebb_is_supported());

	/* Pin ourselves to one online CPU so both events target it. */
	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);
	FAIL_IF(bind_to_cpu(cpu));

	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);

	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}

	/* We setup the cpu event first */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		kill_child_and_wait(pid);
		return rc;
	}

	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;

	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));

wait:
	/* We expect it to fail to read the event */
	FAIL_IF(wait_for_child(pid) != 2);

	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));

	event_report(&event);

	/* The cpu event should have run */
	FAIL_IF(event.result.value == 0);
	/* It was never descheduled, so enabled must equal running. */
	FAIL_IF(event.result.enabled != event.result.running);

	return 0;
}
/**
 * Accept a client connection (stub implementation).
 *
 * Initializes the accepted socket's channel and then blocks until an
 * accept event is posted on the file-scope accept_events channel.
 *
 * NOTE(review): self_p and addr_p are ignored here — presumably this
 * is a test/port stub where the listening socket and peer address are
 * not used; confirm against the real implementation.
 *
 * @return zero(0) always.
 */
int socket_accept(struct socket_t *self_p,
                  struct socket_t *accepted_p,
                  struct inet_addr_t *addr_p)
{
    uint32_t mask;

    /* read/write/size appear to be file-scope channel callbacks —
       TODO confirm. */
    chan_init(&accepted_p->base, read, write, size);

    /* Block until the accept event fires. */
    mask = 0x1;
    event_read(&accept_events, &mask, sizeof(mask));

    return (0);
}
/**
 * Poll all perf readers and drain those with pending samples.
 *
 * Bug fix: the previous code built a single-element pollfd array but
 * passed num_readers to poll(), so for num_readers > 1 the kernel
 * read past the end of the array (undefined behavior) and readers
 * beyond index 0 were matched against garbage revents. Build one
 * pollfd per reader instead.
 *
 * @param num_readers Number of entries in readers.
 * @param readers     Array of perf readers to poll.
 * @param timeout     poll() timeout in milliseconds (-1 blocks).
 *
 * @return 0.
 */
int perf_reader_poll(int num_readers, struct perf_reader **readers, int timeout) {
  int i;

  if (num_readers <= 0)
    return 0;

  struct pollfd pfds[num_readers];

  for (i = 0; i < num_readers; ++i) {
    pfds[i].fd = readers[i]->fd;
    pfds[i].events = POLLIN;
    pfds[i].revents = 0;
  }

  if (poll(pfds, num_readers, timeout) > 0) {
    for (i = 0; i < num_readers; ++i) {
      if (pfds[i].revents & POLLIN)
        event_read(readers[i]);
    }
  }

  return 0;
}
/**
 * Child half of the test, running with EBE=0 (EBBs not enabled for
 * this process).
 */
static int child(void)
{
	/* Even though we have EBE=0 we can still see the EBB regs, and
	   they must all read back as zero. */
	FAIL_IF(mfspr(SPRN_BESCR) != 0);
	FAIL_IF(mfspr(SPRN_EBBHR) != 0);
	FAIL_IF(mfspr(SPRN_EBBRR) != 0);

	/* Writing PMC1 from userspace is expected to SIGILL, which
	   catch_sigill() must observe. */
	FAIL_IF(catch_sigill(write_pmc1));

	/* We can still read from the event, though it is on our parent */
	FAIL_IF(event_read(&event));

	return 0;
}
/**
 * def read(self, mask)
 *
 * Block until at least one event in the given mask is set, then
 * return the matched mask as a small int. Raises OSError if the
 * event read does not return a full mask.
 */
static mp_obj_t class_event_read(mp_obj_t self_in, mp_obj_t mask_in)
{
    struct class_event_t *self_p = MP_OBJ_TO_PTR(self_in);
    uint32_t mask = mp_obj_get_int(mask_in);

    if (event_read(&self_p->event, &mask, sizeof(mask)) != sizeof(mask)) {
        nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError,
                                           "failed to read event mask"));
    }

    return (MP_OBJ_NEW_SMALL_INT(mask));
}
/**
 * Test a task (per-pid) perf event competing with the child's EBB
 * event. The EBB event is expected to push the task event off the
 * PMU, so the child should succeed.
 */
int task_event_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	pid_t pid;
	int rc;

	SKIP_IF(!ebb_is_supported());

	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);

	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}

	/* We setup the task event first */
	rc = setup_child_event(&event, pid);
	if (rc) {
		kill_child_and_wait(pid);
		return rc;
	}

	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;

	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));

wait:
	/* The EBB event should push the task event off so the child
	   should succeed */
	FAIL_IF(wait_for_child(pid));
	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));

	event_report(&event);

	/* The task event may have run, or not so we can't assert
	   anything about it */

	return 0;
}
/** * The main thread of the music player. Handles events and coverts * samples to play a song. */ static void *music_player_main(struct music_player_t *self_p) { uint32_t mask; struct time_t timeout; thrd_set_name("music_player"); /* Start the periodic fill timer. */ timeout.seconds = 0; timeout.nanoseconds = 10000000; timer_init(&self_p->timer, &timeout, (void (*)(void *))fill_timer_cb, self_p, TIMER_PERIODIC); timer_start(&self_p->timer); /* Start the main loop of the music player. */ while (1) { mask = (EVENT_PLAY | EVENT_PAUSE | EVENT_STOP | EVENT_TIMEOUT); event_read(&self_p->event, &mask, sizeof(mask)); if (mask & EVENT_STOP) { handle_event_stop(self_p); } if (mask & EVENT_PAUSE) { handle_event_pause(self_p); } if (mask & EVENT_PLAY) { handle_event_play(self_p); } /* Play if the state in playing, eyy! */ if (self_p->state == STATE_PLAYING) { play_chunk(self_p); } } return (NULL); }
int main() { uint32_t mask; init(); /* Spawn the shell. */ shell_args.chin_p = &uart.chin; shell_args.chout_p = &uart.chout; shell_args.username_p = NULL; shell_args.password_p = NULL; thrd_spawn(shell_entry, &shell_args, 0, shell_stack, sizeof(shell_stack)); while (1) { mask = (EVENT_BUTTON_PLAY | EVENT_BUTTON_NEXT | EVENT_BUTTON_PREV | EVENT_BUTTON_STOP); event_read(&event, &mask, sizeof(mask)); if (mask & EVENT_BUTTON_PLAY) { handle_event_play(); } if (mask & EVENT_BUTTON_NEXT) { handle_event_next(); } if (mask & EVENT_BUTTON_PREV) { handle_event_prev(); } if (mask & EVENT_BUTTON_STOP) { handle_event_stop(); } } return (0); }
int main() { uint32_t mask; init(); /* Spawn the shell. */ shell_init(&shell, &uart.chin, &uart.chout, NULL, NULL, NULL, NULL); thrd_spawn(shell_main, &shell, 0, shell_stack, sizeof(shell_stack)); while (1) { mask = (EVENT_BUTTON_PLAY | EVENT_BUTTON_NEXT | EVENT_BUTTON_PREV | EVENT_BUTTON_STOP); event_read(&event, &mask, sizeof(mask)); if (mask & EVENT_BUTTON_PLAY) { handle_event_play(); } if (mask & EVENT_BUTTON_NEXT) { handle_event_next(); } if (mask & EVENT_BUTTON_PREV) { handle_event_prev(); } if (mask & EVENT_BUTTON_STOP) { handle_event_stop(); } } return (0); }
/**
 * Application entry point: arm a one-second periodic timer and print
 * a line every time it fires.
 */
int main()
{
    uint32_t mask;
    struct time_t timeout;

    sys_start();
    event_init(&event);

    /* Initialize and start a periodic timer. */
    timeout.seconds = 1;
    timeout.nanoseconds = 0;
    timer_init(&timer, &timeout, timer_cb, NULL, TIMER_PERIODIC);
    timer_start(&timer);

    for (;;) {
        /* Block until the timer callback posts the timeout event. */
        mask = TIMEOUT_EVENT;
        event_read(&event, &mask, sizeof(mask));
        std_printf(FSTR("timeout\r\n"));
    }

    return (0);
}
// trace_add? // var8, vara, varc, vare, var10 void trace_add(u16 op, FUNC *table, u8 *log_data, u16 table_offset, u16 result) { AGI_EVENT *temp4 = 0; LOGIC *logic_orig = 0; // orig logic_cur u8 *msg; //table += op<<2; table += op; push_row_col(); text_attrib_push(); text_colour(0, 0xF); trace_scroll(); if (logic_called != 0) { logic_called = 0; agi_printf("=========================="); trace_scroll(); } logic_orig = logic_cur; if ((trace_logic==0) || ((logic_cur=logic_list_find(trace_logic)) == 0)) { agi_printf("%d: cmd.%d", logic_orig->num, op); } else { if (op == 0) agi_printf("%d: %s", logic_orig->num, "return"); else { msg = logic_msg(op + table_offset); if (msg != 0) agi_printf("%d: %s", logic_orig->num, logic_msg(op + table_offset)); else { if (result != 0xFFFF) agi_printf("%d: eval.%d", logic_orig->num, op); else agi_printf("%d: cmd.%d", logic_orig->num, op); } } } logic_cur = logic_orig; // print function name? trace_var_print(table, log_data); if (result != 0xFFFF) { goto_row_col(trace_bottom, trace_right-2); if (result == 0) agi_printf(" :%c", 'F'); else agi_printf(" :%c", 'T'); } ch_update(); while (trace_state != 0) { temp4 = event_read(); if (temp4 != 0) if (temp4->type == 1) break; } if (temp4 != 0) if (temp4->data == '+') trace_state = 2; pop_row_col(); text_attrib_pop(); ch_update(); }
/**
 * Test that the PMU supports per-event exclude settings (power8
 * feature): four instruction-counting events with different
 * exclude_user/kernel/hv combinations are opened in one group and
 * must all stay scheduled simultaneously.
 */
static int per_event_excludes(void)
{
	struct event *e, events[4];
	char *platform;
	int i;

	/* Per-event excludes are only expected on power8. */
	platform = (char *)get_auxv_entry(AT_BASE_PLATFORM);
	FAIL_IF(!platform);
	SKIP_IF(strcmp(platform, "power8") != 0);

	/*
	 * We need to create the events disabled, otherwise the running/enabled
	 * counts don't match up.
	 */
	e = &events[0];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions");
	e->attr.disabled = 1;

	/* Kernel-only instructions. */
	e = &events[1];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(k)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_hv = 1;

	/* Hypervisor-only instructions. */
	e = &events[2];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(h)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_kernel = 1;

	/* User-only instructions. */
	e = &events[3];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(u)");
	e->attr.disabled = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_kernel = 1;

	FAIL_IF(event_open(&events[0]));

	/*
	 * The open here will fail if we don't have per event exclude support,
	 * because the second event has an incompatible set of exclude settings
	 * and we're asking for the events to be in a group.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(event_open_with_group(&events[i], events[0].fd));

	/*
	 * Even though the above will fail without per-event excludes we keep
	 * testing in order to be thorough.
	 */
	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	/* Spin for a while */
	for (i = 0; i < INT_MAX; i++)
		asm volatile("" : : : "memory");

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	for (i = 0; i < 4; i++) {
		FAIL_IF(event_read(&events[i]));
		event_report(&events[i]);
	}

	/*
	 * We should see that all events have enabled == running. That
	 * shows that they were all on the PMU at once.
	 */
	for (i = 0; i < 4; i++)
		FAIL_IF(events[i].result.running != events[i].result.enabled);

	/*
	 * We can also check that the result for instructions is >= all the
	 * other counts. That's because it is counting all instructions while
	 * the others are counting a subset.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(events[0].result.value < events[i].result.value);

	for (i = 0; i < 4; i++)
		event_close(&events[i]);

	return 0;
}