int libsoccr_save(struct libsoccr_sk *sk, struct libsoccr_sk_data *data, unsigned data_size)
{
    struct tcp_info ti;

    if (!data || data_size < SOCR_DATA_MIN_SIZE) {
        loge("Invalid input parameters\n");
        return -1;
    }

    memset(data, 0, data_size);

    if (refresh_sk(sk, data, &ti))
        return -2;

    if (get_stream_options(sk, data, &ti))
        return -3;

    if (get_window(sk, data))
        return -4;

    sk->flags |= SK_FLAG_FREE_SQ | SK_FLAG_FREE_RQ;

    if (get_queue(sk->fd, TCP_RECV_QUEUE, &data->inq_seq, data->inq_len, &sk->recv_queue))
        return -4;

    if (get_queue(sk->fd, TCP_SEND_QUEUE, &data->outq_seq, data->outq_len, &sk->send_queue))
        return -5;

    return sizeof(struct libsoccr_sk_data);
}
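/*
 * A minimal caller sketch (an assumption, not part of the original source)
 * showing how the return-code convention above might be consumed. The
 * libsoccr_pause()/libsoccr_resume() entry points and the header name are
 * assumed from the surrounding library.
 */
#include "soccr.h" /* header name assumed */

static int checkpoint_tcp_sk(int fd)
{
    struct libsoccr_sk_data data;
    struct libsoccr_sk *sk;
    int ret;

    sk = libsoccr_pause(fd); /* assumed entry point: puts the socket in repair mode */
    if (!sk)
        return -1;

    ret = libsoccr_save(sk, &data, sizeof(data));
    if (ret < 0) {
        /* -1: bad arguments; -2..-5: a kernel query failed at some stage */
        libsoccr_resume(sk);
        return ret;
    }

    /* ret == sizeof(struct libsoccr_sk_data): data now holds sequence
     * numbers, window state, and queue lengths; sk owns the queue buffers */
    libsoccr_resume(sk);
    return 0;
}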
void _run()
{
    boost::shared_ptr< sdl_event_t > e;

    while ( _cancel == false ) {
        if ( get_queue() ) {
            std::cout << "before get event." << std::endl;
            get_queue()->dequeue( e );
            std::cout << "after get event." << std::endl;

            switch ( e->type() ) {
                case detail::redraw_event::type: {
                    lock();
                    _redraw_handler->redraw( wrap_sdl_image( _screen ));
                    unlock();
                    break;
                }
                case detail::key_up_event::type: {
                    lock();
                    if ( key_up() == true ) {
                        _redraw_handler->redraw( wrap_sdl_image( _screen ));
                    }
                    unlock();
                    break;
                }
                case detail::quit_event::type: {
                    std::cout << "received quit event." << std::endl;
                    quit();
                    break;
                }
            }
        }
    }

    std::cout << "thread main is done." << std::endl;
}
static uint32_t slavio_serial_mem_readb(void *opaque, target_phys_addr_t addr)
{
    SerialState *ser = opaque;
    ChannelState *s;
    uint32_t saddr;
    uint32_t ret;
    int channel;

    saddr = (addr & 3) >> 1;
    channel = (addr & SERIAL_MAXADDR) >> 2;
    s = &ser->chn[channel];
    switch (saddr) {
    case 0:
        SER_DPRINTF("Read channel %c, reg[%d] = %2.2x\n",
                    CHN_C(s), s->reg, s->rregs[s->reg]);
        ret = s->rregs[s->reg];
        s->reg = 0;
        return ret;
    case 1:
        s->rregs[0] &= ~1;
        clr_rxint(s);
        if (s->type == kbd || s->type == mouse)
            ret = get_queue(s);
        else
            ret = s->rx;
        SER_DPRINTF("Read channel %c, ch %d\n", CHN_C(s), ret);
        return ret;
    default:
        break;
    }
    return 0;
}
void thread_init(void)
{
    DPRINTF("\n");
    thread_running = thread_create(NULL);
    assert(get_queue() == thread_running);
}
void on_link_opening(proton::event &e)
{
    proton::link& lnk = e.link();
    if (lnk.is_sender()) {
        proton::sender &sender(lnk.sender());
        proton::terminus &remote_source(lnk.remote_source());
        if (remote_source.is_dynamic()) {
            std::string address = queue_name();
            lnk.source().address(address);
            queue *q = new queue(true);
            queues[address] = q;
            q->subscribe(sender);
            std::cout << "broker dynamic outgoing link from " << address << std::endl;
        } else {
            std::string address = remote_source.address();
            if (!address.empty()) {
                lnk.source().address(address);
                get_queue(address).subscribe(sender);
                std::cout << "broker outgoing link from " << address << std::endl;
            }
        }
    } else {
        std::string address = lnk.remote_target().address();
        if (!address.empty())
            lnk.target().address(address);
        std::cout << "broker incoming link to " << address << std::endl;
    }
}
uint32_t rpc_recv(void *out, uint32_t *len_io, RPC_RECV_FLAG_T flags)
{
    CLIENT_THREAD_STATE_T *thread = CLIENT_GET_THREAD_STATE();
    uint32_t res = 0;
    uint32_t len;
    bool recv_ctrl;

    if (!len_io) {
        len_io = &len;
    }

    /* do we want to receive anything in the control channel at all? */
    recv_ctrl = flags & (RPC_RECV_FLAG_RES | RPC_RECV_FLAG_CTRL | RPC_RECV_FLAG_LEN);
    assert(recv_ctrl || (flags & RPC_RECV_FLAG_BULK)); /* must receive something... */
    assert(!(flags & RPC_RECV_FLAG_CTRL) || !(flags & RPC_RECV_FLAG_BULK)); /* can't receive user data over both bulk and control... */

    if (recv_ctrl || len_io[0]) { /* do nothing if we're just receiving bulk of length 0 */
        merge_flush(thread);

        if (recv_ctrl) {
            VCHIQ_HEADER_T *header = vchiu_queue_pop(get_queue(thread));
            uint32_t *ctrl = (uint32_t *)header->data;
            assert(header->size == rpc_pad_ctrl(header->size));

            if (flags & RPC_RECV_FLAG_LEN) {
                len_io[0] = *(ctrl++);
            }
            if (flags & RPC_RECV_FLAG_RES) {
                res = *(ctrl++);
            }
            if (flags & RPC_RECV_FLAG_CTRL) {
                memcpy(out, ctrl, len_io[0]);
                ctrl += rpc_pad_ctrl(len_io[0]) >> 2;
            }
            assert((uint8_t *)ctrl == ((uint8_t *)header->data + header->size));

            vchiq_release_message(get_handle(thread), header);
        }

        /* the original snippet was truncated here; the bulk branch and return
           below are the minimal completion implied by the flags checked above */
        if (flags & RPC_RECV_FLAG_BULK) {
            recv_bulk(thread, out, len_io[0]);
        }
    }

    return res;
}
int create_queue(struct ast_json* j_queue)
{
    int ret;
    char* uuid;
    struct ast_json* j_tmp;

    if(j_queue == NULL) {
        return false;
    }

    j_tmp = ast_json_deep_copy(j_queue);

    uuid = gen_uuid();
    ast_json_object_set(j_tmp, "uuid", ast_json_string_create(uuid));

    ast_log(LOG_NOTICE, "Create queue. uuid[%s], name[%s]\n",
            ast_json_string_get(ast_json_object_get(j_tmp, "uuid")),
            ast_json_string_get(ast_json_object_get(j_tmp, "name"))
            );

    ret = db_insert("queue", j_tmp);
    AST_JSON_UNREF(j_tmp);
    if(ret == false) {
        ast_free(uuid);
        return false;
    }

    // send ami event
    j_tmp = get_queue(uuid);
    ast_free(uuid); /* no longer needed; the original leaked it on the success path */
    send_manager_evt_out_queue_create(j_tmp);
    AST_JSON_UNREF(j_tmp);

    return true;
}
void *consume(void *arg)
{
    (void)arg; /* unused */
    int data;

    while(1) {
        data = (int)(get_queue(&queue)); /* pop the next item */
        (void)data;                      /* value currently unused */
    }

    return NULL; /* never reached */
}
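/*
 * A minimal self-contained sketch (an assumption, not from the original
 * source) of the blocking queue a consumer loop like the one above relies
 * on: the standard pthreads mutex/condvar pairing. Overflow handling in
 * queue_put() is omitted for brevity.
 */
#include <pthread.h>

#define QCAP 64

struct int_queue {
    int buf[QCAP];
    int head, tail, count;
    pthread_mutex_t mx;
    pthread_cond_t nonempty;
};

static void queue_put(struct int_queue *q, int v)
{
    pthread_mutex_lock(&q->mx);
    q->buf[q->tail] = v;
    q->tail = (q->tail + 1) % QCAP;
    q->count++;
    pthread_cond_signal(&q->nonempty); /* wake one sleeping consumer */
    pthread_mutex_unlock(&q->mx);
}

static int queue_get(struct int_queue *q)
{
    int v;
    pthread_mutex_lock(&q->mx);
    while (q->count == 0)                    /* sleep until a producer signals */
        pthread_cond_wait(&q->nonempty, &q->mx);
    v = q->buf[q->head];
    q->head = (q->head + 1) % QCAP;
    q->count--;
    pthread_mutex_unlock(&q->mx);
    return v;
}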
void clear(void)
{
    assert(total > 0);

    for(size_t i(0); i < queues.size(); ++i)
        get_queue(i).clear();

    total = 0;
}
int main(int argc, char *argv[])
{
    assert(argc == 2);
    op_time = strtol(argv[1], NULL, 10);
    assert(op_time >= 0);
    debug("Hello client! Got %ld.\n", op_time);

    control_queue = get_queue(CONTROL_KEY);
    clients_server_queue = get_queue(CLIENTS_SERVER_KEY);
    server_clients_queue = get_queue(SERVER_CLIENTS_KEY);

    pid = getpid();
    debug("My pid is %ld.\n", pid);

    Mesg msg;
    msg.mesg_type = pid;
    queue_send(control_queue, (char *) &msg);

    char buffer[500];
    while (fgets(buffer, sizeof buffer, stdin) != NULL) {
        char op;
        int n;
        const char *p = buffer;
        sscanf(p, "%c%n", &op, &n);
        p += n;
        switch (op) {
            case 'r': op_read(p); break;
            case 'w': op_write(p); break;
            case 's': op_sum(p); break;
            case 'x': op_swap(p); break;
        }
    }

    msg.op = QUIT;
    queue_send(clients_server_queue, (char *) &msg);
    return 0;
}
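/*
 * A plausible sketch (an assumption, not the project's actual helpers) of
 * the get_queue()/queue_send() pair used above, built on the System V
 * message queue API; CONTROL_KEY and friends would be well-known constant
 * keys, and Mesg is assumed to start with a long mesg_type as msgsnd requires.
 */
#include <sys/msg.h>
#include <stdio.h>
#include <stdlib.h>

int get_queue(key_t key)
{
    /* attach to (or create) the queue identified by the well-known key */
    int id = msgget(key, 0666 | IPC_CREAT);
    if (id < 0) {
        perror("msgget");
        exit(1);
    }
    return id;
}

void queue_send(int queue_id, char *msg)
{
    /* payload size excludes the leading mesg_type field */
    if (msgsnd(queue_id, msg, sizeof(Mesg) - sizeof(long), 0) < 0) {
        perror("msgsnd");
        exit(1);
    }
}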
void queue_buffer::clear_queue(const process_id id)
{
    assert(id < buffered_work.size());

    queue& q(get_queue(id));
    const size_t size(q.size());

    q.clear();

    total -= size;
}
/* Insert an incomplete packet into the packet queue at stack slot one of the
 * Lua VM and return its address; the packet carries no payload yet.
 * The insertion bucket is chosen by hashing fd.
 *
 * Parameters: L is the Lua VM stack; fd is the socket id.
 * Returns: the newly created incomplete-packet object.
 */
static struct uncomplete *
save_uncomplete(lua_State *L, int fd)
{
    struct queue *q = get_queue(L);
    int h = hash_fd(fd);
    struct uncomplete * uc = skynet_malloc(sizeof(struct uncomplete));
    memset(uc, 0, sizeof(*uc));
    uc->next = q->hash[h];
    uc->pack.id = fd;
    q->hash[h] = uc;
    return uc;
}
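/*
 * A sketch of the kind of hash hash_fd() computes (an assumption mirroring
 * the usual skynet implementation; HASHSIZE is the bucket count): folding
 * shifted copies of fd spreads sequential descriptors across buckets.
 */
#include <stdint.h>

#define HASHSIZE 4096

static inline int hash_fd(int fd)
{
    int a = fd >> 24;
    int b = fd >> 12;
    int c = fd;
    return (int)(((uint32_t)(a + b + c)) % HASHSIZE);
}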
void * execute( void * data )
{
    (void)data; /* unused */

    /* busy-waits on the queue; a condition variable would avoid spinning */
    while( 1 ) {
        pthread_mutex_lock( &mutex );
        if( ! empty_chain_queue( &head ) ) {
            char buf[100];
            get_queue( &head, buf );
            printf("buf = %s\n", buf );
        }
        pthread_mutex_unlock( &mutex );
    }

    return NULL; /* never reached */
}
void thread_yield(void)
{
    thread_t *next;

    DPRINTF("\n");
    rotate_queue();
    // LIST_DUMP(&runq);
    next = get_queue();
    DPRINTF("current:%p next:%p\n", thread_running, next);
    if(thread_running != next)
        thread_switch(next);
}
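/*
 * A minimal sketch (an assumption, not the library's actual code) of the
 * round-robin run queue thread_yield() manipulates: rotate_queue() advances
 * the head position, get_queue() peeks at the current head.
 */
#include <stddef.h>

typedef struct thread thread_t; /* opaque, from the threading library */

static struct {
    thread_t *items[64]; /* runnable threads occupy slots 0..count-1 */
    size_t head, count;
} runq;

static void rotate_queue(void)
{
    /* move on to the next runnable thread, wrapping around */
    if (runq.count > 1)
        runq.head = (runq.head + 1) % runq.count;
}

static thread_t *get_queue(void)
{
    return runq.count ? runq.items[runq.head] : NULL;
}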
bool queue_buffer::push(const process_id id, work& work)
{
    assert(id < buffered_work.size());

    queue& q(get_queue(id));

    q.push(work, work.get_strat_level());
    ++total;

    return q.size() > THREADS_GLOBAL_WORK_FLUSH;
}
static void recv_bulk(CLIENT_THREAD_STATE_T *thread, void *out, uint32_t len)
{
    if (len <= CTRL_THRESHOLD) {
        /* small transfers arrive inline on the control channel */
        VCHIQ_HEADER_T *header = vchiu_queue_pop(get_queue(thread));
        assert(header->size == len);
        memcpy(out, header->data, len);
        vchiq_release_message(get_handle(thread), header);
    } else {
        /* large transfers use a bulk receive and wait for its completion */
        VCHIQ_STATUS_T vchiq_status = vchiq_queue_bulk_receive(get_handle(thread),
            out, rpc_pad_bulk(len), NULL);
        assert(vchiq_status == VCHIQ_SUCCESS);
        VCOS_STATUS_T vcos_status = vcos_event_wait(&bulk_event);
        assert(vcos_status == VCOS_SUCCESS);
    }
}
static void incr_active_cnt(h2o_http2_scheduler_node_t *node)
{
    h2o_http2_scheduler_openref_t *ref;

    /* do nothing if node is the root */
    if (node->_parent == NULL)
        return;

    ref = (h2o_http2_scheduler_openref_t *)node;
    if (++ref->_active_cnt != 1)
        return;
    /* just changed to active */
    queue_set(get_queue(ref->node._parent), &ref->_queue_node, ref->weight);
    /* delegate the change towards root */
    incr_active_cnt(ref->node._parent);
}
static void do_rebind(h2o_http2_scheduler_openref_t *ref, h2o_http2_scheduler_node_t *new_parent, int exclusive)
{
    /* rebind _all_link */
    h2o_linklist_unlink(&ref->_all_link);
    h2o_linklist_insert(&new_parent->_all_refs, &ref->_all_link);

    /* rebind to WRR (as well as adjust active_cnt) */
    if (ref->_active_cnt != 0) {
        queue_unset(&ref->_queue_node);
        queue_set(get_queue(new_parent), &ref->_queue_node, ref->weight);
        decr_active_cnt(ref->node._parent);
        incr_active_cnt(new_parent);
    }

    /* update the backlinks */
    ref->node._parent = new_parent;

    if (exclusive)
        convert_to_exclusive(new_parent, ref);
}
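/*
 * A generic sketch (an assumption, simplified from the h2o scheduler above)
 * of propagating an "active descendant" count up a dependency tree: a node
 * sits in its parent's scheduling queue only while its count is nonzero, so
 * only the 0 -> 1 transition needs to recurse (here, iterate) toward the root.
 */
struct sched_node {
    struct sched_node *parent;
    unsigned active_cnt; /* self + active descendants */
};

static void activate(struct sched_node *n)
{
    for (; n->parent != NULL; n = n->parent) {
        if (++n->active_cnt != 1)
            return; /* parent already knows this subtree is active */
        /* 0 -> 1 transition: enqueue n in n->parent's WRR queue here */
    }
}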
/* Push a complete packet into the packet queue at stack slot one of the Lua
 * VM, growing the queue when it cannot hold the packet. clone says whether
 * the message must be copied.
 *
 * Parameters: L is the Lua VM stack; fd is the socket id; buffer is the data
 * buffer; size is the data size; clone says whether to copy the message.
 */
static void
push_data(lua_State *L, int fd, void *buffer, int size, int clone)
{
    if (clone) {
        void * tmp = skynet_malloc(size);
        memcpy(tmp, buffer, size);
        buffer = tmp;
    }
    struct queue *q = get_queue(L);
    struct netpack *np = &q->queue[q->tail];
    if (++q->tail >= q->cap)
        q->tail -= q->cap;
    np->id = fd;
    np->buffer = buffer;
    np->size = size;
    if (q->head == q->tail) {
        expand_queue(L, q);
    }
}
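/*
 * A self-contained sketch (illustrative, not skynet code) of the ring-buffer
 * convention push_data() uses: tail is advanced first, and head == tail
 * afterwards means the buffer just became full, so it must be grown before
 * the next push can proceed.
 */
#include <stdio.h>

#define CAP 4

int main(void)
{
    int head = 0, tail = 0;
    for (int i = 0; i < CAP; i++) {
        tail = (tail + 1) % CAP; /* item written at the old tail slot */
        if (head == tail)
            printf("full after %d pushes -> expand\n", i + 1);
    }
    return 0;
}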
/*
 * edict_get - convenience function for creating an edict
 */
edict_t *
edict_get(bool forget)
{
    edict_t *edict;

    edict = (edict_t *)Malloc(sizeof(edict_t));
    bzero(edict, sizeof(edict_t));

    /* reserve a message queue, if results are wanted */
    if (false == forget)
        edict->resultmq = get_queue();
    else
        edict->resultmq = -1;

    pthread_mutex_init(&edict->reference.mx, NULL);
    edict->reference.count = 1;

    return edict;
}
// Runs the appropriate action for each queued event
void event_process(bool deferred)
{
    Event event;

    while (kl_shift(Event, get_queue(deferred), &event) == 0) {
        switch (event.type) {
            case kEventSignal:
                signal_handle(event);
                break;
            case kEventRStreamData:
                rstream_read_event(event);
                break;
            case kEventJobExit:
                job_exit_event(event);
                break;
            default:
                abort();
        }
    }
}
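/*
 * A plausible sketch (an assumption, not necessarily the actual definition)
 * of the get_queue() selector used above: one klist for events handled
 * immediately and one for events deferred until the main loop is ready.
 * Assumes the klist instantiation for Event from the surrounding file.
 */
static klist_t(Event) *immediate_events;
static klist_t(Event) *deferred_events;

static klist_t(Event) *get_queue(bool deferred)
{
    return deferred ? deferred_events : immediate_events;
}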
thread_pool_t *
create_thread_pool(const char *name, int (*routine) (thread_pool_t *, thread_ctx_t *, edict_t *),
                   pool_limits_t *limits, void *arg)
{
    thread_pool_t *pool;
    pthread_mutex_t *pool_mx;
    pool_ctx_t *pool_ctx;
    int ret;

    /* init */
    pool = (thread_pool_t *)Malloc(sizeof(thread_pool_t));
    pool->work_queue_id = get_queue();
    if (pool->work_queue_id < 0) {
        Free(pool);
        return NULL;
    }
    pool->arg = arg;
    pool->name = name;

    pool_mx = (pthread_mutex_t *)Malloc(sizeof(pthread_mutex_t));
    ret = pthread_mutex_init(pool_mx, NULL);
    if (ret)
        daemon_fatal("pthread_mutex_init");

    pool_ctx = (pool_ctx_t *)Malloc(sizeof(pool_ctx_t));
    pool_ctx->mx = pool_mx;
    pool_ctx->routine = routine;
    pool_ctx->info = pool;
    pool_ctx->count_thread = 0;
    pool_ctx->count_idle = 0;
    pool_ctx->ewma_idle = 0;
    pool_ctx->max_thread = limits ? limits->max_thread : 0;
    pool_ctx->watchdog_time = limits ? limits->watchdog_time : 0; /* watchdog timer, 0 is disabled */
    pool_ctx->wdlist = NULL;

    /* start the first thread */
    create_thread(NULL, DETACH, &thread_pool, pool_ctx);

    return pool;
}
int update_queue(struct ast_json* j_queue)
{
    char* tmp;
    char* sql;
    struct ast_json* j_tmp;
    int ret;
    const char* uuid;

    uuid = ast_json_string_get(ast_json_object_get(j_queue, "uuid"));
    if(uuid == NULL) {
        ast_log(LOG_WARNING, "Could not get uuid.\n");
        return false;
    }

    tmp = db_get_update_str(j_queue);
    if(tmp == NULL) {
        ast_log(LOG_WARNING, "Could not get update str.\n");
        return false;
    }

    ast_asprintf(&sql, "update queue set %s where in_use=%d and uuid=\"%s\";",
            tmp, E_DL_USE_OK, uuid);
    ast_free(tmp);

    ret = db_exec(sql);
    ast_free(sql);
    if(ret == false) {
        ast_log(LOG_WARNING, "Could not update queue info. uuid[%s]\n", uuid);
        return false;
    }

    j_tmp = get_queue(uuid);
    if(j_tmp == NULL) {
        ast_log(LOG_WARNING, "Could not get updated queue info. uuid[%s]\n", uuid);
        return false;
    }
    send_manager_evt_out_queue_update(j_tmp);
    AST_JSON_UNREF(j_tmp);

    return true;
}
void *order_thread_function(void *arg)
{
    FILE* orders = (FILE*) arg;
    printf("order thread started\n");

    while(1) {
        struct order_info* current = read_order(orders);
        if(current == NULL) {
            printf("exiting orders thread\n");
            time_to_exit = 1;
            pthread_exit("exited order thread");
        }

        struct order_queue* cat_queue = get_queue(current->category);
        int wait = 0;
CAMLprim value win_wait (value timeout, value event_count)
{
    CAMLparam2(timeout, event_count);
    DWORD t, t2;
    DWORD res;
    long ret, n = Long_val(event_count);

    t = Long_val(timeout);
    if (t < 0) t = INFINITE;
    t2 = (compN > 0) ? 0 : t;
    D(printf("Waiting: %ld events, timeout %ldms -> %ldms\n", n, t, t2));
    res = (n > 0)
        ? WaitForMultipleObjectsEx(n, events, FALSE, t, TRUE)
        : WaitForMultipleObjectsEx(1, &dummyEvent, FALSE, t, TRUE);
    D(printf("Done waiting\n"));
    if ((t != t2) && (res == WAIT_TIMEOUT)) res = WAIT_IO_COMPLETION;
    switch (res) {
    case WAIT_TIMEOUT:
        D(printf("Timeout\n"));
        ret = -1;
        break;
    case WAIT_IO_COMPLETION:
        D(printf("I/O completion\n"));
        ret = -2;
        break;
    case WAIT_FAILED:
        D(printf("Wait failed\n"));
        ret = 0;
        win32_maperr (GetLastError ());
        uerror("WaitForMultipleObjectsEx", Nothing);
        break;
    default:
        ret = res;
        D(printf("Event: %ld\n", res));
        break;
    }
    get_queue (Val_unit);
    CAMLreturn (Val_long(ret));
}
/// Push data into queue.queue
static void push_data(lua_State *L, int fd, void *buffer, int size, int clone)
{
    // if a copy is requested, allocate new memory and duplicate the data
    if (clone) {
        void * tmp = skynet_malloc(size);
        memcpy(tmp, buffer, size);
        buffer = tmp;
    }

    // append the data to queue.queue
    struct queue *q = get_queue(L);
    struct netpack *np = &q->queue[q->tail];
    if (++q->tail >= q->cap)
        q->tail -= q->cap;
    np->id = fd;
    np->buffer = buffer;
    np->size = size;

    // grow the queue if it just became full
    if (q->head == q->tail) {
        expand_queue(L, q);
    }
}
// Runs the appropriate action for each queued event
bool event_process(bool deferred)
{
    bool processed_events = false;
    Event event;

    while (kl_shift(Event, get_queue(deferred), &event) == 0) {
        processed_events = true;
        switch (event.type) {
            case kEventSignal:
                signal_handle(event);
                break;
            case kEventRStreamData:
                rstream_read_event(event);
                break;
            case kEventJobExit:
                job_exit_event(event);
                break;
            default:
                abort();
        }
    }

    return processed_events;
}
// Wait for some event
bool event_poll(int32_t ms)
{
    uv_run_mode run_mode = UV_RUN_ONCE;

    if (input_ready()) {
        // If there's a pending input event to be consumed, do it now
        return true;
    }

    static int recursive = 0;

    if (!(recursive++)) {
        // Only need to start the libuv handle the first time we enter here
        input_start();
    }

    uv_timer_t timer;
    uv_prepare_t timer_prepare;
    TimerData timer_data = {.ms = ms, .timed_out = false, .timer = &timer};

    if (ms > 0) {
        uv_timer_init(uv_default_loop(), &timer);
        // This prepare handle actually starts the timer
        uv_prepare_init(uv_default_loop(), &timer_prepare);
        // Timeout passed as argument to the timer
        timer.data = &timer_data;
        // We only start the timer after the loop is running; for that we
        // use a prepare handle (pass the interval as data to it)
        timer_prepare.data = &timer_data;
        uv_prepare_start(&timer_prepare, timer_prepare_cb);
    } else if (ms == 0) {
        // For ms == 0, we need to do a non-blocking event poll by
        // setting the run mode to UV_RUN_NOWAIT.
        run_mode = UV_RUN_NOWAIT;
    }

    do {
        // Run one event loop iteration, blocking for events if run_mode is
        // UV_RUN_ONCE
        uv_run(uv_default_loop(), run_mode);
        // Process immediate events outside uv_run, since the libuv event loop
        // does not support recursion (processing events may cause a recursive
        // event_poll call)
        event_process(false);
    } while (
        // Continue running if ...
        !input_ready() &&            // we have no input
        !event_has_deferred() &&     // no events are waiting to be processed
        run_mode != UV_RUN_NOWAIT && // ms != 0
        !timer_data.timed_out);      // we didn't get a timeout

    if (!(--recursive)) {
        // Again, only stop when we leave the top-level invocation
        input_stop();
    }

    if (ms > 0) {
        // Ensure the timer-related handles are closed and run the event loop
        // once more to let libuv perform its cleanup
        uv_close((uv_handle_t *)&timer, NULL);
        uv_close((uv_handle_t *)&timer_prepare, NULL);
        uv_run(uv_default_loop(), UV_RUN_NOWAIT);
        event_process(false);
    }

    return input_ready() || event_has_deferred();
}

bool event_has_deferred(void)
{
    return !kl_empty(get_queue(true));
}

// Push an event to the queue
void event_push(Event event, bool deferred)
{
    *kl_pushp(Event, get_queue(deferred)) = event;
}
void thread_finalize(void)
{
    thread_destroy(thread_running);
    thread_running = get_queue();
}
int main(int argc, char **argv)
{
    thread_info_t threads[THREADS];
    int balls[BALLS];
    int queues[QUEUES];
    queuepair_t qpairs[QUEUEPAIRS];
    int *counter;
    int ret;
    int i;
    int *exitvalue;
    int sum = 0;
    gross_ctx_t myctx = { 0x00 }; /* dummy context */

    ctx = &myctx;

    printf("Check: msgqueue\n");

    printf("  Creating %d message queues...", QUEUES);
    fflush(stdout);
    for (i = 0; i < QUEUES; i++)
        queues[i] = get_queue();
    printf(" Done.\n");

    printf("  Making %d circular queue pairs...", QUEUEPAIRS);
    for (i = 0; i < QUEUEPAIRS; i++) {
        qpairs[i].inq = queues[i % QUEUES];
        qpairs[i].outq = queues[(i + 1) % QUEUES];
    }
    printf(" Done.\n");

    printf("  Creating %d threads to test the message queues...", THREADS);
    fflush(stdout);
    /* start the threads */
    for (i = 0; i < THREADS; i++)
        create_thread(&threads[i], 0, &msgqueueping, &qpairs[i % QUEUEPAIRS]);
    printf(" Done.\n");

    printf("  Sending out %d chain letters...", BALLS);
    fflush(stdout);
    /* serve ping pong balls */
    for (i = 0; i < BALLS; i++) {
        balls[i] = 0;
        counter = &balls[i];
        put_msg(queues[i % QUEUES], &counter, sizeof(int *));
    }
    printf(" Done.\n");

    printf("  Waiting for the results...");
    fflush(stdout);
    for (i = 0; i < THREADS; i++) {
        ret = pthread_join(*threads[i].thread, (void **)&exitvalue);
        if (ret == 0) {
            if (*exitvalue != 0) {
                printf(" Thread returned %d (!= 0)\n", *exitvalue);
                return 1;
            }
        } else {
            perror("pthread_join:");
            return 2;
        }
        Free(threads[i].thread);
        Free(exitvalue);
    }
    printf(" Done.\n");

    for (i = 0; i < BALLS; i++)
        sum += balls[i];

    if (sum != LOOPSIZE * THREADS)
        return 3;
    else
        return 0;
}