static int actor_msg_handler(qamsg_header_t *header, qactor_t *actor) {
  qactor_msg_t *actor_msg;
  qamsg_t      *msg;
  lua_State    *state;

  msg       = (qamsg_t*)header;
  actor_msg = &(msg->actor_msg);
  state     = actor->state;

  /*
   * if the state yielded waiting for a msg, push the msg
   * onto the stack and resume
   */
  if (lua_status(state) == LUA_YIELD && actor->waiting_msg) {
    actor->waiting_msg = 0;
    lua_newtable(state);
    qlua_dump_dict(state, actor_msg->arg_dict);
    qdict_free(actor_msg->arg_dict);
    qlua_resume(state, 1);
    /* the msg has been consumed here, so it can be freed */
    qfree(msg);
  } else {
    /*
     * else queue the msg on the actor msg list; the list entry is
     * embedded in msg, so msg must remain allocated until the queued
     * msg is delivered and freed by its consumer
     */
    qlist_add_tail(&actor_msg->entry, &(actor->msg_list));
  }

  return QOK;
}
void fcfs_add(
        model_net_request     *req,
        const mn_sched_params *sched_params,
        int                    remote_event_size,
        void                  *remote_event,
        int                    local_event_size,
        void                  *local_event,
        void                  *sched,
        model_net_sched_rc    *rc,
        tw_lp                 *lp)
{
    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
    q->entry_time = tw_now(lp);
    q->req = *req;
    q->sched_params = *sched_params;
    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
    if (remote_event_size > 0){
        q->remote_event = malloc(remote_event_size);
        memcpy(q->remote_event, remote_event, remote_event_size);
    }
    else { q->remote_event = NULL; }
    if (local_event_size > 0){
        q->local_event = malloc(local_event_size);
        memcpy(q->local_event, local_event, local_event_size);
    }
    else { q->local_event = NULL; }
    mn_sched_queue *s = sched;
    s->queue_len++;
    qlist_add_tail(&q->ql, &s->reqs);
    dprintf("%lu (mn): adding %srequest from %lu to %lu, size %lu, at %lf\n",
            lp->gid, req->is_pull ? "pull " : "", req->src_lp,
            req->final_dest_lp, req->msg_size, tw_now(lp));
}
void dbpf_op_queue_add(dbpf_op_queue_p op_queue, dbpf_queued_op_t *dbpf_op)
{
    gossip_debug(GOSSIP_DBPF_COALESCE_DEBUG, "op_queue add: %p\n", dbpf_op);
    qlist_add_tail(&dbpf_op->link, op_queue);
}
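Common to all of these call sites is the intrusive-list pattern behind qlist_add_tail: each queued element embeds a link field (entry, ql, link, active_entry), and the list head points at those links rather than at the elements themselves. The following is a minimal, self-contained sketch of that pattern. The struct layout, the QLIST_HEAD_INIT and qlist_entry names, and the exact splicing order are assumptions modeled on the Linux-kernel list.h style that this qlist API appears to follow, not the upstream definitions.

/* sketch only: intrusive doubly-linked list in the qlist style */
#include <stddef.h>
#include <stdio.h>

struct qlist_head {
    struct qlist_head *next, *prev;
};

#define QLIST_HEAD_INIT(name) { &(name), &(name) }

/* splice 'entry' in just before 'head', i.e. at the tail of the list */
static void qlist_add_tail(struct qlist_head *entry, struct qlist_head *head)
{
    entry->prev = head->prev;
    entry->next = head;
    head->prev->next = entry;
    head->prev = entry;
}

/* recover the enclosing structure from its embedded link field */
#define qlist_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct request {
    int id;
    struct qlist_head link;   /* embedded link, as in the snippets here */
};

int main(void)
{
    struct qlist_head queue = QLIST_HEAD_INIT(queue);
    struct request a = { .id = 1 }, b = { .id = 2 };

    qlist_add_tail(&a.link, &queue);
    qlist_add_tail(&b.link, &queue);

    for (struct qlist_head *p = queue.next; p != &queue; p = p->next)
        printf("request %d\n", qlist_entry(p, struct request, link)->id);
    return 0;
}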
static void handle_rev_io_sched_compl(
        lsm_state_t   *ns,
        tw_bf         *b,
        lsm_message_t *m_in,
        tw_lp         *lp)
{
    if (LSM_DEBUG)
        printf("handle_rev_io_sched_compl called\n");

    if (ns->sched.active_count) {
        lsm_sched_op_t *prev = rc_stack_pop(ns->sched.freelist);
        handle_rev_io_request(ns, b, &prev->data, m_in, lp);
        qlist_add_tail(&prev->ql, &ns->sched.queues[m_in->prio]);
    }
    ns->sched.active_count++;
}
int rr_next(
        tw_stime           *poffset,
        void               *sched,
        void               *rc_event_save,
        model_net_sched_rc *rc,
        tw_lp              *lp)
{
    int ret = fcfs_next(poffset, sched, rc_event_save, rc, lp);
    // if error in fcfs or the request was finished & removed, then nothing to
    // do here
    if (ret == -1 || ret == 1)
        return ret;
    // otherwise request was successful, still in the queue
    else {
        mn_sched_queue *s = sched;
        qlist_add_tail(qlist_pop(&s->reqs), &s->reqs);
        return ret;
    }
}
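rr_next turns the same FCFS queue into a round-robin scheduler with one statement: popping the head link and re-appending it with qlist_add_tail rotates the list, so the next call services a different queued request. A sketch of that rotation idiom follows, assuming a qlist_pop that unlinks and returns the first entry (NULL on an empty list); the real qlist_pop used by this scheduler may behave differently. Note that fcfs_next returning 0 means the request is still queued, so the pop cannot return NULL at this call site.

/* sketch only: the rotation idiom used by rr_next above */
struct qlist_head { struct qlist_head *next, *prev; };

static struct qlist_head *qlist_pop(struct qlist_head *head)
{
    struct qlist_head *first = head->next;
    if (first == head)
        return NULL;                        /* empty list */
    head->next = first->next;
    first->next->prev = head;
    first->next = first->prev = first;      /* leave the entry detached */
    return first;
}

/* rr_next then rotates the queue in one statement:
 *     qlist_add_tail(qlist_pop(&s->reqs), &s->reqs);
 * so the request just serviced moves to the back of the list. */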
static void handle_io_sched_new(
        lsm_state_t   *ns,
        tw_bf         *b,
        lsm_message_t *m_in,
        tw_lp         *lp)
{
    if (LSM_DEBUG)
        printf("handle_io_sched_new called\n");

    // if nothing else is going on, then issue directly
    if (!ns->sched.active_count)
        handle_io_request(ns, b, &m_in->data, m_in, lp);
    else {
        lsm_sched_op_t *op = malloc(sizeof(*op));
        op->data = m_in->data;
        qlist_add_tail(&op->ql, &ns->sched.queues[m_in->prio]);
    }
    ns->sched.active_count++;
}
static int kqueue_poll(qengine_t *engine, int timeout_ms) {
  qkqueue_t *kqueue;
  int        num, ret;

  kqueue = engine->data;
  num = 0;
  if (timeout_ms > 0) {
    struct timespec timeout;
    timeout.tv_sec  = timeout_ms / 1000;
    /* convert the remaining milliseconds to nanoseconds */
    timeout.tv_nsec = (timeout_ms - 1000 * timeout.tv_sec) * 1000000;
    ret = kevent(kqueue->kqfd, NULL, 0, kqueue->events,
                 engine->size, &timeout);
  } else {
    ret = kevent(kqueue->kqfd, NULL, 0, kqueue->events,
                 engine->size, NULL);
  }

  if (ret > 0) {
    int i;
    num = ret;
    for (i = 0; i < num; ++i) {
      int            flags = 0;
      int            fd    = -1;
      struct kevent *event = NULL;
      qevent_t      *ev    = NULL;

      event = kqueue->events + i;
      fd = event->ident;
      ev = engine->events[fd];
      /* kevent filters are discrete values, not bit flags */
      if (event->filter == EVFILT_READ) {
        flags |= QEVENT_READ;
      }
      if (event->filter == EVFILT_WRITE) {
        flags |= QEVENT_WRITE;
      }
      ev->flags = flags;
      qlist_add_tail(&ev->active_entry, &engine->active);
    }
  }

  return num;
}
static int queues_queue_add(struct PINT_manager_s *manager,
                            PINT_worker_inst *inst,
                            PINT_queue_id queue_id)
{
    struct PINT_worker_queues_s *w;
    struct PINT_queue_s *queue;

    w = &inst->queues;
    queue = id_gen_fast_lookup(queue_id);
    assert(queue);

    gen_mutex_lock(&w->mutex);
    qlist_add_tail(&queue->link, &w->queues);
    PINT_queue_add_producer(queue_id, w);
    PINT_queue_add_consumer(queue_id, w);
    gen_cond_signal(&w->cond);
    gen_mutex_unlock(&w->mutex);

    return 0;
}
static int threaded_queues_queue_add(struct PINT_manager_s *manager,
                                     PINT_worker_inst *inst,
                                     PINT_queue_id queue_id)
{
    struct PINT_worker_threaded_queues_s *w;
    struct PINT_queue_s *queue;

    w = &inst->threaded;
    queue = id_gen_fast_lookup(queue_id);

    gen_mutex_lock(&w->mutex);
    assert(queue->link.next == NULL && queue->link.prev == NULL);
    qlist_add_tail(&queue->link, &w->queues);
    PINT_queue_add_producer(queue_id, w);
    PINT_queue_add_consumer(queue_id, w);

    /* send a signal to one thread waiting for a queue to be added */
    gen_cond_signal(&w->cond);
    gen_mutex_unlock(&w->mutex);

    return 0;
}
static int epoll_poll(qengine_t *engine, int timeout_ms) {
  int       num;
  int       flags;
  int       i, fd;
  qepoll_t *epoll;
  struct epoll_event *event;
  qevent_t *ev;

  epoll = (qepoll_t*)engine->data;
  num = epoll_wait(epoll->fd, &(epoll->events[0]), engine->size, timeout_ms);
  if (num > 0) {
    for (i = 0; i < num; i++) {
      flags = 0;
      event = epoll->events + i;
      fd = event->data.fd;
      ev = engine->events[fd];
      if (event->events & EPOLLIN) {
        flags |= QEVENT_READ;
      }
      if (event->events & EPOLLOUT) {
        flags |= QEVENT_WRITE;
      }
      if (event->events & (EPOLLERR | EPOLLHUP)) {
        flags = QEVENT_ERROR;
        ev->error = 1;
      }
      ev->flags = flags;
      qlist_add_tail(&ev->active_entry, &engine->active);
    }
  }

  return num;
}
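Both pollers (kqueue_poll and epoll_poll) finish the same way: readiness is translated into QEVENT_* flags on the qevent_t, and the event is appended to engine->active rather than dispatched inline. A plausible drain loop over that active list is sketched below; the qevent struct layout, the handler callback, and the qlist_del/qlist_entry helpers are hypothetical stand-ins introduced only for illustration, not the real qnode engine API.

/* sketch only: draining an active list like the one the pollers build */
#include <stddef.h>

struct qlist_head { struct qlist_head *next, *prev; };

#define qlist_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void qlist_del(struct qlist_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    entry->next = entry->prev = entry;
}

typedef struct qevent_sketch {
    int flags;
    void (*handler)(struct qevent_sketch *ev, int flags);
    struct qlist_head active_entry;   /* same embedded-link idea as above */
} qevent_sketch_t;

static void drain_active(struct qlist_head *active)
{
    /* detach each ready event before invoking its callback, so the
     * callback may re-arm or re-queue the event without corrupting
     * the traversal */
    while (active->next != active) {
        struct qlist_head *link = active->next;
        qevent_sketch_t *ev = qlist_entry(link, qevent_sketch_t, active_entry);
        qlist_del(link);
        ev->handler(ev, ev->flags);
    }
}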
int cfio_recv_add_msg(int client_rank, int size, char *data,
                      uint32_t *func_code, int itr)
{
    int client_index;
    cfio_msg_t *msg;

    client_index = cfio_map_get_client_index_of_server(client_rank);

    if (cfio_buf_is_empty(buffer[client_index])) {
        cfio_buf_clear(buffer[client_index]);
    }

    if (is_free_space_enough(buffer[client_index], size) ==
            CFIO_BUF_FREE_SPACE_NOT_ENOUGH) {
#ifdef DEBUG
        // printf("%s server %d client %d CFIO_RECV_BUF_FULL \n",
        //        __func__, rank, client_index);
        printf("itr %d server %d client %d buffer_info: start, used, free: "
               "%lu, %lu, %lu .\n",
               itr, rank, client_rank,
               (uintptr_t)(buffer[client_index]->start_addr),
               (uintptr_t)(buffer[client_index]->used_addr),
               (uintptr_t)(buffer[client_index]->free_addr));
#endif
        return CFIO_RECV_BUF_FULL;
    }

    *func_code = *((uint32_t *)(data + sizeof(size_t)));
#ifdef DEBUG
    printf("itr %d server %d client %d func_code %d msg_size %d \n",
           itr, rank, client_rank, *func_code, size);
#endif

    if (!size) {
#ifdef DEBUG
        printf("itr %d server %d client %d buffer_info: start, used, free: "
               "%lu, %lu, %lu .\n",
               itr, rank, client_rank,
               (uintptr_t)(buffer[client_index]->start_addr),
               (uintptr_t)(buffer[client_index]->used_addr),
               (uintptr_t)(buffer[client_index]->free_addr));
#endif
        return CFIO_ERROR_UNEXPECTED_MSG;
    }

    if (FUNC_FINAL == *func_code) {
        // printf("%s server %d client %d FUNC_FINAL %d \n",
        //        __func__, rank, client_index, FUNC_FINAL);
        return CFIO_ERROR_NONE;
    } else if (FUNC_NC_CLOSE == *func_code) {
#ifdef DEBUG
        printf("itr %d server %d client %d buffer_info: start, end, used, free: "
               "%lu, %lu, %lu, %lu .\n",
               itr, rank, client_rank,
               (uintptr_t)(buffer[client_index]->start_addr),
               (uintptr_t)(buffer[client_index]->start_addr +
                           buffer[client_index]->size),
               (uintptr_t)(buffer[client_index]->used_addr),
               (uintptr_t)(buffer[client_index]->free_addr));
#endif
    }

    memcpy(buffer[client_index]->free_addr, data, size);

    msg = cfio_msg_create();
    msg->addr = buffer[client_index]->free_addr;
    msg->size = size;
    msg->src = client_rank;
    msg->dst = rank;
    msg->func_code = *func_code;

    use_buf(buffer[client_index], size);

    qlist_add_tail(&(msg->link), &(msg_head[client_index].link));

    return CFIO_ERROR_NONE;
}
int cfio_recv(
        int src, int rank, MPI_Comm comm,
        uint32_t *func_code)
{
    MPI_Status status;
    int size;
    cfio_msg_t *msg;
    int client_index;

    client_index = cfio_map_get_client_index_of_server(src);

    //times_start();

    debug(DEBUG_RECV, "client_index = %d", client_index);

    if (is_free_space_enough(buffer[client_index], max_msg_size) ==
            CFIO_BUF_FREE_SPACE_NOT_ENOUGH) {
        return CFIO_RECV_BUF_FULL;
    }
    // ensure_free_space(buffer[client_index], max_msg_size,
    //         cfio_recv_server_buf_free);

    MPI_Recv(buffer[client_index]->free_addr, max_msg_size, MPI_BYTE, src,
             MPI_ANY_TAG, comm, &status);
    MPI_Get_count(&status, MPI_BYTE, &size);
    debug(DEBUG_RECV, "recv: size = %d", size);

    //total_size += size;
    //if(min_size == 0 || min_size > size)
    //{
    //    min_size = size;
    //}
    //if(max_size == 0 || max_size < size)
    //{
    //    max_size = size;
    //}
    //printf("proc %d , recv: size = %d, from %d\n", rank, size, src);
    //debug(DEBUG_RECV, "code = %u", *((uint32_t *)buffer->free_addr));

    if (status.MPI_SOURCE != status.MPI_TAG) {
        return CFIO_ERROR_MPI_RECV;
    }

    msg = cfio_msg_create();
    msg->addr = buffer[client_index]->free_addr;
    msg->size = size;
    msg->src = status.MPI_SOURCE;
    msg->dst = rank;
    // read the func_code from the header but do not unpack the msg yet
    msg->func_code = *((uint32_t*)(msg->addr + sizeof(size_t)));
    *func_code = msg->func_code;
    debug(DEBUG_RECV, "func_code = %u", *func_code);

#ifndef SVR_RECV_ONLY
    use_buf(buffer[client_index], size);
#endif

    /* need lock */
    if ((*func_code) != FUNC_IO_END) {
#ifndef SVR_RECV_ONLY
        qlist_add_tail(&(msg->link), &(msg_head[client_index].link));
#endif
    }

    //debug(DEBUG_RECV, "used_size = %lu", used_buf_size(buffer));
    debug(DEBUG_RECV, "success return");

    return CFIO_ERROR_NONE;
}