int cfio_recv_init()
{
    int i, error;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    client_num = cfio_map_get_client_num_of_server(rank);

    /* one message queue head per client assigned to this server */
    msg_head = malloc(client_num * sizeof(cfio_msg_t));
    if(NULL == msg_head)
    {
        return CFIO_ERROR_MALLOC;
    }
    for(i = 0; i < client_num; i++)
    {
        INIT_QLIST_HEAD(&(msg_head[i].link));
    }

    buffer = malloc(client_num * sizeof(cfio_buf_t*));
    if(NULL == buffer)
    {
        free(msg_head);  /* don't leak the queue heads on failure */
        return CFIO_ERROR_MALLOC;
    }
    /* split the receive buffer evenly among the clients */
    for(i = 0; i < client_num; i++)
    {
        buffer[i] = cfio_buf_open(RECV_BUF_SIZE / client_num, &error);
        if(NULL == buffer[i])
        {
            error("");  /* presumably the project's logging macro, not the local int */
            return error;
        }
    }
    return CFIO_ERROR_NONE;
}
static int queues_init(struct PINT_manager_s *manager,
                       PINT_worker_inst *inst,
                       PINT_worker_attr_t *attr)
{
    struct PINT_worker_queues_s *w;
    int ret = 0;

    w = &inst->queues;
    w->attr = attr->u.queues;

    INIT_QLIST_HEAD(&w->queues);
    gen_mutex_init(&w->mutex);
    gen_cond_init(&w->cond);

    /* preallocate the entries used to post operations to the queues */
    w->qentries = malloc(sizeof(PINT_queue_entry_t) * w->attr.ops_per_queue);
    if(!w->qentries)
    {
        ret = -PVFS_ENOMEM;
        goto error_exit;
    }

    return 0;

error_exit:
    gen_cond_destroy(&w->cond);
    return ret;
}
void dbpf_open_cache_initialize(void)
{
    int i = 0, ret = 0;

    gen_mutex_lock(&cache_mutex);

    /* run through preallocated cache elements to initialize
     * and put them on the free list
     */
    if (OPEN_CACHE_SIZE == 0)
    {
        gossip_err("Warning: dbpf_open_cache disabled.\n");
    }

    for (i = 0; i < OPEN_CACHE_SIZE; i++)
    {
        prealloc[i].fd = -1;
        qlist_add(&prealloc[i].queue_link, &free_list);
    }

    gen_mutex_unlock(&cache_mutex);

    /* Initialize and create the worker thread for threaded deletes */
    INIT_QLIST_HEAD(&dbpf_unlink_context.global_list);
    pthread_mutex_init(&dbpf_unlink_context.mutex, NULL);
    pthread_cond_init(&dbpf_unlink_context.data_available, NULL);

    ret = pthread_create(&dbpf_unlink_context.thread_id, NULL,
                         unlink_bstream, (void *)&dbpf_unlink_context);
    if(ret)
    {
        gossip_err("dbpf_open_cache_initialize: failed [%d]\n", ret);
        return;
    }
}
/*
 * lsm_lp_init
 *   - initialize the lsm model
 *   - sets the disk to be idle now
 */
static void lsm_lp_init(lsm_state_t *ns, tw_lp *lp)
{
    memset(ns, 0, sizeof(*ns));

    ns->next_idle = tw_now(lp);

    // set the correct model
    const char *anno = codes_mapping_get_annotation_by_lpid(lp->gid);
    if (anno == NULL)
        ns->model = &model_unanno;
    else
    {
        int id = configuration_get_annotation_index(anno, anno_map);
        ns->model = &models_anno[id];
    }

    // initialize the scheduler if need be
    ns->use_sched = ns->model->use_sched > 0;
    if (ns->use_sched)
    {
        ns->sched.num_prios = ns->model->use_sched;
        ns->sched.active_count = 0;
        rc_stack_create(&ns->sched.freelist);
        ns->sched.queues =
            malloc(ns->sched.num_prios * sizeof(*ns->sched.queues));
        for (int i = 0; i < ns->sched.num_prios; i++)
            INIT_QLIST_HEAD(&ns->sched.queues[i]);
    }
    return;
}
static int threaded_queues_init(struct PINT_manager_s *manager,
                                PINT_worker_inst *inst,
                                PINT_worker_attr_t *attr)
{
    struct PINT_worker_threaded_queues_s *w;
    int ret = 0;
    int i;

    w = &inst->threaded;
    w->attr = attr->u.threaded;

    gen_mutex_init(&w->mutex);
    gen_cond_init(&w->cond);
    INIT_QLIST_HEAD(&w->queues);
    INIT_QLIST_HEAD(&w->inuse_queues);
    w->manager = manager;

    w->threads = malloc(sizeof(struct PINT_worker_thread_entry) *
                        w->attr.thread_count);
    if(!w->threads)
    {
        ret = -PVFS_ENOMEM;
        gen_cond_destroy(&w->cond);
        goto exit;
    }

    for(i = 0; i < w->attr.thread_count; ++i)
    {
        w->threads[i].worker = w;
        ret = PINT_worker_queue_thread_start(&w->threads[i]);
        if(ret < 0)
        {
            /* stop the other threads */
            for(; i >= 0; --i)
            {
                PINT_worker_queue_thread_stop(&w->threads[i]);
            }
            free(w->threads);
            gen_cond_destroy(&w->cond);
            /* without this the loop would resume over freed memory */
            goto exit;
        }
    }

exit:
    return ret;
}
dbpf_op_queue_p dbpf_op_queue_new(void)
{
    struct qlist_head *tmp_queue = NULL;

    tmp_queue = (struct qlist_head *)malloc(sizeof(struct qlist_head));
    if (tmp_queue)
    {
        INIT_QLIST_HEAD(tmp_queue);
    }
    return tmp_queue;
}
void fcfs_init(
        const struct model_net_method *method,
        const model_net_sched_cfg_params *params,
        int is_recv_queue,
        void **sched)
{
    *sched = malloc(sizeof(mn_sched_queue));
    mn_sched_queue *ss = *sched;
    ss->method = method;
    ss->is_recv_queue = is_recv_queue;
    ss->queue_len = 0;
    INIT_QLIST_HEAD(&ss->reqs);
}
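All of the initializers above lean on the same intrusive doubly linked list idiom: a struct qlist_head is embedded in each element (e.g. msg_head[i].link, prealloc[i].queue_link), and INIT_QLIST_HEAD marks a head as empty by pointing its next and prev at itself. The sketch below is a minimal, self-contained illustration of that idiom, not any of these projects' actual code: it re-declares stripped-down versions of INIT_QLIST_HEAD, qlist_add, and qlist_entry so it compiles standalone (the real definitions live in each project's own header, a descendant of the Linux kernel's list.h), and the item struct and main are purely hypothetical.

/* Minimal sketch of the qlist idiom; assumed to mirror the Linux
 * list.h pattern the projects' headers derive from. */
#include <stdio.h>
#include <stddef.h>

struct qlist_head {
    struct qlist_head *next, *prev;
};

/* an empty list is a head that points at itself */
#define INIT_QLIST_HEAD(ptr) do { \
    (ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/* insert 'new' right after 'head' (stack order) */
static void qlist_add(struct qlist_head *new, struct qlist_head *head)
{
    new->prev = head;
    new->next = head->next;
    head->next->prev = new;
    head->next = new;
}

/* recover the enclosing struct from its embedded link */
#define qlist_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
    int value;
    struct qlist_head link;   /* embedded, as in msg_head[i].link above */
};

int main(void)
{
    struct qlist_head head;
    struct item a = { .value = 1 }, b = { .value = 2 };
    struct qlist_head *pos;

    INIT_QLIST_HEAD(&head);
    qlist_add(&a.link, &head);
    qlist_add(&b.link, &head);

    /* walk the ring until we come back around to the head */
    for (pos = head.next; pos != &head; pos = pos->next)
        printf("%d\n", qlist_entry(pos, struct item, link)->value);

    return 0;
}

The pattern is the same in every queue above: the owner holds only the head, the elements carry their own links, so enqueue and dequeue need no allocation beyond the element itself.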