/*==============================================================================
 * - msgQ_init()
 *
 * - Initialize a MSG_QUE control block and register it on the global queue
 *   list.  Allocates the block when <pNewMsgQ> is NULL.  Returns the queue,
 *   or NULL on bad parameters or allocation failure.
 */
MSG_QUE *msgQ_init (MSG_QUE *pNewMsgQ, uint32 max_num, uint32 max_len)
{
    int cpsr_c;

    /* a queue must hold at least one message of at least one byte */
    if (max_num == 0 || max_len == 0) {
        return NULL;
    }

    /* caller may pass NULL to have the control block heap-allocated */
    if (pNewMsgQ == NULL) {
        pNewMsgQ = malloc(sizeof(MSG_QUE));
    }

    if (pNewMsgQ == NULL) {
        return NULL;
    }

    pNewMsgQ->max_num = max_num;
    pNewMsgQ->max_len = max_len;
    pNewMsgQ->cur_num = 0;

    dlist_init(&pNewMsgQ->msg_list);
    dlist_init(&pNewMsgQ->wait_send_list);
    dlist_init(&pNewMsgQ->wait_recv_list);

    /* publish to the global queue list inside the interrupt lock */
    cpsr_c = CPU_LOCK();
    dlist_add(&_G_msgQ_list, (DL_NODE *)pNewMsgQ);
    CPU_UNLOCK(cpsr_c);

    return pNewMsgQ;
}
struct os * os_create(void) { struct os *newOS; newOS = os_alloc(MAX_CPU); if (newOS == NULL) return NULL; rw_lock_init(&newOS->po_mutex); logical_mmap_init(newOS); newOS->po_state = PO_STATE_CREATED; newOS->po_lpid = atomic_add(&cur_lpid, 1); /* add to global OS hash */ lock_acquire(&hype_mutex); newOS->os_hashentry.he_key = newOS->po_lpid; ht_insert(&os_hash, &newOS->os_hashentry); lock_release(&hype_mutex); lock_init(&newOS->po_events.oe_lock); dlist_init(&newOS->po_events.oe_list); dlist_init(&newOS->po_resources); return newOS; }
/*
 * postorder - iterative post-order traversal of a quadtree.
 *
 * Returns a newly allocated queue (caller owns and frees it) holding the
 * tree's nodes in post-order, or NULL on allocation failure.
 *
 * Fixes: the scratch node pointer was heap-allocated and leaked, the
 * temporary stack container was leaked, and malloc results were unchecked.
 */
DList* postorder(QuadTree* tree)
{
    Stack *s = malloc(sizeof(Stack));
    if (s == NULL) {
        return NULL;
    }
    dlist_init(s, &free);

    Queue *q = malloc(sizeof(Queue));
    if (q == NULL) {
        free(s);
        return NULL;
    }
    dlist_init(q, &free);

    push(s, tree->root);

    /* scratch pointer filled in by pop(); no heap allocation needed */
    QuadTreeNode *node = NULL;

    while (pop(s, (void**)&node) != -1) {
        if (quadtree_is_visited(node)) {
            /* children already emitted: node is next in post-order */
            enqueue(q, node);
        } else {
            /* first visit: re-push the node, then push its children so
             * they are processed (and emitted) before it */
            quadtree_mark(node);
            push(s, node);
            if (!quadtree_is_leaf(node)) {
                DList* children = node->children_list;
                DListElmt* elem = dlist_head(children);
                while (elem != NULL) {
                    push(s, elem->data);
                    elem = dlist_next(elem);
                }
            }
        }
    }

    /* stack is empty once pop() fails; release its container */
    free(s);
    return q;
}
/*
 * create_map - build an AOI map covering the rectangle from <top_left>
 * to <bottom_right>, partitioned into BLOCK_LENGTH x BLOCK_LENGTH blocks.
 *
 * The map header and its block array are one contiguous allocation; each
 * block's object list is initialized empty.  The enter/leave callbacks
 * are invoked by the AOI engine when objects cross block boundaries.
 *
 * Returns the new map, or NULL on allocation failure (fix: the calloc
 * result was previously dereferenced without a check).
 */
struct map *create_map(struct point2D *top_left, struct point2D *bottom_right,
                       callback_ enter_callback, callback_ leave_callback)
{
    /* number of blocks per axis, rounding partial blocks up */
    uint32_t length = abs(top_left->x - bottom_right->x);
    uint32_t width = abs(top_left->y - bottom_right->y);
    uint32_t x_count = length % BLOCK_LENGTH == 0 ? length/BLOCK_LENGTH : length/BLOCK_LENGTH + 1;
    uint32_t y_count = width % BLOCK_LENGTH == 0 ? width/BLOCK_LENGTH : width/BLOCK_LENGTH + 1;

    /* one allocation: map header followed by x_count*y_count blocks */
    struct map *m = calloc(1, x_count*y_count*sizeof(struct map_block) + sizeof(struct map));
    if (m == NULL) {
        return NULL;
    }

    m->top_left = *top_left;
    m->bottom_right = *bottom_right;
    m->x_count = x_count;
    m->y_count = y_count;

    uint32_t x, y;
    for (y = 0; y < y_count; ++y) {
        for (x = 0; x < x_count; ++x) {
            struct map_block *b = get_block(m, y, x);
            dlist_init(&b->aoi_objs);
            b->x = x;
            b->y = y;
        }
    }

    /* objects whose AOI spans the whole map live on a separate list */
    dlist_init(&m->super_aoi_objs);
    m->enter_callback = enter_callback;
    m->leave_callback = leave_callback;
    return m;
}
/*
 * rxm_recv_queue_init - set up a receive queue: build its free-stack of
 * <size> entries and reset the posted and unexpected-message lists.
 *
 * Returns 0 on success or -FI_ENOMEM when the free-stack cannot be
 * created.
 */
static int rxm_recv_queue_init(struct rxm_recv_queue *recv_queue, size_t size)
{
    recv_queue->recv_fs = rxm_recv_fs_create(size);
    if (!recv_queue->recv_fs) {
        return -FI_ENOMEM;
    }

    /* both lists start out empty */
    dlist_init(&recv_queue->unexp_msg_list);
    dlist_init(&recv_queue->recv_list);

    return 0;
}
/* Init tty related queues and pcbs: one write queue, one read queue, and
 * empty writer/reader slots per terminal. */
void init_tty()
{
    int term;

    for (term = 0; term < NUM_TERMINALS; term++) {
        tty_write_queues[term] = dlist_init();
        tty_read_queues[term] = dlist_init();
        tty_writing_procs[term] = NULL;
        tty_reading_procs[term] = NULL;
    }
}
/*
 * mrail_recv_queue_init - reset a receive queue's lists and install its
 * matching and unexpected-message callbacks.
 */
static void mrail_recv_queue_init(struct fi_provider *prov,
                                  struct mrail_recv_queue *recv_queue,
                                  dlist_func_t match_recv,
                                  dlist_func_t match_unexp,
                                  mrail_get_unexp_msg_entry_func get_unexp_msg_entry)
{
    recv_queue->prov = prov;

    /* empty posted-receive and unexpected-message lists */
    dlist_init(&recv_queue->unexp_msg_list);
    dlist_init(&recv_queue->recv_list);

    /* matching callbacks used when pairing receives with messages */
    recv_queue->match_unexp = match_unexp;
    recv_queue->match_recv = match_recv;
    recv_queue->get_unexp_msg_entry = get_unexp_msg_entry;
}
/*
 * rxd_get_rx_entry - pop a receive entry from the endpoint's free-stack,
 * reset its list links, and place it on the endpoint's active rx list.
 *
 * Returns NULL when the free-stack is exhausted.
 */
struct rxd_rx_entry *rxd_get_rx_entry(struct rxd_ep *ep)
{
    struct rxd_rx_entry *entry;

    if (freestack_isempty(ep->rx_entry_fs)) {
        return NULL;
    }

    entry = freestack_pop(ep->rx_entry_fs);

    /* key = index of this entry within the free-stack's buffer */
    entry->key = entry - &ep->rx_entry_fs->buf[0];

    dlist_init(&entry->wait_entry);
    dlist_init(&entry->entry);
    dlist_insert_tail(&entry->entry, &ep->rx_entry_list);

    return entry;
}
/* Init basic process management, timer and a dummy kernel proc */
void init_processes()
{
    next_pid = 0;
    log_info("Inside %s", __func__);

    timer_init();
    log_info("Init timer done");

    ready_queue = dlist_init();
    wait_queue = dlist_init();
    /* fix: the second operand previously re-tested ready_queue, so a
     * failed wait_queue allocation went undetected */
    if (!ready_queue || !wait_queue) {
        log_err("Cannot init ready queue!");
    }
    log_info("Init queue done");

    init_idle_proc();
    log_info("Init idle done");
}
/*
 * new_msgque - create a message queue with per-thread staging.
 *
 * <syn_size> is stored as the queue's sync threshold; <destroyer> is the
 * callback used to dispose of queued items.  Registers the per-thread
 * key (once, process-wide) and binds the calling thread's local queue.
 *
 * Returns the new queue, or NULL on allocation failure (fix: the calloc
 * result was previously dereferenced without a check).
 */
struct msg_que* new_msgque(uint32_t syn_size, item_destroyer destroyer)
{
    pthread_once(&g_msg_que_key_once, msg_que_once_routine);

    struct msg_que *que = calloc(1, sizeof(*que));
    if (!que) {
        return NULL;
    }

    pthread_key_create(&que->t_key, delete_per_thread_que);
    dlist_init(&que->blocks);
    que->mtx = mutex_create();
    que->refbase.destroyer = delete_msgque;
    llist_init(&que->share_que);
    dlist_init(&que->can_interrupt);
    que->syn_size = syn_size;
    que->destroy_function = destroyer;

    /* bind the creating thread's per-thread queue immediately */
    get_per_thread_que(que, MSGQ_NONE);
    return que;
}
/* * define methods needed for the GNI fabric provider */ static int gnix_fabric_open(struct fi_fabric_attr *attr, struct fid_fabric **fabric, void *context) { struct gnix_fid_fabric *fab; if (strcmp(attr->name, gnix_fab_name)) { return -FI_ENODATA; } fab = calloc(1, sizeof(*fab)); if (!fab) { return -FI_ENOMEM; } /* * set defaults related to use of GNI datagrams */ fab->n_bnd_dgrams = gnix_def_gni_n_dgrams; fab->n_wc_dgrams = gnix_def_gni_n_wc_dgrams; fab->datagram_timeout = gnix_def_gni_datagram_timeouts; fab->fab_fid.fid.fclass = FI_CLASS_FABRIC; fab->fab_fid.fid.context = context; fab->fab_fid.fid.ops = &gnix_fab_fi_ops; fab->fab_fid.ops = &gnix_fab_ops; _gnix_ref_init(&fab->ref_cnt, 1, __fabric_destruct); dlist_init(&fab->domain_list); *fabric = &fab->fab_fid; return FI_SUCCESS; }
/*
 * Initialize all available memory monitors.
 *
 * Sets up the userfaultfd monitor's lock and subscription list, declares
 * the MR-cache tuning parameters, then reads any user-configured values
 * into cache_params.
 */
void ofi_monitor_init(void)
{
    fastlock_init(&uffd_monitor->lock);
    dlist_init(&uffd_monitor->list);

    /* declare the tunables so they are discoverable/settable by users */
    fi_param_define(NULL, "mr_cache_max_size", FI_PARAM_SIZE_T,
            "Defines the total number of bytes for all memory"
            " regions that may be tracked by the MR cache."
            " Setting this will reduce the amount of memory"
            " not actively in use that may be registered."
            " (default: 0 no limit is enforced)");
    fi_param_define(NULL, "mr_cache_max_count", FI_PARAM_SIZE_T,
            "Defines the total number of memory regions that"
            " may be store in the cache. Setting this will"
            " reduce the number of registered regions, regardless"
            " of their size, stored in the cache. Setting this"
            " to zero will disable MR caching. (default: 1024)");
    fi_param_define(NULL, "mr_cache_merge_regions", FI_PARAM_BOOL,
            "If set to true, overlapping or adjacent memory"
            " regions will be combined into a single, larger"
            " region. Merging regions can reduce the cache"
            " memory footprint, but can negatively impact"
            " performance in some situations. (default: false)");

    /* pull the configured values (if any) into cache_params */
    fi_param_get_size_t(NULL, "mr_cache_max_size", &cache_params.max_size);
    fi_param_get_size_t(NULL, "mr_cache_max_count", &cache_params.max_cnt);
    fi_param_get_bool(NULL, "mr_cache_merge_regions",
              &cache_params.merge_regions);

    /* 0 means "no limit": represented internally as SIZE_MAX */
    if (!cache_params.max_size)
        cache_params.max_size = SIZE_MAX;
}
/*
 * _hashtable_rehash - rebuild the table with <newsize> buckets.
 *
 * Allocates a fresh bucket array, copies every element into the bucket
 * selected by its hash modulo the new size (dlist_push copies element
 * data of t->element_size bytes), then replaces t->table with the new
 * array.
 *
 * NOTE(review): only array_destroy() is called on the old table; this
 * assumes it also releases each bucket's DLIST node storage — confirm
 * against the ARRAY/DLIST implementation.
 */
void _hashtable_rehash(HASHTABLE *t, size_t newsize)
{
    ARRAY table;
    DLIST *bucket;
    size_t i, j, hash;
    DLIST_ITER it, end;

    /* build the replacement bucket array: one empty DLIST per slot */
    array_init(&table, sizeof(DLIST));
    array_resize(&table, newsize);
    for (i = 0; i != newsize; ++i) {
        dlist_init((DLIST*)array_at(&table, i), t->element_size);
    }

    /* rehash every element of every old bucket into the new array */
    j = array_size(&t->table);
    for (i = 0; i != j; ++i) {
        bucket = (DLIST*)array_at(&t->table, i);
        if (dlist_size(bucket)) {
            end = dlist_end(bucket);
            for (it = dlist_begin(bucket); it != end; it = dlist_next(it)) {
                hash = _hashtable_hash(t, dlist_at(it)) % newsize;
                dlist_push((DLIST*)array_at(&table, hash), dlist_at(it));
            }
        }
    }

    /* swap the new array into the table (shallow struct copy) */
    array_destroy(&t->table);
    memcpy(&t->table, &table, sizeof(ARRAY));
}
/* init timer: arm the round-robin quantum countdown and create the
 * queue of delayed (sleeping) processes */
void timer_init(void)
{
    timer.round_robin_quantumn = DEFAULT_QUANTUMN;
    timer.tick = timer.round_robin_quantumn;

    log_info("Init delay queue");
    delay_queue = dlist_init();
    log_info("Done init delay queue");
}
/*
 * util_fabric_init - prepare a utility fabric: zeroed refcount, empty
 * domain list, fresh lock, and the given name.
 */
static void util_fabric_init(struct util_fabric *fabric, const char *name)
{
    fabric->name = name;
    atomic_initialize(&fabric->ref, 0);
    fastlock_init(&fabric->lock);
    dlist_init(&fabric->domain_list);
}
int util_buf_pool_create_attr(struct util_buf_attr *attr, struct util_buf_pool **buf_pool) { size_t entry_sz; ssize_t hp_size; (*buf_pool) = calloc(1, sizeof(**buf_pool)); if (!*buf_pool) return -FI_ENOMEM; (*buf_pool)->attr = *attr; entry_sz = (attr->size + sizeof(struct util_buf_footer)); (*buf_pool)->entry_sz = fi_get_aligned_sz(entry_sz, attr->alignment); hp_size = ofi_get_hugepage_size(); if ((*buf_pool)->attr.chunk_cnt * (*buf_pool)->entry_sz < hp_size) (*buf_pool)->attr.is_mmap_region = 0; else (*buf_pool)->attr.is_mmap_region = 1; if (!(*buf_pool)->attr.indexing.ordered) slist_init(&(*buf_pool)->list.buffers); else dlist_init(&(*buf_pool)->list.regions); return FI_SUCCESS; }
/*
 * rxd_rx_entry_init - acquire and populate a receive entry for a posted
 * receive described by <iov>/<iov_count>.
 *
 * Returns the initialized entry, or NULL (with a warning) when no entry
 * is available.
 */
struct rxd_x_entry *rxd_rx_entry_init(struct rxd_ep *ep,
        const struct iovec *iov, size_t iov_count, uint64_t tag,
        uint64_t ignore, void *context, fi_addr_t addr, uint32_t op,
        uint32_t flags)
{
    struct rxd_x_entry *entry = rxd_get_rx_entry(ep, op);

    if (!entry) {
        FI_WARN(&rxd_prov, FI_LOG_EP_CTRL, "could not get rx entry\n");
        return NULL;
    }

    /* matching and transfer-progress state */
    entry->peer = addr;
    entry->flags = flags;
    entry->op = op;
    entry->ignore = ignore;
    entry->bytes_done = 0;
    entry->offset = 0;
    entry->next_seg_no = 0;

    /* destination buffer description */
    entry->iov_count = iov_count;
    memcpy(entry->iov, iov, sizeof(*entry->iov) * iov_count);

    /* completion-event template for when the receive finishes */
    entry->cq_entry.op_context = context;
    entry->cq_entry.len = ofi_total_iov_len(iov, iov_count);
    entry->cq_entry.buf = iov[0].iov_base;
    entry->cq_entry.tag = tag;
    entry->cq_entry.flags = ofi_rx_cq_flags(op);

    dlist_init(&entry->entry);
    return entry;
}
int sock_poll_open(struct fid_domain *domain, struct fi_poll_attr *attr, struct fid_poll **pollset) { struct sock_domain *dom; struct sock_poll *poll; if (attr && sock_poll_verify_attr(attr)) return -FI_EINVAL; dom = container_of(domain, struct sock_domain, dom_fid); poll = calloc(1, sizeof(*poll)); if (!poll) return -FI_ENOMEM; dlist_init(&poll->fid_list); poll->poll_fid.fid.fclass = FI_CLASS_POLL; poll->poll_fid.fid.context = 0; poll->poll_fid.fid.ops = &sock_poll_fi_ops; poll->poll_fid.ops = &sock_poll_ops; poll->domain = dom; atomic_inc(&dom->ref); *pollset = &poll->poll_fid; return 0; }
/*
 * fi_ibv_add_rai - record an rdma_addrinfo under the verbs device that
 * owns <id>, creating the per-device entry on first use.
 *
 * Returns 0 on success or -FI_ENOMEM on any allocation failure; partial
 * allocations are freed before returning.
 */
static int fi_ibv_add_rai(struct dlist_entry *verbs_devs, struct rdma_cm_id *id,
              struct rdma_addrinfo *rai)
{
    struct verbs_dev_info *dev;
    struct verbs_addr *addr;
    const char *dev_name;

    if (!(addr = malloc(sizeof(*addr))))
        return -FI_ENOMEM;

    addr->rai = rai;

    /* look for an existing entry for this device; on a match, dev points
     * at it and we jump straight to appending the address */
    dev_name = ibv_get_device_name(id->verbs->device);
    dlist_foreach_container(verbs_devs, struct verbs_dev_info, dev, entry)
        if (!strcmp(dev_name, dev->name))
            goto add_rai;

    /* first address for this device: allocate and link a new dev entry */
    if (!(dev = malloc(sizeof(*dev))))
        goto err1;

    if (!(dev->name = strdup(dev_name)))
        goto err2;

    dlist_init(&dev->addrs);
    dlist_insert_tail(&dev->entry, verbs_devs);
add_rai:
    dlist_insert_tail(&addr->entry, &dev->addrs);
    return 0;
err2:
    free(dev);
err1:
    free(addr);
    return -FI_ENOMEM;
}
int tcpx_conn_mgr_init(struct tcpx_fabric *tcpx_fabric) { int ret; dlist_init(&tcpx_fabric->poll_mgr.list); fastlock_init(&tcpx_fabric->poll_mgr.lock); ret = fd_signal_init(&tcpx_fabric->poll_mgr.signal); if (ret) { FI_WARN(&tcpx_prov, FI_LOG_FABRIC,"signal init failed\n"); goto err; } tcpx_fabric->poll_mgr.run = 1; ret = pthread_create(&tcpx_fabric->conn_mgr_thread, 0, tcpx_conn_mgr_thread, (void *) tcpx_fabric); if (ret) { FI_WARN(&tcpx_prov, FI_LOG_FABRIC, "Failed creating tcpx connection manager thread"); goto err1; } return 0; err1: fd_signal_free(&tcpx_fabric->poll_mgr.signal); err: fastlock_destroy(&tcpx_fabric->poll_mgr.lock); return ret; }
int ofi_monitor_subscribe(struct ofi_notification_queue *nq, void *addr, size_t len, struct ofi_subscription *subscription) { int ret; FI_DBG(&core_prov, FI_LOG_MR, "subscribing addr=%p len=%zu subscription=%p nq=%p\n", addr, len, subscription, nq); /* Ensure the subscription is initialized before we can get events */ dlist_init(&subscription->entry); subscription->nq = nq; subscription->addr = addr; subscription->len = len; fastlock_acquire(&nq->lock); nq->refcnt++; fastlock_release(&nq->lock); ret = nq->monitor->subscribe(nq->monitor, addr, len, subscription); if (OFI_UNLIKELY(ret)) { FI_WARN(&core_prov, FI_LOG_MR, "Failed (ret = %d) to monitor addr=%p len=%zu", ret, addr, len); fastlock_acquire(&nq->lock); nq->refcnt--; fastlock_release(&nq->lock); } return ret; }
/*
 * task_daemon_init - set up the console daemon task: register the global
 * command table on the object's command list, describe the task object,
 * register it with the task manager, and start the console.
 *
 * Returns the OR of all step statuses (OSA_SOK when everything passed).
 */
status_t task_daemon_init(osa_console_object_t *pobj)
{
    int i;
    status_t status = OSA_SOK;

    status |= dlist_init(&pobj->m_cmds_list);

    /* chain every global console command onto this object's list */
    for (i = 0; i < OSA_ARRAYSIZE(glb_vcs_cmds); i++) {
        status |= dlist_initialize_element((dlist_element_t *)&glb_vcs_cmds[i]);
        status |= dlist_put_tail(&pobj->m_cmds_list, (dlist_element_t *)&glb_vcs_cmds[i]);
    }

    snprintf((char *)pobj->m_name, sizeof(pobj->m_name), "%s", "MAIN_TSK");

    pobj->m_task_obj.m_name = (unsigned char *)pobj->m_name;
    pobj->m_task_obj.m_main = NULL;
    pobj->m_task_obj.m_find = __osa_console_cmd_find;
    pobj->m_task_obj.m_pri = 0;
    pobj->m_task_obj.m_stack_size = 0;
    pobj->m_task_obj.m_init_state = 0;
    pobj->m_task_obj.m_userdata = (void *)pobj;
    pobj->m_task_obj.m_task = TASK_INVALID_TSK;

    /* fix: these results were previously assigned with '=', overwriting
     * (and hiding) any earlier failure status from the caller */
    status |= task_mgr_register(&pobj->m_task_obj);
    status |= osa_console_init(NULL);

    return status;
}
/*
 * __gnix_buddy_create_lists - build the buddy allocator's free lists.
 *
 * Allocates one list head per block size, from MIN_BLOCK_SIZE up to
 * alloc_handle->max (log2(max / MIN_BLOCK_SIZE) + 1 lists), then seeds
 * the largest-size list with every max-sized block in the managed
 * region [base, base + len).
 *
 * Returns FI_SUCCESS, or -FI_ENOMEM when the list array cannot be
 * allocated.
 */
static inline int __gnix_buddy_create_lists(gnix_buddy_alloc_handle_t
                        *alloc_handle)
{
    uint32_t i, offset = 0;

    alloc_handle->nlists = (uint32_t) __gnix_buddy_log2(alloc_handle->max /
                                 MIN_BLOCK_SIZE) + 1;
    alloc_handle->lists = calloc(1, sizeof(struct dlist_entry) *
                     alloc_handle->nlists);

    if (unlikely(!alloc_handle->lists)) {
        GNIX_WARN(FI_LOG_EP_CTRL,
              "Could not create buddy allocator lists.\n");
        return -FI_ENOMEM;
    }

    for (i = 0; i < alloc_handle->nlists; i++) {
        dlist_init(alloc_handle->lists + i);
    }

    /* Insert free blocks of size max in sorted order into last list.
     * The dlist_entry lives in-place at the start of each free block,
     * so the free block's own memory serves as the list node. */
    for (i = 0; i < alloc_handle->len / alloc_handle->max; i++) {
        dlist_insert_tail((void *) ((uint8_t *) alloc_handle->base + offset),
                  alloc_handle->lists + alloc_handle->nlists - 1);
        offset += alloc_handle->max;
    }

    return FI_SUCCESS;
}
/*
 * dlist_remove - unlink <entry> from its list, then reset it to the
 * empty (self-referencing) state so it can be safely reused or removed
 * again.
 */
static inline void dlist_remove(struct dlist_entry_t *entry)
{
    struct dlist_entry_t *before = entry->prev;
    struct dlist_entry_t *after = entry->next;

    before->next = after;
    after->prev = before;

    dlist_init(entry);
}
static int sock_poll_add(struct fid_poll *pollset, struct fid *event_fid, uint64_t flags) { struct sock_poll *poll; struct sock_fid_list *list_item; struct sock_cq *cq; struct sock_cntr *cntr; poll = container_of(pollset, struct sock_poll, poll_fid.fid); list_item = calloc(1, sizeof(*list_item)); if (!list_item) return -FI_ENOMEM; list_item->fid = event_fid; dlist_init(&list_item->entry); dlist_insert_after(&list_item->entry, &poll->fid_list); switch (list_item->fid->fclass) { case FI_CLASS_CQ: cq = container_of(list_item->fid, struct sock_cq, cq_fid); ofi_atomic_inc32(&cq->ref); break; case FI_CLASS_CNTR: cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid); ofi_atomic_inc32(&cntr->ref); break; default: SOCK_LOG_ERROR("Invalid fid class\n"); return -FI_EINVAL; } return 0; }
/*
 * dlist_int_test - exercise the dlist API end to end with MAX ints:
 * add/length, search, printf, foreach (sum and max), delete, destroy.
 * Always returns 0; failures fire assert().
 */
static int dlist_int_test()
{
    printf("double list test ... MAX=%d\n", MAX);
    int arr[MAX];
    int i;
    int sum = 0;
    int max = INT_MIN;
    printf("MAX = %d\n", MAX);
    struct dlist *list = dlist_init();
    if (list) {
        /*
         * fill arr with 0..MAX-1; the list stores pointers into it
         */
        for (i = 0; i < MAX; i++) {
            arr[i] = i;
        }
        /*
         * dlist_add/dlist_length test: length grows by one per add
         */
        for (i = 0; i < MAX; i++) {
            dlist_add(list, arr + i);
            assert(dlist_length(list) == i + 1);
        }
        /*
         * dlist_search test: every inserted element must be found
         */
        for (i = 0; i < MAX; i++) {
            assert(dlist_search(list, arr + i) == DLIST_RET_OK);
        }
        /*
         * dlist_printf and dlist_foreach tests (sum and max callbacks)
         */
        assert(dlist_printf(list, user_printf) == DLIST_RET_OK);
        assert(dlist_foreach(list, sum_cb, &sum) == DLIST_RET_OK);
        assert(dlist_foreach(list, max_cb, &max) == DLIST_RET_OK);
        /*
         * dlist_delete test: remove from the tail, length shrinks by one
         */
        for (i = MAX - 1; i >= 0; i--) {
            assert(dlist_length(list) == i + 1);
            assert(dlist_delete(list, arr + i) == DLIST_RET_OK);
            assert(dlist_length(list) == i);
        }
        /*
         * dlist_destroy test: releases the now-empty list
         */
        assert(dlist_destroy(list) == DLIST_RET_OK);
    }
    printf("sum = %d\n", sum);
    printf("max = %d\n", max);
    return 0;
}
/*
 * register_event - attach <le> to the partition's event list under the
 * event lock.  Always returns 0.
 */
sval register_event(struct os *os, struct lpar_event *le)
{
    /* the node must be self-initialized before insertion */
    dlist_init(&le->le_list);

    lock_acquire(&os->po_events.oe_lock);
    dlist_insert(&os->po_events.oe_list, &le->le_list);
    lock_release(&os->po_events.oe_lock);

    return 0;
}
/*
 * map_range_append_ - group a run of consecutive, same-type map entries
 * (starting at maps[*i]) into one map_range_t_ and append it to <ranges>.
 *
 * A run extends while the next map's `from` is exactly one past the
 * previous entry's `from` and its type matches.  The widest from/to code
 * values seen in the run determine the range's code widths.
 *
 * Returns 1 when a range was appended (*i advanced past the run),
 * 0 when *i is already at map_count, and -1 on allocation failure.
 */
static int map_range_append_(
        struct dlist_entry_t *ranges,
        struct map_t_ *maps,
        const size_t map_count,
        size_t *i)
{
    size_t idx = *i;
    if (idx >= map_count) {
        return 0;
    }
    uint32_t from_code_max = 0;
    uint32_t to_code_max = 0;
    struct map_range_t_ range = {
        .start_map_idx = idx,
        .end_map_idx = idx,
        .type = map_type_(&maps[idx])
    };
    dlist_init(&range.list);
    do {
        /* track the widest source and target code points in the run */
        if (from_code_max < maps[idx].from) {
            from_code_max = maps[idx].from;
        }
        if (to_code_max < maps[idx].to) {
            to_code_max = maps[idx].to;
        }
        range.end_map_idx = idx++;
    } while (
        idx < map_count
        && maps[range.end_map_idx].from + 1 == maps[idx].from
        && map_type_(&maps[idx]) == range.type);
    /* NOTE(review): this overwrite makes end_map_idx exclusive (one past
     * the run), discarding the inclusive value set inside the loop —
     * confirm consumers expect an exclusive end index. */
    range.end_map_idx = idx;
    struct map_range_t_ *r = malloc(sizeof(*r));
    if (r == NULL) {
        return -1;
    }
    *r = range;
    r->from_code_width = map_code_width_(from_code_max);
    r->to_code_width = map_code_width_(to_code_max);
    dlist_insert_before(ranges, &r->list);
    *i = idx;
    return 1;
}
dlist *dlist_create() { dlist *tmp = (dlist *)malloc(sizeof(dlist)); if (!tmp) return NULL; dlist_init(tmp); return tmp; }
poller_t poller_new() { poller_t e = malloc(sizeof(*e)); if(e) { e->Init = epoll_init; e->Loop = epoll_loop; e->Register = epoll_register; e->UnRegister = epoll_unregister; e->UnRegisterRecv = epoll_unregister_recv; e->UnRegisterSend = epoll_unregister_send; e->WakeUp = epoll_wakeup; e->actived_index = 0; dlist_init(&e->actived[0]); dlist_init(&e->actived[1]); dlist_init(&e->connecting); } return e; }