/*
 * Create and initialize a GTPv1-U stack instance.
 *
 * @param[out] hGtpuStackHandle  Receives the opaque stack handle
 *                               (set to NULL on allocation failure).
 * @param[in]  stackType         Stack personality stored in the instance.
 * @return NW_GTPV1U_OK on success, NW_GTPV1U_FAILURE if allocation fails.
 *
 * Fixes: the original memset() zeroed the malloc() result *before* the
 * NULL check, dereferencing a potentially-NULL pointer; the pointer was
 * also truncated through a (uint32_t) cast when seeding 'seq'; the dead
 * 'if (0 == 1)' banner branch is removed.
 */
NwGtpv1uRcT
nwGtpv1uInitialize(NW_INOUT NwGtpv1uStackHandleT *hGtpuStackHandle,
                   uint32_t stackType)
{
  NwGtpv1uRcT     rc   = NW_GTPV1U_FAILURE;
  NwGtpv1uStackT *thiz = (NwGtpv1uStackT *) malloc(sizeof(NwGtpv1uStackT));

  if (thiz) {
    /* Zero only after the allocation is known to have succeeded. */
    memset(thiz, 0, sizeof(NwGtpv1uStackT));

    thiz->id        = (NwPtrT) thiz;
    thiz->stackType = stackType;
    /* Seed the sequence counter from the instance address so distinct
     * stacks start from different values; go through uintptr_t to avoid
     * truncating a 64-bit pointer through uint32_t. */
    thiz->seq       = (uint16_t) ((uintptr_t) thiz);

    RB_INIT(&(thiz->outstandingTxSeqNumMap));
    RB_INIT(&(thiz->outstandingRxSeqNumMap));
    RB_INIT(&(thiz->sessionMap));
    RB_INIT(&(thiz->teidMap));

    rc = NW_GTPV1U_OK;
  }

  /* On failure this publishes a NULL handle to the caller. */
  *hGtpuStackHandle = (NwGtpv1uStackHandleT) thiz;
  return rc;
}
/*
 * Reset the three global idnode lookup trees (instances, classes and
 * root classes) to the empty state.
 */
void
idnode_init(void)
{
  RB_INIT(&idrootclasses);
  RB_INIT(&idclasses);
  RB_INIT(&idnodes);
}
/*
 * Initialize a uv loop (libev-backed era of libuv).
 *
 * Zeroes the structure, empties every handle/request container, creates
 * (or reuses) the underlying libev loop, and wires up the eio channel.
 * Always returns 0.
 *
 * @param loop          Loop to initialize (fully overwritten).
 * @param default_loop  Non-zero to bind the process-wide ev default loop
 *                      instead of creating a fresh one.
 */
int uv__loop_init(uv_loop_t* loop, int default_loop) {
#if HAVE_KQUEUE
  /* Force the kqueue backend where available. */
  int flags = EVBACKEND_KQUEUE;
#else
  int flags = EVFLAG_AUTO;
#endif

  memset(loop, 0, sizeof(*loop));

  /* Empty trees and queues for every handle/request category. */
  RB_INIT(&loop->ares_handles);
  RB_INIT(&loop->timer_handles);
  ngx_queue_init(&loop->active_reqs);
  ngx_queue_init(&loop->idle_handles);
  ngx_queue_init(&loop->async_handles);
  ngx_queue_init(&loop->check_handles);
  ngx_queue_init(&loop->prepare_handles);
  ngx_queue_init(&loop->handle_queue);

  loop->closing_handles = NULL;
  loop->channel = NULL;
  /* Loop clock in milliseconds (uv_hrtime returns nanoseconds). */
  loop->time = uv_hrtime() / 1000000;
  /* -1 marks the async wakeup pipe as not-yet-created. */
  loop->async_pipefd[0] = -1;
  loop->async_pipefd[1] = -1;

  /* Create/attach the libev loop and let callbacks find us from it. */
  loop->ev = (default_loop ? ev_default_loop : ev_loop_new)(flags);
  ev_set_userdata(loop->ev, loop);
  eio_channel_init(&loop->uv_eio_channel, loop);

#if __linux__
  RB_INIT(&loop->inotify_watchers);
  loop->inotify_fd = -1;   /* inotify fd opened lazily */
#endif
#if HAVE_PORTS_FS
  loop->fs_fd = -1;        /* Solaris event-port fd opened lazily */
#endif
  return 0;
}
/*
 * One-time initialization of a Windows event loop: create the I/O
 * completion port, reset the loop clock, and zero every queue, tree
 * and counter the loop owns.  Aborts the process if the IOCP cannot
 * be created.
 */
static void uv_loop_init(uv_loop_t* loop) {
  /* The completion port is the heart of the loop; failure is fatal. */
  loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (loop->iocp == NULL) {
    uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
  }

  loop->refs = 0;
  uv_update_time(loop);

  /* Nothing pending yet. */
  loop->pending_reqs_tail = NULL;
  loop->endgame_handles = NULL;
  loop->check_handles = NULL;
  loop->prepare_handles = NULL;
  loop->idle_handles = NULL;
  loop->next_check_handle = NULL;
  loop->next_prepare_handle = NULL;
  loop->next_idle_handle = NULL;

  /* Timers and c-ares handles live in red-black trees. */
  RB_INIT(&loop->timers);
  RB_INIT(&loop->uv_ares_handles_);
  loop->ares_active_sockets = 0;
  loop->ares_chan = NULL;

  loop->active_tcp_streams = 0;
  loop->active_udp_streams = 0;
  loop->last_err = uv_ok_;
}
/*
 * Handle an eNB registration request.
 *
 * If an s1ap instance already exists for 'instance', verify (DevCheck)
 * that the retry carries identical identity parameters; otherwise
 * allocate a new instance, copy the registration parameters into it and
 * insert it into the global instance list.  Finally initiate SCTP
 * association setup towards every MME address in the request.
 *
 * @param instance           Module instance id of the registering eNB.
 * @param s1ap_register_eNB  Registration request (must be non-NULL).
 *
 * Fixes: the S1AP_INFO format string was split by a raw newline in the
 * source (an unterminated string literal — a compile error); it is
 * rejoined into a single literal below.
 */
void s1ap_eNB_handle_register_eNB(instance_t instance,
                                  s1ap_register_enb_req_t *s1ap_register_eNB)
{
  s1ap_eNB_instance_t *new_instance;
  uint8_t index;

  DevAssert(s1ap_register_eNB != NULL);

  /* Look if the provided instance already exists */
  new_instance = s1ap_eNB_get_instance(instance);

  if (new_instance != NULL) {
    /* Checks if it is a retry on the same eNB */
    DevCheck(new_instance->eNB_id == s1ap_register_eNB->eNB_id,
             new_instance->eNB_id, s1ap_register_eNB->eNB_id, 0);
    DevCheck(new_instance->cell_type == s1ap_register_eNB->cell_type,
             new_instance->cell_type, s1ap_register_eNB->cell_type, 0);
    DevCheck(new_instance->tac == s1ap_register_eNB->tac,
             new_instance->tac, s1ap_register_eNB->tac, 0);
    DevCheck(new_instance->mcc == s1ap_register_eNB->mcc,
             new_instance->mcc, s1ap_register_eNB->mcc, 0);
    DevCheck(new_instance->mnc == s1ap_register_eNB->mnc,
             new_instance->mnc, s1ap_register_eNB->mnc, 0);
    DevCheck(new_instance->mnc_digit_length == s1ap_register_eNB->mnc_digit_length,
             new_instance->mnc_digit_length, s1ap_register_eNB->mnc_digit_length, 0);
    DevCheck(new_instance->default_drx == s1ap_register_eNB->default_drx,
             new_instance->default_drx, s1ap_register_eNB->default_drx, 0);
  } else {
    new_instance = calloc(1, sizeof(s1ap_eNB_instance_t));
    DevAssert(new_instance != NULL);

    RB_INIT(&new_instance->s1ap_ue_head);
    RB_INIT(&new_instance->s1ap_mme_head);

    /* Copy useful parameters */
    new_instance->instance         = instance;
    new_instance->eNB_name         = s1ap_register_eNB->eNB_name;
    new_instance->eNB_id           = s1ap_register_eNB->eNB_id;
    new_instance->cell_type        = s1ap_register_eNB->cell_type;
    new_instance->tac              = s1ap_register_eNB->tac;
    new_instance->mcc              = s1ap_register_eNB->mcc;
    new_instance->mnc              = s1ap_register_eNB->mnc;
    new_instance->mnc_digit_length = s1ap_register_eNB->mnc_digit_length;
    new_instance->default_drx      = s1ap_register_eNB->default_drx;

    /* Add the new instance to the list of eNB (meaningful in virtual mode) */
    s1ap_eNB_insert_new_instance(new_instance);

    S1AP_INFO("Registered new eNB[%d] and %s eNB id %u\n",
              instance,
              s1ap_register_eNB->cell_type == CELL_MACRO_ENB ? "macro" : "home",
              s1ap_register_eNB->eNB_id);
  }

  DevCheck(s1ap_register_eNB->nb_mme <= S1AP_MAX_NB_MME_IP_ADDRESS,
           S1AP_MAX_NB_MME_IP_ADDRESS, s1ap_register_eNB->nb_mme, 0);

  /* Trying to connect to provided list of MME ip address */
  for (index = 0; index < s1ap_register_eNB->nb_mme; index++) {
    s1ap_eNB_register_mme(new_instance,
                          &s1ap_register_eNB->mme_ip_address[index],
                          &s1ap_register_eNB->enb_ip_address,
                          s1ap_register_eNB->sctp_in_streams,
                          s1ap_register_eNB->sctp_out_streams);
  }
}
/*
 * Obtain a new vnode. The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags. An open() of the vnode will
 * decrement the (2-bit) flags. Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode. Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 *
 * @param lktimeout  Lock timeout passed to lockinit(); 0 is coerced to
 *                   hz/10 (only the pageout daemon uses LK_TIMELOCK and
 *                   a zero timeout could deadlock it in low-VM cases).
 * @param lkflags    Additional lock flags for the vnode lock.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Do not flag for synchronous recyclement unless there are enough
	 * freeable vnodes to recycle and the number of vnodes has
	 * significantly exceeded our target.  We want the normal vnlru
	 * process to handle the cleaning (at 9/10's) before we are forced
	 * to flag it here at 11/10's for userexit path processing.
	 */
	if (numvnodes >= maxvnodes * 11 / 10 &&
	    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
		struct thread *td = curthread;
		if (td->td_lwp)
			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;

	/* M_ZERO: all fields below that are not set start zeroed. */
	vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin, "allocvnode");

	/* Return with the vnode VX locked, as documented above. */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	atomic_add_int(&numvnodes, 1);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return (vp);
}
/*
 * Set up the idnode subsystem: clear the notification queue, empty the
 * global lookup trees, create the synchronization primitives and start
 * the background idnode worker thread.
 */
void
idnode_init(void)
{
  idnode_queue = NULL;

  /* Global lookup trees start out empty. */
  RB_INIT(&idnodes);
  RB_INIT(&idclasses);
  RB_INIT(&idrootclasses);

  /* Primitives used to hand work to the worker thread. */
  pthread_mutex_init(&idnode_mutex, NULL);
  pthread_cond_init(&idnode_cond, NULL);

  tvhthread_create(&idnode_tid, NULL, idnode_thread, NULL);
}
/*
 * Peer went down: discard every queued update and withdraw (both IPv4
 * and IPv6), empty the prefix/attribute trees and zero the counters.
 */
void
up_down(struct rde_peer *peer)
{
	up_clear(&peer->updates, &peer->withdraws);
	up_clear(&peer->updates6, &peer->withdraws6);

	RB_INIT(&peer->up_prefix);
	RB_INIT(&peer->up_attrs);

	peer->up_wcnt = 0;
	peer->up_nlricnt = 0;
	peer->up_acnt = 0;
	peer->up_pcnt = 0;
}
/*
 * Initialize per-peer update state: empty queues for pending updates
 * and withdraws (IPv4 and IPv6), empty prefix/attribute trees, and all
 * bookkeeping counters at zero.
 */
void
up_init(struct rde_peer *peer)
{
	TAILQ_INIT(&peer->updates);
	TAILQ_INIT(&peer->updates6);
	TAILQ_INIT(&peer->withdraws);
	TAILQ_INIT(&peer->withdraws6);

	RB_INIT(&peer->up_prefix);
	RB_INIT(&peer->up_attrs);

	peer->up_wcnt = 0;
	peer->up_nlricnt = 0;
	peer->up_acnt = 0;
	peer->up_pcnt = 0;
}
/*
 * Reset the btm work queues: re-initialize the free and active op ring
 * buffers under the wq spinlock, then hand every op buffer back to the
 * free queue.  Always returns 0.
 *
 * NOTE(review): the refill loop runs *after* the spinlock is released;
 * presumably _stp_btm_put_op acquires the lock internally — confirm
 * against its definition.
 */
INT32 stp_btm_reset_btm_wq(MTKSTP_BTM_T *stp_btm)
{
	UINT32 i = 0;

	osal_lock_unsleepable_lock(&(stp_btm->wq_spinlock));
	RB_INIT(&stp_btm->rFreeOpQ, STP_BTM_OP_BUF_SIZE);
	RB_INIT(&stp_btm->rActiveOpQ, STP_BTM_OP_BUF_SIZE);
	osal_unlock_unsleepable_lock(&(stp_btm->wq_spinlock));

	/* Put all to free Q */
	for (i = 0; i < STP_BTM_OP_BUF_SIZE; i++) {
		osal_signal_init(&(stp_btm->arQue[i].signal));
		_stp_btm_put_op(stp_btm, &stp_btm->rFreeOpQ, &(stp_btm->arQue[i]));
	}

	return 0;
}
/*
 * Initialize a wf2q scheduler instance: empty augmented tree, virtual
 * time and tdio count both starting at zero.
 */
void
wf2q_init(struct wf2q_t *pwf2q)
{
	RB_INIT(&pwf2q->wf2q_augtree);
	pwf2q->wf2q_tdio_count = 0;
	pwf2q->wf2q_virtual_time = 0;
}
/* * return boolean whether or not the last ctfile_list contained * filename. */ int ct_file_on_server(struct ct_global_state *state, char *filename) { struct ctfile_list_tree results; struct ctfile_list_file *file = NULL; char *filelist[2]; int exists = 0; RB_INIT(&results); filelist[0] = filename; filelist[1] = NULL; ctfile_list_complete(&state->ctfile_list_files, CT_MATCH_GLOB, filelist, NULL, &results); /* Check to see if we already have a secrets file on the server */ if (RB_MIN(ctfile_list_tree, &results) != NULL) { exists = 1; } while ((file = RB_ROOT(&results)) != NULL) { RB_REMOVE(ctfile_list_tree, &results, file); e_free(&file); } return (exists); }
/**
 * Initialize the watch set to the empty state.
 *
 * @param[in] ws A non-NULL pointer to the watch set.
 **/
void
watch_set_init (watch_set *ws)
{
    assert (ws != NULL);

    RB_INIT (ws);
}
/*
 * Initialize a variable database to the empty state with no external
 * fallback database attached.  Always succeeds and returns 0.
 */
int
vardb_init(struct vardb *v)
{
	v->count = 0;
	v->external_vardb = NULL;
	RB_INIT(&v->root);

	return 0;
}
/*
 * Forget all cached undo history for this mount: empty both the RB
 * lookup tree and the LRU list, and reset the allocation counter.
 */
void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	TAILQ_INIT(&hmp->undo_lru_list);
	RB_INIT(&hmp->rb_undo_root);
	hmp->undo_alloc = 0;
}
/*
 * Initialize the single global Windows event loop (LOOP): create the
 * I/O completion port, reset the loop clock, and clear every pending
 * queue, tree and error field.  Aborts the process if the IOCP cannot
 * be created.
 */
static void uv_loop_init() {
  /* Create an I/O completion port; this failing is unrecoverable. */
  LOOP->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (LOOP->iocp == NULL) {
    uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
  }

  LOOP->refs = 0;
  uv_update_time();

  /* Nothing pending yet. */
  LOOP->pending_reqs_tail = NULL;
  LOOP->endgame_handles = NULL;
  LOOP->check_handles = NULL;
  LOOP->prepare_handles = NULL;
  LOOP->idle_handles = NULL;
  LOOP->next_check_handle = NULL;
  LOOP->next_prepare_handle = NULL;
  LOOP->next_idle_handle = NULL;

  RB_INIT(&LOOP->timers);

  LOOP->last_error = uv_ok_;
  LOOP->err_str = NULL;
}
/*
 * Attach per-interface 802.11 node (station) state: node lock and tree,
 * default node-management callbacks, cache limits, and the association
 * ID bitmap.  If the AID bitmap cannot be allocated the interface
 * continues with ic_max_aid forced to 0 (no way to recover here).
 */
void
ieee80211_node_attach(struct ifnet *ifp)
{
	struct ieee80211com *ic = (void *)ifp;
	int size;

	IEEE80211_NODE_LOCK_INIT(ic, ifp->if_xname);
	RB_INIT(&ic->ic_tree);

	/* Default node management hooks; drivers may override. */
	ic->ic_node_alloc = ieee80211_node_alloc;
	ic->ic_node_free = ieee80211_node_free;
	ic->ic_node_copy = ieee80211_node_copy;
	ic->ic_node_getrssi = ieee80211_node_getrssi;
	ic->ic_scangen = 1;
	ic->ic_max_nnodes = ieee80211_cache_size;

	/* Clamp the maximum association ID to [default, IEEE80211_AID_MAX]. */
	if (ic->ic_max_aid == 0)
		ic->ic_max_aid = IEEE80211_AID_DEF;
	else if (ic->ic_max_aid > IEEE80211_AID_MAX)
		ic->ic_max_aid = IEEE80211_AID_MAX;

	/* One bit per AID, rounded up to whole 32-bit words. */
	size = howmany(ic->ic_max_aid, 32) * sizeof(u_int32_t);
	MALLOC(ic->ic_aid_bitmap, u_int32_t *, size, M_DEVBUF, M_NOWAIT);
	if (ic->ic_aid_bitmap == NULL) {
		/* XXX no way to recover */
		printf("%s: no memory for AID bitmap!\n", __func__);
		ic->ic_max_aid = 0;
	} else
		memset(ic->ic_aid_bitmap, 0, size);
}
/*
 * Install the built-in "default" group and shared network.
 *
 * Builds a DHCP lease-time option whose encoded form is one length
 * byte followed by the 4-byte network-order lease time, then inserts
 * the default group and the default shared network (which references
 * the group) into their global trees.  Aborts on allocation failure.
 */
static void
set_defaults(void)
{
	u_int8_t	*buf;
	u_int32_t	 t = htonl(DEFAULT_LEASE_TIME);

	/* Initialize some default timers essential for many clients. */
	/* Option wire format here: [len][value bytes]. */
	if ((buf = malloc(1 + sizeof t)) == NULL)
		goto nomem;
	buf[0] = sizeof t;
	memcpy(buf + 1, &t, sizeof t);
	default_group.options[DHCP_OPT_ADDR_LEASETIME] = buf;

	/* Initialize the default group and shared_network. */
	default_group.refcnt = 1;
	strlcpy(default_group.name, "default", sizeof "default");
	RB_INSERT(group_tree, &groups, &default_group);

	default_shared_network.name = "default";
	/* group_use() bumps the group refcount for this reference. */
	default_shared_network.group = group_use(&default_group);
	RB_INIT(&default_shared_network.hosts);
	RB_INSERT(shared_network_tree, &shared_networks,
	    &default_shared_network);
	return;
nomem:
	fatalx("out of memory really quite soon");
}
/*
 * Start a new LKA (local delivery agent) expansion session for an
 * envelope: allocate the session, register it in the global session
 * tree under 'id', seed the expansion with the envelope's recipient
 * address, and kick off expansion via lka_resume().
 *
 * The global session tree is lazily initialized on first call.
 */
void
lka_session(uint64_t id, struct envelope *envelope)
{
	struct lka_session	*lks;
	struct expandnode	 xn;

	/* One-time init of the global session tree. */
	if (init == 0) {
		init = 1;
		tree_init(&sessions);
	}

	lks = xcalloc(1, sizeof(*lks), "lka_session");
	lks->id = id;
	RB_INIT(&lks->expand.tree);
	TAILQ_INIT(&lks->deliverylist);
	tree_xset(&sessions, lks->id, lks);

	lks->envelope = *envelope;

	TAILQ_INIT(&lks->nodes);
	/* Root expansion node: the envelope's recipient address. */
	bzero(&xn, sizeof xn);
	xn.type = EXPAND_ADDRESS;
	xn.u.mailaddr = lks->envelope.rcpt;
	lks->expand.rule = NULL;
	lks->expand.queue = &lks->nodes;
	expand_insert(&lks->expand, &xn);
	lka_resume(lks);
}
/*
 * Initialize per-peer update state: one empty update/withdraw queue
 * pair per address family, empty prefix/attribute trees, and all
 * bookkeeping counters at zero.
 */
void
up_init(struct rde_peer *peer)
{
	u_int8_t	aid;

	for (aid = 0; aid < AID_MAX; aid++) {
		TAILQ_INIT(&peer->updates[aid]);
		TAILQ_INIT(&peer->withdraws[aid]);
	}

	RB_INIT(&peer->up_prefix);
	RB_INIT(&peer->up_attrs);

	peer->up_wcnt = 0;
	peer->up_nlricnt = 0;
	peer->up_acnt = 0;
	peer->up_pcnt = 0;
}
/*
 * Peer went down: discard the queued updates and withdraws of every
 * address family, empty the prefix/attribute trees and zero the
 * bookkeeping counters.
 */
void
up_down(struct rde_peer *peer)
{
	u_int8_t	aid;

	for (aid = 0; aid < AID_MAX; aid++)
		up_clear(&peer->updates[aid], &peer->withdraws[aid]);

	RB_INIT(&peer->up_prefix);
	RB_INIT(&peer->up_attrs);

	peer->up_wcnt = 0;
	peer->up_nlricnt = 0;
	peer->up_acnt = 0;
	peer->up_pcnt = 0;
}
/*
 * uvm_objinit: initialise a uvm object with the given pager operations
 * and starting reference count; the page tree starts empty.
 */
void
uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs)
{
	RB_INIT(&uobj->memt);
	uobj->pgops = pgops;
	uobj->uo_refs = refs;
	uobj->uo_npages = 0;
}
/*
 * One-time initialization of the btm module: set up the work-queue
 * lock, event and op ring buffers, return every op buffer to the free
 * queue, then create and start the BTMd service thread.
 *
 * @return the initialized context on success, NULL if the thread could
 *         not be created or started.
 *
 * NOTE(review): 'stp_btm' is used without visible allocation here —
 * presumably a file-scope static instance; confirm.
 */
MTKSTP_BTM_T *stp_btm_init(void)
{
	INT32 i = 0x0;
	INT32 ret = -1;

	osal_unsleepable_lock_init(&stp_btm->wq_spinlock);
	osal_event_init(&stp_btm->STPd_event);
	stp_btm->wmt_notify = wmt_lib_btm_cb;

	RB_INIT(&stp_btm->rFreeOpQ, STP_BTM_OP_BUF_SIZE);
	RB_INIT(&stp_btm->rActiveOpQ, STP_BTM_OP_BUF_SIZE);

	/* Put all to free Q */
	for (i = 0; i < STP_BTM_OP_BUF_SIZE; i++) {
		osal_signal_init(&(stp_btm->arQue[i].signal));
		_stp_btm_put_op(stp_btm, &stp_btm->rFreeOpQ, &(stp_btm->arQue[i]));
	}

	/*Generate PSM thread, to servie STP-CORE for packet retrying and core dump receiving*/
	stp_btm->BTMd.pThreadData = (VOID *)stp_btm;
	stp_btm->BTMd.pThreadFunc = (VOID *)_stp_btm_proc;
	/* NOTE(review): copies strlen() bytes only — no NUL terminator is
	 * written; relies on threadName being pre-zeroed. Confirm. */
	osal_memcpy(stp_btm->BTMd.threadName, BTM_THREAD_NAME , osal_strlen(BTM_THREAD_NAME));

	ret = osal_thread_create(&stp_btm->BTMd);
	if (ret < 0) {
		STP_BTM_ERR_FUNC("osal_thread_create fail...\n");
		goto ERR_EXIT1;
	}

	/* Start STPd thread*/
	ret = osal_thread_run(&stp_btm->BTMd);
	if(ret < 0) {
		STP_BTM_ERR_FUNC("osal_thread_run FAILS\n");
		goto ERR_EXIT1;
	}

	return stp_btm;

ERR_EXIT1:
	return NULL;
}
/*
 * uvm_objinit: initialise a uvm object — pager operations, lock,
 * empty page tree and the given starting reference count.
 */
void
uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs)
{
	RB_INIT(&uobj->memt);
	mtx_init(&uobj->vmobjlock, IPL_NONE);
	uobj->pgops = pgops;
	uobj->uo_refs = refs;
	uobj->uo_npages = 0;
}
mpls_tree_handle mpls_tree_create(int depth) { struct mpls_tree *tree; tree = mpls_malloc(sizeof(struct mpls_tree)); if(tree) RB_INIT(tree); return tree; }
/*
 * Initialize the kernel-interface tree and populate it from the
 * system.  Returns 0 on success, -1 if the interface fetch fails.
 */
int
kif_init(void)
{
	RB_INIT(&kit);

	return (fetchifs(0) == -1) ? (-1) : (0);
}
/* Allocate and return a new, empty environment. */
struct environ *
environ_create(void)
{
	struct environ	*env = xcalloc(1, sizeof *env);

	RB_INIT(env);

	return (env);
}
struct options * options_create(struct options *parent) { struct options *oo; oo = xcalloc(1, sizeof *oo); RB_INIT(&oo->tree); oo->parent = parent; return (oo); }
/*
 * Initialize a Windows event loop: create the I/O completion port,
 * reset the loop clock, and clear every handle/request container,
 * poll peer socket, and counter.  Aborts the process if the IOCP
 * cannot be created.
 */
static void uv_loop_init(uv_loop_t* loop) {
  /* Create an I/O completion port */
  loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (loop->iocp == NULL) {
    uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
  }

  uv_update_time(loop);

#ifndef UV_LEAN_AND_MEAN
  /* Full build tracks active handles/reqs in queues... */
  ngx_queue_init(&loop->active_handles);
  ngx_queue_init(&loop->active_reqs);
#else
  /* ...lean build only keeps counts. */
  loop->active_handles = 0;
  loop->active_reqs = 0;
#endif

  loop->pending_reqs_tail = NULL;
  loop->endgame_handles = NULL;
  RB_INIT(&loop->timers);

  loop->check_handles = NULL;
  loop->prepare_handles = NULL;
  loop->idle_handles = NULL;
  loop->next_prepare_handle = NULL;
  loop->next_check_handle = NULL;
  loop->next_idle_handle = NULL;

  memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);

  loop->channel = NULL;
  RB_INIT(&loop->ares_handles);

  loop->active_tcp_streams = 0;
  loop->active_udp_streams = 0;

  loop->last_err = uv_ok_;

  memset(&loop->counters, 0, sizeof loop->counters);
}
/*
 * Initialize a uv loop (modern Unix libuv).
 *
 * Zeroes the structure, empties every handle/request queue and the
 * timer tree, sets up the async wakeup machinery, performs platform
 * specific initialization, and creates the internal child-reaper
 * signal handle and threadpool completion async handle.
 *
 * @param loop          Loop to initialize (fully overwritten).
 * @param default_loop  Forwarded to uv__platform_loop_init().
 * @return 0 on success, or the error from uv__platform_loop_init().
 */
static int uv__loop_init(uv_loop_t* loop, int default_loop) {
  unsigned int i;
  int err;

  uv__signal_global_once_init();

  memset(loop, 0, sizeof(*loop));
  RB_INIT(&loop->timer_handles);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->active_reqs);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  /* I/O watcher bookkeeping starts empty; 'watchers' grows on demand. */
  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  /* Loop clock in milliseconds (uv__hrtime returns nanoseconds). */
  loop->time = uv__hrtime() / 1000000;
  uv__async_init(&loop->async_watcher);
  /* -1 marks these fds as not yet opened. */
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop, default_loop);
  if (err)
    return err;

  /* Internal SIGCHLD watcher; unref'd so it doesn't keep the loop alive. */
  uv_signal_init(loop, &loop->child_watcher);
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;

  for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++)
    QUEUE_INIT(loop->process_handles + i);

  /* Threadpool completion plumbing; async handle is internal/unref'd. */
  if (uv_mutex_init(&loop->wq_mutex))
    abort();

  if (uv_async_init(loop, &loop->wq_async, uv__work_done))
    abort();

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  return 0;
}