/** Create a new leg object for incoming request message.
 *
 * @param agent    agent object
 * @param callback function which is called for each
 *                 incoming request belonging to this leg
 * @param magic    call leg context
 * @param msg      a request message
 *
 * @note The ownership of @a msg will pass back to NTA upon successful call
 * to the function nta_msg_leg(). In other words, if the call to @a
 * nta_msg_leg() is successful, the application may not do anything with @a
 * msg anymore. Instead of that, NTA will create of a new incoming request
 * object for the @a msg and eventually return the request to application by
 * calling the @a callback function.
 *
 * @deprecated Use nta_leg_stateful() instead.
 */
nta_leg_t *nta_msg_leg(nta_agent_t *agent,
                       msg_t *msg,
                       nta_request_f *callback,
                       nta_leg_magic_t *magic,
                       ...)
{
  nta_leg_t *leg;
  sip_t *sip = sip_object(msg);

  SU_DEBUG_9(("\tnta_msg_leg(): called\n"));

  /* A request message with a request line is mandatory */
  assert(msg && sip && sip->sip_request);

  if (!msg || !sip || !sip->sip_request || !callback)
    return NULL;

  /* From/To are swapped: the new leg is the local (To) side of the
     incoming request, so its remote address is the request's From. */
  leg = nta_leg_tcreate(agent, callback, magic,
                        SIPTAG_CALL_ID(sip->sip_call_id),
                        SIPTAG_FROM(sip->sip_to), /* local address */
                        SIPTAG_TO(sip->sip_from), /* remote address */
                        TAG_END());
  if (!leg)
    /* xyzzy */;
  else if (nta_leg_server_route(leg, sip->sip_record_route, sip->sip_contact) < 0)
    /* Could not set up the route set; destroy the half-built leg */
    nta_leg_destroy(leg), leg = NULL;
  else if (nta_leg_stateful(leg, msg) < 0)
    /* Could not hand msg over to stateful processing */
    nta_leg_destroy(leg), leg = NULL;

  SU_DEBUG_9(("\tnta_msg_leg(): returns %p\n", leg));

  return leg;
}
/**Free a memory block.
 *
 * Frees a single memory block. The @a home must be the owner of the memory
 * block (usually the memory home used to allocate the memory block, or NULL
 * if no home was used).
 *
 * @param home pointer to home object
 * @param data pointer to the memory block to be freed
 */
void su_free(su_home_t *home, void *data)
{
  if (!data)
    return;

  if (home) {
#ifdef DEBUG
    SU_DEBUG_9(("su_free, home: %p, data: %p\n", (void*)home, (void*)data)) ;
#endif
    su_alloc_t *allocation;
    su_block_t *sub = MEMLOCK(home);

    assert(sub);
    /* Look up the allocation record for this pointer in the home's table */
    allocation = su_block_find(sub, data);
    assert(allocation);

    if (su_alloc_check(sub, allocation)) {
      void *preloaded = NULL;

      /* Is this preloaded data? */
      if (su_is_preloaded(sub, data))
        preloaded = data;

      if (sub->sub_stats)
        su_home_stats_free(sub, data, preloaded, allocation->sua_size);

      if (allocation->sua_home) {
        /* The block is itself a sub-home: tear it down recursively.
         * NOTE(review): the inner `sub` deliberately shadows the outer
         * `sub`; the shadow is confined to this branch. */
        su_home_t *subhome = data;
        su_block_t *sub = MEMLOCK(subhome);

        assert(sub->sub_ref != REF_MAX);
        /* assert(sub->sub_ref > 0); */
        sub->sub_ref = 0; /* Zap all references */

        _su_home_deinit(subhome);
      }

#if MEMCHECK != 0
      /* Poison the freed block so use-after-free is easier to spot */
      memset(data, 0xaa, (size_t)allocation->sua_size);
#endif

      /* Clear the record and drop the live-allocation count */
      memset(allocation, 0, sizeof (*allocation));
      sub->sub_used--;

#ifdef DEBUG
      SU_DEBUG_9(("%s: block %p sub_used decremented, now %ld used %d\n",
                  __func__, sub, sub->sub_used, su_get_used_count(sub))) ;
#endif

      /* Preloaded (arena) memory is never handed to free() */
      if (preloaded)
        data = NULL;
    }

    UNLOCK(home);
  }

  safefree(data);
}
/** @internal Find the allocation record for pointer @a p in block @a b.
 *
 * Open-addressing hash lookup: the start bucket is the pointer value
 * modulo the table size, probing with a fixed stride until the entry is
 * found or the probe sequence wraps back to its start.
 *
 * @return record for @a p, or NULL if @a p is not tracked by @a b.
 */
su_inline su_alloc_t *su_block_find(su_block_t const *b, void const *p)
{
  size_t h, h0, probe;

#ifdef DEBUG
  SU_DEBUG_9(("su_block_find: searching block %p for data %p\n", b, p)) ;
#endif

#if SU_ALLOC_STATS
  /* Global lookup statistics (collision counts, table sizes) */
  size_t collision = 0;

  count_su_block_find++;
  size_su_block_find += b->sub_n;
  used_su_block_find += b->sub_used;
  if (b->sub_n > max_size_su_block_find)
    max_size_su_block_find = b->sub_n;
  if (b->sub_used > max_used_su_block_find)
    max_used_su_block_find = b->sub_used;
#endif

  assert(p != NULL);

  /* Hash the pointer value into a start bucket */
  h = h0 = (size_t)((uintptr_t)p % b->sub_n);

  probe = (b->sub_n > SUB_P) ? SUB_P : 1;

  do {
    if (b->sub_nodes[h].sua_data == p) {
#ifdef DEBUG
      SU_DEBUG_9(("su_block_find: found block %p data %p at h %ld\n", b, p, h)) ;
#endif
      su_alloc_t const *retval = &b->sub_nodes[h];
      return (su_alloc_t *)retval; /* discard const */
    }

    h += probe;
    if (h >= b->sub_n)
      h -= b->sub_n;

#if SU_ALLOC_STATS
    if (++collision > su_block_find_collision)
      su_block_find_collision = collision,
      su_block_find_collision_used = b->sub_used,
      su_block_find_collision_size = b->sub_n;
    count_su_block_find_loop++;
#endif
  } while (h != h0); /* stop when the probe sequence has wrapped around */

  return NULL;
}
/**Create a new su_home_t object.
 *
 * Create a home object used to collect multiple memory allocations under
 * one handle. The memory allocations made using this home object is freed
 * either when this home is destroyed.
 *
 * The maximum @a size of a home object is INT_MAX (2 gigabytes).
 *
 * @param size size of home object
 *
 * The memory home object allocated with su_home_new() can be reclaimed with
 * su_home_unref().
 *
 * @return
 * This function returns a pointer to an su_home_t object, or NULL upon
 * an error (errno is set to EINVAL or ENOMEM).
 */
void *su_home_new(isize_t size)
{
  su_home_t *home;

  assert(size >= sizeof (*home));

  if (size < sizeof (*home))
    return (void)(errno = EINVAL), NULL;
  else if (size > INT_MAX)
    return (void)(errno = ENOMEM), NULL;

  home = calloc(1, size);
  if (home) {
    home->suh_size = (int)size;
    home->suh_blocks = su_hash_alloc(SUB_N);
    if (home->suh_blocks)
      /* This home is heap-allocated, so unref must free() it */
      home->suh_blocks->sub_hauto = 0;
    else
      safefree(home), home = NULL;
  }

#ifdef DEBUG
  /* size <= INT_MAX here (checked above), so the (int) cast is safe;
   * passing isize_t to %d and su_home_t* to %p unconverted was undefined
   * behavior per the C printf argument-matching rules. */
  SU_DEBUG_9(("su_home_new: created new home for size %d at %p\n",
              (int)size, (void *)home)) ;
#endif

  return home;
}
/** @internal Insert pointer @a p into the hash table of block @a b.
 *
 * Uses the same hash/probe scheme as su_block_find(). The caller must
 * guarantee a free slot exists (the table is resized before it fills up),
 * otherwise the probe loop would not terminate.
 *
 * @return the node now storing @a p (caller fills in size/home flags).
 */
su_inline su_alloc_t *su_block_add(su_block_t *b, void *p)
{
  size_t h, probe;

  assert(p != NULL);

  /* Start bucket from pointer value modulo table size */
  h = (size_t)((uintptr_t)p % b->sub_n);

  probe = (b->sub_n > SUB_P) ? SUB_P : 1;

  /* Probe until an empty node is found */
  while (b->sub_nodes[h].sua_data) {
    h += probe;
    if (h >= b->sub_n)
      h -= b->sub_n;
  }

  b->sub_used++;
  b->sub_nodes[h].sua_data = p;

#ifdef DEBUG
  SU_DEBUG_9(("su_block_add: block %p data %p stored at h %ld b->sub_used %ld b->sub_n %ld used %d\n",
              b, p, h, b->sub_used, b->sub_n, su_get_used_count(b))) ;
#endif

  return &b->sub_nodes[h];
}
/** Internal deinitialization.
 *
 * Tears down a home: runs the destructor (once), recursively deinitializes
 * sub-homes, frees every heap-allocated block, then releases the preload
 * area, statistics, the block table itself, and finally the lock handles.
 * The caller must hold the home lock; the lock pointer is detached first
 * so it can be unlocked and destroyed after the home state is gone.
 */
static void _su_home_deinit(su_home_t *home)
{
  if (home->suh_blocks) {
    size_t i;
    su_block_t *b;
    /* Detach the lock now; it is destroyed at the end */
    void *suh_lock = home->suh_lock;

    home->suh_lock = NULL;

    if (home->suh_blocks->sub_destructor) {
      /* Clear before calling so the destructor cannot recurse into itself */
      void (*destructor)(void *) = home->suh_blocks->sub_destructor;
      home->suh_blocks->sub_destructor = NULL;
      destructor(home);
    }

    b = home->suh_blocks;

#ifdef DEBUG
    SU_DEBUG_9(("%s: block %p sub_used is %ld sub_n %ld used %d\n",
                __func__, b, b->sub_used, b->sub_n, su_get_used_count(b))) ;
#endif

    su_home_check_blocks(b);

    /* Free every tracked allocation */
    for (i = 0; i < b->sub_n; i++) {
      if (b->sub_nodes[i].sua_data) {
        if (b->sub_nodes[i].sua_home) {
          /* Entry is a sub-home: zap its references and recurse */
          su_home_t *subhome = b->sub_nodes[i].sua_data;
          su_block_t *subb = MEMLOCK(subhome);

          assert(subb);
          assert(subb->sub_ref >= 1);
#if 0
          if (subb->sub_ref > 0)
            SU_DEBUG_7(("su_home_unref: subhome %p with destructor %p has still %u refs\n",
                        subhome, subb->sub_destructor, subb->sub_ref));
#endif
          subb->sub_ref = 0; /* zap them all */

          _su_home_deinit(subhome);
        }
        else if (su_is_preloaded(b, b->sub_nodes[i].sua_data))
          /* Preloaded (arena) data is released with the preload area */
          continue;

        safefree(b->sub_nodes[i].sua_data);
      }
    }

    if (b->sub_preload && !b->sub_preauto)
      free(b->sub_preload);
    if (b->sub_stats)
      free(b->sub_stats);
    if (!b->sub_auto)
      free(b);

    home->suh_blocks = NULL;

    if (suh_lock) {
      /* Unlock, or risk assert() or leak handles on Windows */
      _su_home_unlocker(suh_lock);
      _su_home_destroy_mutexes(suh_lock);
    }
  }
}
/** Deinitialize a poll-based port: trace the call, then release the
 * shared socket-port state. */
static void su_poll_port_deinit(void *arg)
{
  su_port_t *port = arg;

  SU_DEBUG_9(("%s(%p) called\n", "su_poll_port_deinit", (void *)port));

  su_socket_port_deinit(port->sup_base);
}
/**Unreference a su_home_t object.
 *
 * Decrements the reference count on home object and destroys and frees it
 * and the memory allocations using it if the reference count reaches 0.
 *
 * @param home memory pool object to be unreferenced
 *
 * @retval 1 if object was freed
 * @retval 0 if object is still alive
 */
int su_home_unref(su_home_t *home)
{
  su_block_t *sub;

  if (home == NULL)
    return 0;

  sub = MEMLOCK(home);

  if (sub == NULL) {
    /* Xyzzy */
    return 0;
  }
  else if (sub->sub_ref == REF_MAX) {
    /* REF_MAX marks an immortal home: never decremented, never freed */
    UNLOCK(home);
    return 0;
  }
  else if (--sub->sub_ref > 0) {
    /* Snapshot the count while still holding the lock; the old code read
     * sub->sub_ref after UNLOCK, racing with other threads.  Also fixes
     * the "refdcount" typo and the %ld/unsigned format mismatch. */
    long refcount = (long)sub->sub_ref;
    UNLOCK(home);
#ifdef DEBUG
    SU_DEBUG_9(("su_home_unref: home %p now has refcount %ld\n",
                home, refcount)) ;
#else
    (void)refcount;
#endif
    return 0;
  }
  else if (sub->sub_parent) {
    /* Sub-home: freeing it through the parent also drops the record */
    su_home_t *parent = sub->sub_parent;
    UNLOCK(home);
#ifdef DEBUG
    SU_DEBUG_9(("su_home_unref: actually freeing home %p\n", home)) ;
#endif
    su_free(parent, home);
    return 1;
  }
  else {
    /* Top-level home: deinit in place; free only if heap-allocated */
    int hauto = sub->sub_hauto;
#ifdef DEBUG
    SU_DEBUG_9(("su_home_unref: actually freeing home %p hauto %d\n",
                home, hauto)) ;
#endif
    _su_home_deinit(home);
    if (!hauto)
      safefree(home);
    /* UNLOCK(home); */
    return 1;
  }
}
/**@internal
 *
 * Initializes a message port. It creates a mailbox used to wake up the
 * thread waiting on the port if needed. Currently, the mailbox is a
 * socketpair or an UDP socket connected to itself.
 */
int su_pthread_port_init(su_port_t *self, su_port_vtable_t const *vtable)
{
  int retval;

  SU_DEBUG_9(("su_pthread_port_init(%p, %p) called\n",
              (void *)self, (void *)vtable));

  /* Mutex guarding port ownership transfer between threads */
  pthread_mutex_init(self->sup_obtained, NULL);

  retval = su_base_port_init(self, vtable);

  return retval;
}
/** NTA agent message callback bridging into Lua.
 *
 * Looks up the Lua userdata for @a agent, fetches the registered Lua
 * callback from the registry and calls it as callback(agent_ud, msg, sip).
 * Lua stack is restored to its entry state on every path.
 *
 * @return 0 on success (expected by NTA), -1 if no callback is registered.
 */
static int nta_agent_message_callback(nta_agent_magic_t *context,
                                      nta_agent_t *agent,
                                      msg_t *msg,
                                      sip_t *sip)
{
  int error;
  luasofia_nta_agent_t* u_nta_agent = (luasofia_nta_agent_t*) context;
  lua_State *L = u_nta_agent->L;

  SU_DEBUG_9(("nta_agent_message_callback: context[%p] agent[%p] msg[%p] sip[%p]\n",
              context, agent, msg, sip));

  /* put nta_agent userdatum at stack and check if it is ok. */
  luasofia_userdata_table_get(L, agent);
  luaL_checkudata(L, -1, NTA_AGENT_MTABLE);

  /* put callback function at stack */
  lua_rawgeti(L, LUA_REGISTRYINDEX, u_nta_agent->callback_ref);
  if (lua_isnil(L, -1)) {
    /* pop the nil and the userdata pushed above */
    lua_pop(L, 2);
    SU_DEBUG_1(("nta_agent_message_callback: callback function not found!\n"));
    return -1; //error, lets not return 0 (should always return 0).
  }

  // Lets pass the nta_agent userdata as the first parameter of the callback.
  lua_pushvalue(L, -2);
  /* msg and sip are passed as opaque light userdata pointers */
  lua_pushlightuserdata(L, msg);
  lua_pushlightuserdata(L, sip);

  SU_DEBUG_9(("nta_agent_message_callback: calling lua callback\n"));
  /* pcall pops the function and its 3 arguments */
  if ((error = lua_pcall(L, 3, 0, 0)) != 0) {
    if (error == LUA_ERRMEM)
      SU_DEBUG_0(("nta_agent_message_callback: memory allocation error! error[%s]\n",
                  lua_tostring(L, -1)));
    else
      SU_DEBUG_1(("nta_agent_message_callback: error on calling callback! error[%s]\n",
                  lua_tostring(L, -1)));
    /* pop the error message */
    lua_pop(L, 1);
  }

  /* pop the nta_agent userdata */
  lua_pop(L, 1);
  return 0;
}
/** Destroy a home object
 *
 * Frees all memory blocks associated with a home object. Note that the home
 * object structure is not freed.
 *
 * @param home pointer to a home object
 *
 * @deprecated
 * su_home_destroy() is included for backwards compatibility only. Use
 * su_home_unref() instead of su_home_destroy().
 */
void su_home_destroy(su_home_t *home)
{
#ifdef DEBUG
  SU_DEBUG_9(("su_home_destroy: destroying home at %p\n", home)) ;
#endif

  if (MEMLOCK(home)) {
    assert(home->suh_blocks);
    assert(home->suh_blocks->sub_ref == 1);

    /* Force hauto so _su_home_deinit() does not free the structure itself */
    if (!home->suh_blocks->sub_hauto)
      /* should warn user */;
    home->suh_blocks->sub_hauto = 1;

    _su_home_deinit(home);
    /* UNLOCK(home); */
  }
}
/** Allocate the block hash table.
 *
 * @internal
 *
 * Allocate a block hash table of @a n elements.
 *
 * @param n number of buckets in hash table
 *
 * @return
 * This function returns a pointer to the allocated hash table or
 * NULL if an error occurred.
 */
su_inline su_block_t *su_hash_alloc(size_t n)
{
  /* Header plus n flexible-array nodes, zero-filled */
  size_t bytes = offsetof(su_block_t, sub_nodes[n]);
  su_block_t *block = calloc(1, bytes);

  if (block != NULL) {
    /* Implicit su_home_init(); */
    block->sub_ref = 1;
    block->sub_hauto = 1;
    block->sub_n = n;
  }

#ifdef DEBUG
  SU_DEBUG_9(("su_hash_alloc: block %p added\n", block)) ;
#endif

  return block;
}
/**Allocate and zero a memory block.
 *
 * Allocates a memory block with a given size from
 * given memory home @a home and zeroes the allocated block.
 *
 * @param home pointer to memory pool object
 * @param size size of the memory block
 *
 * @note The memory home pointer @a home may be @c NULL. In that case, the
 * allocated memory block is not associated with any memory home, and it
 * must be freed by calling su_free() or free().
 *
 * @return
 * The function su_zalloc() returns a pointer to the allocated memory block,
 * or NULL upon an error.
 */
void *su_zalloc(su_home_t *home, isize_t size)
{
  void *data;

  assert (size >= 0);

  if (home) {
    /* (enum sub_zero)1 requests a zeroed block (not a sub-home) */
    data = sub_alloc(home, MEMLOCK(home), size, (enum sub_zero)1);
    UNLOCK(home);
  }
  else
    data = calloc(1, size);

#ifdef DEBUG
  SU_DEBUG_9(("su_zalloc: allocated size %d at: %p\n", size, data)) ;
#endif

  return data;
}
/** Initialize an su_home_t struct.
 *
 * Initializes an su_home_t structure. It can be used when the home
 * structure is allocated from stack or when the home structure is part of
 * an another object.
 *
 * @param home pointer to home object
 *
 * @retval 0 when successful
 * @retval -1 upon an error.
 *
 * @sa SU_HOME_INIT(), su_home_deinit(), su_home_new(), su_home_clone()
 *
 * @bug
 * Prior to @VERSION_1_12_8 the su_home_t structure should have been
 * initialized with SU_HOME_INIT() or otherwise zeroed before calling
 * su_home_init().
 */
int su_home_init(su_home_t *home)
{
  su_block_t *sub;

  if (home == NULL)
    return -1;

  /* Fresh block table; lock is created lazily elsewhere */
  sub = su_hash_alloc(SUB_N);
  home->suh_blocks = sub;
  home->suh_lock = NULL;

  if (sub == NULL)
    return -1;

#ifdef DEBUG
  SU_DEBUG_9(("%s: allocated block %p sub_used is %ld sub_n %ld used %d\n",
              __func__, sub, sub->sub_used, sub->sub_n, su_get_used_count(sub))) ;
#endif

  return 0;
}
/** Preload a memory home from stack.
 *
 * Initializes a memory home using an area allocated from stack. Poor man's
 * alloca().
 *
 * The area is carved up as: aligned su_home_t, then a fixed-size block
 * table (SUB_N_AUTO buckets), then the remainder as preload arena.
 * Returns NULL if @a area is too small to hold the home and block table.
 */
su_home_t *su_home_auto(void *area, isize_t size)
{
  su_home_t *home;
  su_block_t *sub;
  size_t homesize = __ALIGN(sizeof *home);
  size_t subsize = __ALIGN(offsetof(su_block_t, sub_nodes[SUB_N_AUTO]));
  size_t prepsize;

  char *p = area;

  /* Home + table + padding needed to align the start pointer */
  prepsize = homesize + subsize + (__ALIGN((intptr_t)p) - (intptr_t)p);

  if (area == NULL || size < prepsize)
    return NULL;

  if (size > INT_MAX)
    size = INT_MAX;

  home = memset(p, 0, homesize);
  home->suh_size = (int)size;

  sub = memset(p + homesize, 0, subsize);
  home->suh_blocks = sub;

  /* Preload arena is capped at 64K */
  if (size > prepsize + 65535)
    size = prepsize + 65535;

  sub->sub_n = SUB_N_AUTO;
  sub->sub_ref = 1;
  sub->sub_preload = p + prepsize;
  sub->sub_prsize = (unsigned)(size - prepsize);
  /* Everything lives in caller-owned memory: nothing here is free()d */
  sub->sub_hauto = 1;
  sub->sub_auto = 1;
  sub->sub_preauto = 1;
  sub->sub_auto_all = 1;

#ifdef DEBUG
  SU_DEBUG_9(("%s: start - block %p sub_used is %ld sub_n %ld used %d\n",
              __func__, sub, sub->sub_used, sub->sub_n, su_get_used_count(sub))) ;
#endif

  return home;
}
/** Count the live entries in a block hash table (debug helper).
 *
 * Walks the node array and counts slots with non-NULL data. Only compiled
 * to do real work when MEMCHECK is enabled; otherwise returns -1.
 *
 * @param b block table to inspect (may be NULL)
 * @retval number of occupied nodes when MEMCHECK != 0 and @a b is non-NULL
 * @retval -1 otherwise
 */
int su_get_used_count(su_block_t const *b)
{
#if MEMCHECK != 0
  if (b) {
    size_t i, used;

    for (i = 0, used = 0; i < b->sub_n; i++) {
      if (b->sub_nodes[i].sua_data) {
        used++;
      }
    }

    /* Cast size_t values for the long format specifiers: passing size_t
     * straight to %ld is undefined behavior on LLP64/ILP32 targets. */
    SU_DEBUG_9(("%s: block %p sub_used is %ld sub_n %ld used %ld\n",
                __func__, (void const *)b, (long)b->sub_used,
                (long)b->sub_n, (long)used)) ;

    return (int)used; /* explicit narrowing; counts are small in practice */
  }
  return -1;
#else
  return -1;
#endif
}
/** Check home blocks.
 *
 * Debug invariant check (compiled only under MEMCHECK): verifies each
 * occupied node with su_alloc_check(), recurses into sub-homes, and
 * asserts that the occupied-node count matches sub_used.
 */
static void su_home_check_blocks(su_block_t const *b)
{
#if MEMCHECK != 0
  if (b) {
    size_t i, used;

    assert(b->sub_used <= b->sub_n);

    for (i = 0, used = 0; i < b->sub_n; i++)
      if (b->sub_nodes[i].sua_data) {
        su_alloc_check(b, &b->sub_nodes[i]), used++;
        if (b->sub_nodes[i].sua_home)
          /* Entry is a sub-home: check it recursively */
          su_home_check((su_home_t *)b->sub_nodes[i].sua_data);
      }

#ifdef DEBUG
    SU_DEBUG_9(("%s: block %p sub_used is %ld sub_n %ld used %ld\n",
                __func__, b, b->sub_used, b->sub_n, used)) ;
#endif

    assert(used == b->sub_used);
  }
#endif
}
/** Create a port using /dev/poll or poll().
 *
 * Opens /dev/poll; if that fails, falls back to a plain poll()-based port.
 * On any later failure the home (and thus the port) is unreferenced and
 * the /dev/poll descriptor closed.
 *
 * @return new port, or NULL upon an error.
 */
su_port_t *su_devpoll_port_create(void)
{
  su_port_t *self;
  int devpoll = open("/dev/poll", O_RDWR);

  if (devpoll == -1) {
    /* Fallback to poll() */
    SU_DEBUG_3(("%s(): open(\"%s\") => %u: %s\n",
                "su_devpoll_port_create", "/dev/poll",
                errno, strerror(errno)));
    return su_poll_port_create();
  }

  self = su_home_new(sizeof *self);
  if (!self) {
    close(devpoll);
    return self;
  }

  /* Register cleanup and allocate the initial index table (64 slots) */
  if (su_home_destructor(su_port_home(self), su_devpoll_port_deinit) < 0 ||
      !(self->sup_indices =
        su_zalloc(su_port_home(self),
                  (sizeof self->sup_indices[0]) *
                  (self->sup_size_indices = 64)))) {
    su_home_unref(su_port_home(self));
    close(devpoll);
    return NULL;
  }

  self->sup_devpoll = devpoll;
  self->sup_multishot = SU_ENABLE_MULTISHOT_POLL;

  if (su_socket_port_init(self->sup_base, su_devpoll_port_vtable) < 0)
    /* deinit via destructor closes devpoll */
    return su_home_unref(su_port_home(self)), NULL;

  SU_DEBUG_9(("%s(%p): devpoll_create() => %u: %s\n",
              "su_port_create", (void *)self, self->sup_devpoll, "OK"));

  return self;
}
/**Move allocations from a su_home_t object to another.
 *
 * Moves allocations made through the @a src home object under the @a dst
 * home object. It is handy, for example, if an operation allocates some
 * number of blocks that should be freed upon an error. It uses a temporary
 * home and moves the blocks from temporary to a proper home when
 * successful, but frees the temporary home upon an error.
 *
 * If @a src has destructor, it is called before starting to move.
 *
 * @param dst destination home
 * @param src source home
 *
 * @retval 0 if succesful
 * @retval -1 upon an error
 */
int su_home_move(su_home_t *dst, su_home_t *src)
{
  size_t i, n, n2, used;
  su_block_t *s, *d, *d2;

  if (src == NULL || dst == src)
    return 0;

  if (dst) {
    s = MEMLOCK(src);
    d = MEMLOCK(dst);

    if (s && s->sub_n) {
#ifdef DEBUG
      SU_DEBUG_9(("%s: start - block %p sub_used is %ld sub_n %ld used %d\n",
                  __func__, s, s->sub_used, s->sub_n, su_get_used_count(s))) ;
#endif
      /* Run the source destructor before the entries change owner */
      if (s->sub_destructor) {
        void (*destructor)(void *) = s->sub_destructor;
        s->sub_destructor = NULL;
        destructor(src);
      }

      if (d)
        used = s->sub_used + d->sub_used;
      else
        used = s->sub_used;

      /* Grow destination table if load factor would exceed 2/3 */
      if (used && (d == NULL || 3 * used > 2 * d->sub_n)) {
        if (d)
          for (n = n2 = d->sub_n; 3 * used > 2 * n2; n2 = 4 * n2 + 3)
            ;
        else
          n = 0, n2 = s->sub_n;

        if (!(d2 = su_hash_alloc(n2))) {
          UNLOCK(dst);
          UNLOCK(src);
          return -1;
        }

        dst->suh_blocks = d2;

        /* Rehash the old destination entries into the new table */
        for (i = 0; i < n; i++)
          if (d->sub_nodes[i].sua_data)
            su_block_add(d2, d->sub_nodes[i].sua_data)[0] = d->sub_nodes[i];

        if (d) {
          /* Carry over table-level state (refs, preload arena, stats) */
          d2->sub_parent = d->sub_parent;
          d2->sub_ref = d->sub_ref;
          d2->sub_preload = d->sub_preload;
          d2->sub_prsize = d->sub_prsize;
          d2->sub_prused = d->sub_prused;
          d2->sub_preauto = d->sub_preauto;
          d2->sub_stats = d->sub_stats;
        }

        if (d && !d->sub_auto)
          free(d);

        d = d2;
      }

      if (s->sub_used) {
        n = s->sub_n;

        /* Move each source entry; reparent any sub-homes to dst */
        for (i = 0; i < n; i++)
          if (s->sub_nodes[i].sua_data) {
            su_block_add(d, s->sub_nodes[i].sua_data)[0] = s->sub_nodes[i];
            if (s->sub_nodes[i].sua_home) {
              su_home_t *subhome = s->sub_nodes[i].sua_data;
              su_block_t *subsub = MEMLOCK(subhome);
              subsub->sub_parent = dst;
              UNLOCK(subhome);
            }
          }

        s->sub_used = 0;
        memset(s->sub_nodes, 0, n * sizeof (s->sub_nodes[0]));
      }

      if (s->sub_stats) {
        /* XXX */
      }
    }

    UNLOCK(dst);
    UNLOCK(src);
  }
  else {
    /* No destination: just detach everything from src, reparenting
       sub-homes to NULL */
    s = MEMLOCK(src);

    if (s && s->sub_used) {
      n = s->sub_n;

      for (i = 0; i < n; i++) {
        if (s->sub_nodes[i].sua_data && s->sub_nodes[i].sua_home) {
          su_home_t *subhome = s->sub_nodes[i].sua_data;
          su_block_t *subsub = MEMLOCK(subhome);
          subsub->sub_parent = dst;
          UNLOCK(subhome);
        }
      }

      s->sub_used = 0;
      memset(s->sub_nodes, 0, s->sub_n * sizeof (s->sub_nodes[0]));
      /* NOTE(review): redundant — sub_used was already zeroed above */
      s->sub_used = 0;
    }

#ifdef DEBUG
    SU_DEBUG_9(("%s: end - block %p sub_used is %ld sub_n %ld used %d\n",
                __func__, s, s->sub_used, s->sub_n, su_get_used_count(s))) ;
#endif
    UNLOCK(src);
  }

  return 0;
}
/** Reallocate a memory block.
 *
 * Allocates a memory block of @a size bytes.
 * It copies the old block contents to the new block and frees the old
 * block.
 *
 * If @a home is NULL, this function behaves exactly like realloc().
 *
 * @param home pointer to memory pool object
 * @param data pointer to old memory block
 * @param size size of the memory block to be allocated
 *
 * @return
 * A pointer to the allocated memory block or
 * NULL if an error occurred.
 *
 * @note UNLOCK() evaluates to NULL, so `return UNLOCK(home);` both
 * releases the lock and returns NULL.
 */
void *su_realloc(su_home_t *home, void *data, isize_t size)
{
  void *ndata;
  su_alloc_t *sua;
  su_block_t *sub;
  size_t p;
  /* MEMCHECK guard word written past the end of the block */
  size_t term = 0 - size;

  if (!home)
    return realloc(data, size);

  if (size == 0) {
    /* realloc(p, 0) semantics: free and return NULL */
    if (data)
      su_free(home, data);
    return NULL;
  }

  sub = MEMLOCK(home);

  if (!data) {
    /* realloc(NULL, n) semantics: plain allocation */
    data = sub_alloc(home, sub, size, (enum sub_zero)0);
    UNLOCK(home);
    return data;
  }

  sua = su_block_find(sub, data);

  if (!su_alloc_check(sub, sua))
    return UNLOCK(home); /* returns NULL */

  /* Sub-homes cannot be resized */
  assert(!sua->sua_home);
  if (sua->sua_home)
    return UNLOCK(home); /* returns NULL */

  if (!su_is_preloaded(sub, data)) {
    /* Heap-backed block: delegate to realloc() and re-register */
    ndata = realloc(data, size + MEMCHECK_EXTRA);
    if (ndata) {
      if (sub->sub_stats) {
        su_home_stats_free(sub, data, 0, sua->sua_size);
        su_home_stats_alloc(sub, data, 0, size, 1);
      }
#if MEMCHECK_EXTRA
      memcpy((char *)ndata + size, &term, sizeof (term));
#else
      (void)term;
#endif
      /* Old record is stale (pointer may have moved): drop and re-add */
      memset(sua, 0, sizeof *sua);
      sub->sub_used--;
      su_block_add(sub, ndata)->sua_size = (unsigned)size;
    }
    UNLOCK(home);
    return ndata;
  }

  /* Preloaded (arena) block: p = arena offset just past this block */
  p = (char *)data - home->suh_blocks->sub_preload;
  p += sua->sua_size + MEMCHECK_EXTRA;
  p = __ALIGN(p);

  if (p == sub->sub_prused) {
    /* Block is the last arena allocation: can grow/shrink in place */
    size_t p2 = (char *)data - sub->sub_preload + size + MEMCHECK_EXTRA;
    p2 = __ALIGN(p2);
    if (p2 <= sub->sub_prsize) {
      /* Extend/reduce existing preload */
      if (sub->sub_stats) {
        su_home_stats_free(sub, data, data, sua->sua_size);
        su_home_stats_alloc(sub, data, data, size, 0);
      }
      sub->sub_prused = (unsigned)p2;
      sua->sua_size = (unsigned)size;
#if MEMCHECK_EXTRA
      memcpy((char *)data + size, &term, sizeof (term));
#endif
      UNLOCK(home);
      return data;
    }
  }
  else if (size < (size_t)sua->sua_size) {
    /* Reduce existing preload */
    if (sub->sub_stats) {
      su_home_stats_free(sub, data, data, sua->sua_size);
      su_home_stats_alloc(sub, data, data, size, 0);
    }
#if MEMCHECK_EXTRA
    memcpy((char *)data + size, &term, sizeof (term));
#endif
    sua->sua_size = (unsigned)size;
    UNLOCK(home);
    return data;
  }

  /* Cannot resize in the arena: migrate the block to the heap */
  ndata = malloc(size + MEMCHECK_EXTRA);

  if (ndata) {
    if (p == sub->sub_prused) {
      /* Free preload */
      sub->sub_prused = (char *)data - home->suh_blocks->sub_preload;
      if (sub->sub_stats)
        su_home_stats_free(sub, data, data, sua->sua_size);
    }

    memcpy(ndata, data,
           (size_t)sua->sua_size < size ? (size_t)sua->sua_size : size);
#if MEMCHECK_EXTRA
    memcpy((char *)ndata + size, &term, sizeof (term));
#endif
    if (sub->sub_stats)
      su_home_stats_alloc(sub, data, 0, size, 1);

    memset(sua, 0, sizeof *sua);
    sub->sub_used--;
    su_block_add(sub, ndata)->sua_size = (unsigned)size;

#ifdef DEBUG
    SU_DEBUG_9(("%s: block %p sub_used is %ld sub_n %ld used %d\n",
                __func__, sub, sub->sub_used, sub->sub_n, su_get_used_count(sub))) ;
#endif
  }
  UNLOCK(home);

  return ndata;
}
/** Allocate a memory block.
 *
 * @internal
 *
 * Precondition: locked home
 *
 * @param home home to allocate
 * @param sub  block structure used to allocate
 * @param size size of the block
 * @param zero if true, zero allocated block;
 *             if > 1, allocate a subhome
 */
static void *sub_alloc(su_home_t *home,
                       su_block_t *sub,
                       size_t size,
                       enum sub_zero zero)
{
  void *data, *preload = NULL;

  assert (size < (((size_t)1) << SIZEBITS));

#ifdef DEBUG
  SU_DEBUG_9(("sub_alloc: allocating size %ld from home: %p using block %p\n",
              size, home, sub)) ;
#endif

  if (size >= ((size_t)1) << SIZEBITS)
    return (void)(errno = ENOMEM), NULL;

  if (!size)
    return NULL;

  if (sub == NULL || 3 * sub->sub_used > 2 * sub->sub_n) {
    /* Resize the hash table (load factor above 2/3) */
    size_t i, n, n2;
    su_block_t *b2;

    if (sub)
      n = home->suh_blocks->sub_n, n2 = 4 * n + 3; //, used = sub->sub_used;
    else
      n = 0, n2 = SUB_N; //, used = 0;

#ifdef DEBUG
    SU_DEBUG_9(("sub_alloc: realloc block hash of size %ld\n", n2)) ;
#endif

    if (!(b2 = su_hash_alloc(n2)))
      return NULL;

    /* Rehash all live entries into the larger table */
    for (i = 0; i < n; i++) {
      if (sub->sub_nodes[i].sua_data)
        su_block_add(b2, sub->sub_nodes[i].sua_data)[0] = sub->sub_nodes[i];
    }

    if (sub) {
      /* Carry over table-level state to the replacement table */
      b2->sub_parent = sub->sub_parent;
      b2->sub_ref = sub->sub_ref;
      b2->sub_preload = sub->sub_preload;
      b2->sub_prsize = sub->sub_prsize;
      b2->sub_prused = sub->sub_prused;
      b2->sub_hauto = sub->sub_hauto;
      b2->sub_preauto = sub->sub_preauto;
      b2->sub_destructor = sub->sub_destructor;
      /* auto_all is not copied! */
      b2->sub_stats = sub->sub_stats;
    }

    home->suh_blocks = b2;

    if (sub && !sub->sub_auto)
      free(sub);
    sub = b2;
  }

  if (sub && zero < do_clone &&
      sub->sub_preload && size <= sub->sub_prsize) {
    /* Use preloaded memory (sub-homes are never arena-allocated) */
    size_t prused = sub->sub_prused + size + MEMCHECK_EXTRA;
    prused = __ALIGN(prused);
    if (prused <= sub->sub_prsize) {
      preload = (char *)sub->sub_preload + sub->sub_prused;
      sub->sub_prused = (unsigned)prused;
    }
#ifdef DEBUG
    SU_DEBUG_9(("sub_alloc: using %s memory\n", "preloaded")) ;
#endif
  }

  if (preload && zero) {
    data = memset(preload, 0, size);
  } else if (preload) {
    data = preload;
  } else if (zero) {
    data = calloc(1, size + MEMCHECK_EXTRA);
  } else {
    data = malloc(size + MEMCHECK_EXTRA);
  }

  if (data) {
    su_alloc_t *sua;

#if MEMCHECK_EXTRA
    /* Guard word past the end of the block */
    size_t term = 0 - size;
    memcpy((char *)data + size, &term, sizeof (term));
#endif

#ifdef DEBUG
    SU_DEBUG_9(("sub_alloc: data will be located at %p\n", data)) ;
#endif

    if (!preload)
      sub->sub_auto_all = 0;

    if (zero >= do_clone) {
      /* Prepare cloned home */
      su_home_t *subhome = data;

      assert(preload == 0);

      subhome->suh_blocks = su_hash_alloc(SUB_N);
      if (!subhome->suh_blocks)
        return (void)safefree(data), NULL;

      subhome->suh_size = (unsigned)size;
      subhome->suh_blocks->sub_parent = home;
      subhome->suh_blocks->sub_hauto = 0;
    }

    /* OK, add the block to the hash table. */
    sua = su_block_add(sub, data);
    assert(sua);
    sua->sua_size = (unsigned)size;
    sua->sua_home = zero > 1; /* mark sub-home entries */

    if (sub->sub_stats)
      su_home_stats_alloc(sub, data, preload, size, zero);
  }

  return data;
}
/** Get a list of matching records from cache.
 *
 * Performs two passes over the hash slot for @a domain: the first counts
 * the matching, unexpired records so the result array can be sized; the
 * second fills the array and bumps each record's reference count.
 *
 * @param cache          resolver cache
 * @param type           query type to match
 * @param domain         domain name to match
 * @param return_cached  output: NULL-terminated array of records
 *                       (allocated from the cache home; caller releases)
 *
 * @retval number of records returned (>= 0)
 * @retval -1 upon an error
 */
int sres_cache_get(sres_cache_t *cache,
                   uint16_t type,
                   char const *domain,
                   sres_record_t ***return_cached)
{
  sres_record_t **result = NULL;
  sres_rr_hash_entry_t **slot;
  int result_size, i, j, k;
  unsigned hash;
  time_t now;
  char b[8];

  if (!domain || !return_cached)
    return -1;

  *return_cached = NULL;

  SU_DEBUG_9(("%s(%p, %s, \"%s\") called\n", "sres_cache_get",
              (void *)cache, sres_record_type(type, b), domain));

  hash = sres_hash_key(domain);

  if (!LOCK(cache))
    return -1;

  time(&now);

  /* First pass: just count the number of rr:s for array allocation */
  slot = sres_htable_hash(cache->cache_hash, hash);
  i = sres_cache_get0(cache->cache_hash, slot, type, domain, now,
                      NULL, 0, NULL);
  if (i <= 0) {
    UNLOCK(cache);
    return 0;
  }

  /* i entries plus a NULL terminator; zeroed so unfilled slots are NULL */
  result_size = (sizeof *result) * (i + 1);
  result = su_zalloc(cache->cache_home, result_size);
  if (result == NULL) {
    UNLOCK(cache);
    return -1;
  }

  /* Second pass: add the rr pointers to the allocated array */
  j = sres_cache_get0(cache->cache_hash, slot, type, domain, now,
                      result, i, NULL);
  if (i != j) {
    /* Uh-oh. The cache changed between the two passes: back out. */
    SU_DEBUG_9(("%s(%p, %s, \"%s\") got %d != %d\n", "sres_cache_get",
                (void *)cache, sres_record_type(type, b), domain, i, j));
    /* Iterate over the i allocated entry slots, not result_size, which
     * is a BYTE count — the old loop read far past the array end. */
    for (k = 0; k < i; k++) {
      if (result[k])
        result[k]->sr_refcount--;
    }
    su_free(cache->cache_home, result);
    UNLOCK(cache); /* was missing: every other path releases the lock */
    return 0;
  }

  result[i] = NULL; /* NULL-terminate the array */

  UNLOCK(cache);

  SU_DEBUG_9(("%s(%p, %s, \"%s\") returned %d entries\n", "sres_cache_get",
              (void *)cache, sres_record_type(type, b), domain, i));

  *return_cached = result;

  return i;
}
/** Send to stream.
 *
 * Feeds the iovec array to the WebSocket layer, coalescing consecutive
 * small chunks into wstp_buffer (up to WSBUFSIZE bytes) before each
 * ws_feed_buf() call, then flushes with ws_send_buf().
 *
 * @return total bytes accepted by ws_feed_buf(), or -1 on a hard error.
 */
ssize_t tport_send_stream_ws(tport_t const *self, msg_t *msg,
                             msg_iovec_t iov[],
                             size_t iovlen)
{
  size_t i, j, n, m, size = 0;
  ssize_t nerror;
  tport_ws_t *wstp = (tport_ws_t *)self;

  enum { WSBUFSIZE = 2048 };

  for (i = 0; i < iovlen; i = j) {
    char *buf = wstp->wstp_buffer;
    unsigned wsbufsize = WSBUFSIZE;

    if (i + 1 == iovlen) {
      buf = NULL; /* Don't bother copying single chunk */
    }

    /* If the chunk already lives inside the staging buffer, shrink the
       usable space so copying never runs past the buffer end */
    if (buf &&
        (char *)iov[i].siv_base - buf < WSBUFSIZE &&
        (char *)iov[i].siv_base - buf >= 0) {
      wsbufsize = buf + WSBUFSIZE - (char *)iov[i].siv_base;
      assert(wsbufsize <= WSBUFSIZE);
    }

    /* Coalesce as many chunks as fit into the staging buffer */
    for (j = i, m = 0; buf && j < iovlen; j++) {
      if (m + iov[j].siv_len > wsbufsize) {
        break;
      }
      if (buf + m != iov[j].siv_base) {
        memcpy(buf + m, iov[j].siv_base, iov[j].siv_len);
      }
      m += iov[j].siv_len;
      iov[j].siv_len = 0;
    }

    if (j == i) {
      /* Nothing coalesced: send the single chunk directly */
      buf = iov[i].siv_base, m = iov[i].siv_len, j++;
    }
    else {
      /* NOTE(review): if the inner loop consumed every remaining chunk,
         j == iovlen here and this writes one past the array — confirm
         callers always size iov[] with slack. */
      iov[j].siv_base = buf, iov[j].siv_len = m;
    }

    nerror = ws_feed_buf(&wstp->ws, buf, m);

    SU_DEBUG_9(("tport_ws_writevec: vec %p %p %lu ("MOD_ZD")\n",
                (void *)&wstp->ws, (void *)iov[i].siv_base,
                (LU)iov[i].siv_len, nerror));

    if (nerror == -1) {
      int err = su_errno();
      if (su_is_blocking(err))
        break; /* would block: report what was sent so far */
      SU_DEBUG_3(("ws_write: %s\n", strerror(err)));
      return -1;
    }

    n = (size_t)nerror;
    size += n;

    /* Return if the write buffer is full for now */
    if (n != m)
      break;
  }

  ws_send_buf(&wstp->ws, WSOC_TEXT);

  return size;
}
/** Build a list of local IPv4 addresses and append it to *rresult.
 *
 * Enumerates kernel interfaces via SIOCGIFCONF (or platform variants),
 * filters them against @a hints (scope, interface name, index, up/down),
 * optionally maps addresses to V4MAPPED IPv6 form, resolves canonical
 * names via li_name(), and chains the results onto *rresult.
 *
 * @retval 0 on success, an ELI_* error code otherwise.
 */
static int localinfo4(su_localinfo_t const *hints, su_localinfo_t **rresult)
{
  su_localinfo_t *tbf = NULL, **lli = &tbf;
  su_localinfo_t *li = NULL, *li_first = NULL;
  su_sockaddr_t *su;
  int error = ELI_NOADDRESS;
  char *canonname = NULL;
  su_socket_t s;

#if SU_HAVE_IN6
  /* Extra bytes per entry when returning V4MAPPED IPv6 addresses */
  int su_xtra = (hints->li_flags & LI_V4MAPPED) ? sizeof(*su) : 0;
#else
  int const su_xtra = 0;
#endif

  struct ifconf ifc;
  int numifs;
  char *buffer;
  struct ifreq *ifr, *ifr_next;

#if HAVE_OPEN_C
  su_sockaddr_t *sa;
  socklen_t salen = sizeof(*sa);
#endif

  /* A dummy datagram socket is needed only for the ioctl()s */
  s = su_socket(AF_INET, SOCK_DGRAM, 0);
  if (s == -1) {
    SU_DEBUG_1(("su_localinfo: su_socket failed: %s\n",
                su_strerror(su_errno())));
    return ELI_SYSTEM;
  }

# if HAVE_IFNUM
  /* Get the list of known IP address from the kernel */
  if (ioctl(s, SIOCGIFNUM, (char *) &numifs) < 0) {
    /* can't get number of interfaces -- fall back */
    SU_DEBUG_1(("su_localinfo: SIOCGIFNUM failed: %s\n",
                su_strerror(su_errno())));
    error = ELI_SYSTEM;
    goto err;
  }

  SU_DEBUG_9(("su_localinfo: %d active interfaces according to SIOCGIFNUM\n",
              numifs));

  if (numifs < 0)
# endif
    /* Default to 64 interfaces. Enough? */
    numifs = 64;

  if (numifs == 0)
    return 0;

  /*
   * Allocate memory for SIOCGIFCONF ioctl buffer. This memory block is also
   * used as li_first, first localinfo struct that is returned, so it can be
   * freed by freelocalinfo() without any complications.
   */
  ifc.ifc_len = numifs * sizeof (struct ifreq);
  buffer = malloc(sizeof(su_localinfo_t) + ifc.ifc_len + su_xtra);
  if (!buffer) {
    SU_DEBUG_1(("su_localinfo: memory exhausted\n"));
    error = ELI_MEMORY;
    goto err;
  }

  li_first = (su_localinfo_t *)buffer;
  memset(li_first, 0, sizeof(su_localinfo_t) + su_xtra);
  ifc.ifc_buf = buffer + sizeof(su_localinfo_t) + su_xtra;

#if HAVE_OPEN_C
  if (ioctl(s, SIOCGIFACTIVECONF, (char *)&ifc) < 0) {
    SU_DEBUG_1(("su_localinfo: SIOCGIFCONF failed: %s\n",
                su_strerror(su_errno())));
    error = ELI_SYSTEM;
    goto err;
  }
#else
  if (ioctl(s, SIOCGIFCONF, (char *)&ifc) < 0) {
    SU_DEBUG_1(("su_localinfo: SIOCGIFCONF failed: %s\n",
                su_strerror(su_errno())));
    error = ELI_SYSTEM;
    goto err;
  }
#endif

  /* End of the ifreq array actually filled by the kernel */
  buffer = ifc.ifc_buf + ifc.ifc_len;

  for (ifr = ifc.ifc_req;
       (void *)ifr < (void *)buffer;
       ifr = ifr_next) {
    struct ifreq ifreq[1];
    int scope, if_index, flags = 0, gni_flags = 0;
    char *if_name;
#if SU_HAVE_IN6
    su_sockaddr_t su2[1];
#endif

#if SA_LEN
    /* Variable-length sockaddrs: step by sa_len when it exceeds the
       embedded sockaddr.
       NOTE(review): with SA_LEN defined the trailing `else` binds to the
       statement following #endif (`if_name = ...`), since the #else arm
       is compiled out — confirm this matches upstream intent. */
    if (ifr->ifr_addr.sa_len > sizeof(ifr->ifr_addr))
      ifr_next = (struct ifreq *)
        (ifr->ifr_addr.sa_len + (char *)(&ifr->ifr_addr));
    else
#else
      ifr_next = ifr + 1;
#endif

    if_name = ifr->ifr_name;

#if defined(SIOCGIFINDEX)
    ifreq[0] = *ifr;
    if (ioctl(s, SIOCGIFINDEX, ifreq) < 0) {
      SU_DEBUG_1(("su_localinfo: SIOCGIFINDEX failed: %s\n",
                  su_strerror(su_errno())));
      error = ELI_SYSTEM;
      goto err;
    }
#if HAVE_IFR_INDEX
    if_index = ifreq->ifr_index;
#elif HAVE_IFR_IFINDEX
    if_index = ifreq->ifr_ifindex;
#else
#error Unknown index field in struct ifreq
#endif
#else
#warning su_localinfo() cannot map interface name to number
    if_index = 0;
#endif

    SU_DEBUG_9(("su_localinfo: if %s with index %d\n", if_name, if_index));

#if HAVE_OPEN_C
    /* Symbian/Open C path: single address from su_get_local_ip_addr() */
    su_close(s);

    li = calloc(1, sizeof(su_localinfo_t));
    sa = calloc(1, sizeof(su_sockaddr_t));

    if (su_get_local_ip_addr(sa) < 0)
      goto err;

    li->li_family = sa->su_family;
    li->li_scope = LI_SCOPE_GLOBAL /* scope */;
    li->li_index = if_index;
    li->li_addrlen = su_sockaddr_size(sa);
    li->li_addr = sa;

    if ((error = li_name(hints, gni_flags, sa, &canonname)) < 0)
      goto err;

    if (canonname) {
      if (strchr(canonname, ':') ||
          strspn(canonname, "0123456789.") == strlen(canonname))
        li->li_flags |= LI_NUMERIC;
    }
    else
      li->li_flags = 0;

    li->li_canonname = canonname;
    canonname = NULL;

    *rresult = li;
    return 0;
#endif

#if defined(SIOCGIFFLAGS)
    ifreq[0] = *ifr;
    if (ioctl(s, SIOCGIFFLAGS, ifreq) < 0) {
      SU_DEBUG_1(("su_localinfo: SIOCGIFFLAGS failed: %s\n",
                  su_strerror(su_errno())));
      error = ELI_SYSTEM;
      goto err;
    }
    /* Do not include interfaces that are down unless explicitly asked */
    if ((ifreq->ifr_flags & IFF_UP) == 0 && (hints->li_flags & LI_DOWN) == 0) {
      SU_DEBUG_9(("su_localinfo: if %s with index %d is down\n",
                  if_name, if_index));
      continue;
    }
#elif defined(SIOCGIFACTIVECONF)
    /* Handled above in SIOCGIFACTIVECONF vs. SIOCGIFCONF*/
#else
#error su_localinfo() cannot determine interface status
#endif

#if 0
    *ifreq = *ifr;
    ifreq->ifr_addr.sa_family = AF_INET;
    if (ioctl(s, SIOCGIFADDR, ifreq) < 0) {
      SU_DEBUG_1(("su_localinfo: SIOCGIFADDR failed: %s\n",
                  su_strerror(su_errno())));
      error = ELI_SYSTEM;
      goto err;
    }
    ifr->ifr_addr = ifreq->ifr_addr;
#endif

    su = (su_sockaddr_t *)&ifr->ifr_addr;

    /* Skip unconfigured interfaces */
    if (SU_HAS_INADDR_ANY(su))
      continue;

    scope = li_scope4(su->su_sin.sin_addr.s_addr);

    /* Apply hint filters: scope mask, interface name, interface index */
    if ((hints->li_scope && (hints->li_scope & scope) == 0) ||
        (hints->li_ifname && strcmp(hints->li_ifname, if_name) != 0) ||
        (hints->li_index && hints->li_index != if_index))
      continue;

#if SU_HAVE_IN6
    if (su_xtra) {
      /* Map IPv4 address to IPv6 address */
      memset(su2, 0, sizeof(*su2));
      su2->su_family = AF_INET6;
      ((int32_t*)&su2->su_sin6.sin6_addr)[2] = htonl(0xffff);
      ((int32_t*)&su2->su_sin6.sin6_addr)[3] = su->su_sin.sin_addr.s_addr;
      su = su2;
    }
#endif

    if (scope == LI_SCOPE_HOST || scope == LI_SCOPE_LINK)
      gni_flags = NI_NUMERICHOST;

    if ((error = li_name(hints, gni_flags, su, &canonname)) < 0)
      goto err;
    else if (error > 0)
      continue;

    /* Purely numeric canonical names get the LI_NUMERIC flag */
    if (canonname)
      if (strchr(canonname, ':') ||
          strspn(canonname, "0123456789.") == strlen(canonname))
        flags |= LI_NUMERIC;

    if (li_first)
      li = li_first; /* Use li_first with all ifr structs to be freed */
    else if (!(li = calloc(1, (sizeof *li) + su_xtra))) {
      error = ELI_MEMORY;
      goto err;
    }

    if (!tbf)
      tbf = li;
    *lli = li;
    lli = &li->li_next;

    if (su_xtra)
      /* Copy the (possibly mapped) address just past the struct */
      su = (su_sockaddr_t *)memcpy(li + 1, su, su_xtra);

    li->li_flags = flags;
    li->li_family = su->su_family;
    li->li_scope = scope;
    li->li_index = if_index;
    li->li_addrlen = su_sockaddr_size(su);
    li->li_addr = su;
    li->li_canonname = canonname;
    li->li_ifname = if_name;
    canonname = NULL;
    li_first = NULL;
  }

  if (canonname)
    free(canonname);
  if (li_first)
    free(li_first);
  su_close(s);

  if (tbf)
    *rresult = tbf;
  return 0;

err:
  if (canonname)
    free(canonname);
  if (li_first)
    free(li_first);
  su_freelocalinfo(tbf);
  su_close(s);

  return error;
}
/**NUA event callback bridging sofia-sip events into Lua.
 *
 * Looks up the Lua state stored as the NUA @a magic, finds the Lua
 * callback registered for @a event (falling back to a default callback
 * slot), and invokes it with nine arguments:
 * (event, status, phrase, nua-userdatum, magic, nh-userdatum,
 *  nh-env-or-nil, sip-lightuserdata-or-nil, tags-lightuserdata-or-nil).
 *
 * @param event  NUA event code (also the key into the callback table)
 * @param status SIP-like status code for the event
 * @param phrase status phrase text
 * @param nua    NUA agent; key into the luasofia userdata table
 * @param magic  application context; here it carries the lua_State
 * @param nh     operation handle, may be NULL; key into the userdata table
 * @param hmagic handle context (unused here, forwarded only in the log)
 * @param sip    parsed SIP message, may be NULL
 * @param tags   event tag list, may be NULL
 *
 * NOTE(review): the stack discipline below is exact — each early return
 * pops precisely the values pushed so far (1, 3 or 4), and the final
 * lua_pop(L, 3) removes the nua userdatum, its env table and the
 * callback table left under the pcall arguments.  Do not reorder.
 */
static void nua_event_callback(nua_event_t event,
                               int status, char const *phrase,
                               nua_t *nua, nua_magic_t *magic,
                               nua_handle_t *nh, nua_hmagic_t *hmagic,
                               sip_t const *sip, tagi_t tags[])
{
    int error;
    /* The lua_State was registered as the NUA magic at creation time. */
    lua_State *L = (lua_State *)magic;

    SU_DEBUG_9(("nua_event_callback: event[%s] status[%d] phrase[%s] "
                "nua[%p] magic[%p] nh[%p] hmagic[%p] sip[%p] tags[%p]\n",
                nua_event_name(event), status, phrase,
                nua, magic, nh, hmagic, sip, tags));

    /* put nua userdatum at stack and check if it is ok. */
    luasofia_userdata_table_get(L, nua);
    if (lua_isnil(L, -1)) {
        SU_DEBUG_1(("nua_event_callback: nua userdata not found on userdata_table!\n"));
        return;
    }
    luaL_checkudata(L, -1, NUA_MTABLE);

    /* put env table at stack */
    lua_getfenv(L, -1);

    /* put callback table at stack */
    lua_rawgeti(L, -1, ENV_CALLBACK_INDEX);
    if (lua_isnil(L, -1)) {
        /* pop callback-nil, env table and nua userdatum */
        lua_pop(L, 3);
        SU_DEBUG_1(("nua_event_callback: callback table not found!\n"));
        return;
    }

    /* get event callback */
    lua_rawgeti(L, -1, event);
    if (lua_isnil(L, -1)) {
        lua_pop(L, 1);
        /* get event default callback */
        lua_rawgeti(L, -1, NUA_EVENT_DEFAULT_INDEX);
        if (lua_isnil(L, -1)) {
            /* pop default-nil, callback table, env table, nua userdatum */
            lua_pop(L, 4);
            SU_DEBUG_9(("nua_event_callback: event[%s] callback not found!\n",
                        nua_event_name(event)));
            return;
        }
    }

    /* Push the nine callback arguments. */
    lua_pushinteger(L, event);
    lua_pushinteger(L, status);
    lua_pushstring(L, phrase);
    /* arg 4: the nua userdatum pushed at the top of this function
       (now 7 slots down: udata, env, cbtable, fn, event, status, phrase) */
    lua_pushvalue(L, -7);

    /* arg 5: get magic field from the env table (same 7-slot offset) */
    lua_rawgeti(L, -7, ENV_MAGIC_INDEX);

    if (nh) {
        /* put nua_handle userdatum at stack */
        luasofia_userdata_table_get(L, nh);
        if (lua_isnil(L, -1)) {
            /* create a new nua_handle userdatum */
            lua_pop(L, 1);
            luasofia_nua_handle_create_userdata(L, nh);
            /* freshly created handle has no env table to pass yet */
            lua_pushnil(L);
        } else {
            /* check if it is a nua_handle */
            luaL_checkudata(L, -1, NUA_HANDLE_MTABLE);
            /* put env table at stack */
            lua_getfenv(L, -1);
        }
    } else {
        /* no handle: args 6 and 7 are both nil */
        lua_pushnil(L);
        lua_pushnil(L);
    }

    /* args 8 and 9: raw pointers exposed as light userdata, or nil */
    sip ? lua_pushlightuserdata(L, (void*)sip) : lua_pushnil(L);
    tags ? lua_pushlightuserdata(L, (void*)tags) : lua_pushnil(L);

    SU_DEBUG_9(("nua_event_callback: calling lua callback\n"));
    if ((error = lua_pcall(L, 9, 0, 0)) != 0) {
        if (error == LUA_ERRMEM)
            SU_DEBUG_0(("nua_event_callback: memory allocation error! error[%s]\n",
                        lua_tostring(L, -1)));
        else
            SU_DEBUG_1(("nua_event_callback: error on calling callback! error[%s]\n",
                        lua_tostring(L, -1)));
        /* pop the error message left by lua_pcall */
        lua_pop(L, 1);
    }
    /* pop nua userdatum, env table and callback table */
    lua_pop(L, 3);
}
/**Update registered socket.
 *
 * Deregisters @a old_socket from the root reactor and registers
 * @a new_socket in its place.  Invoked by the resolver whenever its
 * socket changes; calling it with both sockets equal to INVALID_SOCKET
 * tears down the whole #sres_sofia_t context (deregisters every slot,
 * destroys the timer and frees @a srs).
 *
 * @param srs        sofia resolver context (NULL is a no-op)
 * @param new_socket socket to register, or INVALID_SOCKET
 * @param old_socket socket to deregister, or INVALID_SOCKET
 *
 * @retval 0 if success
 * @retval -1 upon failure (errno set)
 */
static int sres_sofia_update(sres_sofia_t *srs,
			     su_socket_t new_socket,
			     su_socket_t old_socket)
{
  char const *what = NULL;
  su_wait_t wait[1];
  sres_sofia_register_t *reg = NULL;
  sres_sofia_register_t *old_reg = NULL;
  int i, index = -1, error = 0;
  int N = SRES_MAX_NAMESERVERS;

  SU_DEBUG_9(("sres_sofia_update(%p, %d, %d)\n",
	      (void *)srs, (int)new_socket, (int)old_socket));

  if (srs == NULL)
    return 0;

  if (srs->srs_root == NULL)
    return -1;

  if (old_socket == new_socket) {
    if (old_socket == INVALID_SOCKET) {
      sres_resolver_set_async(srs->srs_resolver, sres_sofia_update, NULL, 0);
      /* Destroy srs: deregister every active slot, kill timer, free */
      for (i = 0; i < N; i++) {
	if (!srs->srs_reg[i].reg_index)
	  continue;
	su_root_deregister(srs->srs_root, srs->srs_reg[i].reg_index);
	memset(&srs->srs_reg[i], 0, sizeof(srs->srs_reg[i]));
      }
      su_timer_destroy(srs->srs_timer), srs->srs_timer = NULL;
      su_free(NULL, srs);
    }
    return 0;
  }

  /* Find the slot currently holding old_socket, if any */
  if (old_socket != INVALID_SOCKET)
    for (i = 0; i < N; i++)
      if ((srs->srs_reg + i)->reg_socket == old_socket) {
	old_reg = srs->srs_reg + i;
	break;
      }

  if (new_socket != INVALID_SOCKET) {
    if (old_reg == NULL) {
      /* Find a free slot for the new socket */
      for (i = 0; i < N; i++) {
	if (!(srs->srs_reg + i)->reg_ptr)
	  break;
      }
      /* BUGFIX: when the table is full the loop exits with i == N; the
	 previous test (i > N) could never be true, so srs->srs_reg[N]
	 was used out of bounds. */
      if (i == N)
	return su_seterrno(ENOMEM);

      reg = srs->srs_reg + i;
    }
    else
      reg = old_reg;
  }

  if (reg) {
    if (su_wait_create(wait, new_socket, SU_WAIT_IN | SU_WAIT_ERR) == -1) {
      reg = NULL;
      what = "su_wait_create";
      error = su_errno();
    }
    else {
      /* BUGFIX: only attempt registration (and only destroy the wait on
	 failure) when the wait object was actually created; previously a
	 su_wait_create() failure fell through into the index < 0 branch,
	 clobbering what/error and destroying an uninitialized wait. */
      index = su_root_register(srs->srs_root, wait, sres_sofia_poll, reg, 0);

      if (index < 0) {
	reg = NULL;
	what = "su_root_register";
	error = su_errno();
	su_wait_destroy(wait);
      }
    }
  }

  if (old_reg) {
    if (old_socket == srs->srs_socket)
      srs->srs_socket = INVALID_SOCKET;
    su_root_deregister(srs->srs_root, old_reg->reg_index);
    memset(old_reg, 0, sizeof *old_reg);
  }

  if (reg) {
    /* Commit the new registration */
    srs->srs_socket = new_socket;
    reg->reg_ptr = srs;
    reg->reg_socket = new_socket;
    reg->reg_index = index;
  }

  if (!what)
    return 0;			/* success */

  SU_DEBUG_3(("sres: %s: %s\n", what, su_strerror(error)));

  return su_seterrno(error);
}
/**Open an HTTP CONNECT tunnel through a proxy.
 *
 * Builds a "CONNECT host:port HTTP/1.1" request for the destination in
 * @a ai, connects the transport to the configured proxy, queues a
 * response message to swallow the proxy's reply, and sends the CONNECT.
 *
 * @param pri primary transport (actually a tport_http_connect_t)
 * @param ai  address of the final destination to tunnel to
 * @param tpn transport name of the destination
 *
 * @return connected secondary transport, or NULL on error
 */
static tport_t *tport_http_connect(tport_primary_t *pri, su_addrinfo_t *ai,
				   tp_name_t const *tpn)
{
  tport_http_connect_t *thc = (tport_http_connect_t *)pri;
  tport_http_connect_instance_t *thci;
  tport_master_t *mr = pri->pri_master;

  msg_t *msg, *response;

  char hostport[TPORT_HOSTPORTSIZE];

  tport_t *tport;
  http_request_t *rq;

  msg = msg_create(http_default_mclass(), 0);

  if (!msg)
    return NULL;

  tport_hostport(hostport, sizeof hostport, (void *)ai->ai_addr, 1);

  rq = http_request_format(msg_home(msg), "CONNECT %s HTTP/1.1", hostport);

  /* BUGFIX: check rq for allocation failure before inserting it */
  if (rq == NULL ||
      msg_header_insert(msg, NULL, (void *)rq) < 0
      || msg_header_add_str(msg, NULL,
			    "User-Agent: Sofia-SIP/" VERSION "\n") < 0
      || msg_header_add_str(msg, NULL, "Proxy-Connection: keepalive\n") < 0
      || msg_header_add_make(msg, NULL, http_host_class, hostport) < 0
      || msg_header_add_make(msg, NULL, http_separator_class, "\r\n") < 0
      || msg_serialize(msg, NULL) < 0
      || msg_prepare(msg) < 0)
    return (void)msg_destroy(msg), NULL;

  /*
   * Create a response message that ignores the body
   * if there is no Content-Length
   */
  response = msg_create(http_default_mclass(), mr->mr_log | MSG_FLG_MAILBOX);

  /* BUGFIX: response was previously used unchecked on allocation failure */
  if (!response)
    return (void)msg_destroy(msg), NULL;

  tport = tport_base_connect(pri, thc->thc_proxy, ai, tpn);
  if (!tport) {
    msg_destroy(msg);
    msg_destroy(response);
    return tport;
  }

  /* Park the response message on the new secondary transport so the
     proxy's reply to CONNECT is consumed before tunneled traffic */
  thci = (tport_http_connect_instance_t*)tport;

  thci->thci_response = response;
  tport->tp_msg = response;
  msg_set_next(response, thci->thci_stackmsg =
	       tport_msg_alloc(tport, 512));

  if (tport_send_msg(tport, msg, tpn, NULL) < 0) {
    /* BUGFIX: corrected typo "tpot_http_connect" in log message */
    SU_DEBUG_9(("tport_send_msg failed in tport_http_connect\n" VA_NONE));
    msg_destroy(msg);
    tport_zap_secondary(tport);
    return NULL;
  }

  tport_set_secondary_timer(tport);

  return tport;
}