/* Return @data to @pool, or free it outright when the pool is full.
 *
 * Once the pool already caches EIO_PROGRESS_LIMIT buffers, the memory is
 * released back to the system, the global usage accounting is updated,
 * and any producer suspended on the burst limit is woken up. */
static void
_eio_pool_free(Eio_Alloc_Pool *pool, void *data)
{
   if (pool->count < EIO_PROGRESS_LIMIT)
     {
        /* Pool has room: cache the buffer for later reuse. */
        eina_lock_take(&(pool->lock));
        eina_trash_push(&pool->trash, data);
        pool->count++;
        eina_lock_release(&(pool->lock));
        return;
     }

   /* Pool is full: give the memory back and update global accounting. */
   eina_spinlock_take(&memory_pool_lock);
   memory_pool_usage -= pool->mem_size;
   eina_spinlock_release(&memory_pool_lock);
   free(data);

   /* Wake any worker suspended because the burst limit was reached. */
   if (memory_pool_limit > 0 && memory_pool_usage < memory_pool_limit)
     {
        eina_lock_take(&(memory_pool_mutex));
        if (memory_pool_suspended)
          eina_condition_broadcast(&(memory_pool_cond));
        eina_lock_release(&(memory_pool_mutex));
     }
}
/* Shrink the list of realized store items down to the configured cache
 * size (st->cache_max), unfetching the oldest entries first. Items
 * currently on screen (st->realized_count) are never counted as cache. */
static void
_store_cache_trim(Elm_Store *st)
{
   while ((st->realized) &&
          (((int)eina_list_count(st->realized) - st->realized_count)
           > st->cache_max))
     {
        Elm_Store_Item *sti = st->realized->data;
        if (sti->realized)
          {
             /* Drop the head of the realized list (the oldest entry). */
             st->realized = eina_list_remove_list(st->realized, st->realized);
             sti->realized = EINA_FALSE;
          }
        eina_lock_take(&sti->lock);
        if (!sti->fetched)
          {
             /* Fetch still in flight: drop the lock while cancelling the
              * thread so the fetch callback can take it, then re-take. */
             eina_lock_release(&sti->lock);
             ELM_SAFE_FREE(sti->fetch_th, ecore_thread_cancel);
             eina_lock_take(&sti->lock);
          }
        sti->fetched = EINA_FALSE;
//// let fetch/unfetch do the locking
//        eina_lock_release(&sti->lock);
        /* NOTE(review): the unfetch callback runs with sti->lock held —
         * confirm callbacks never take the lock themselves (see the
         * commented-out release/take pair above). */
        if (st->cb.unfetch.func)
          st->cb.unfetch.func(st->cb.unfetch.data, sti);
//        eina_lock_take(&sti->lock);
        sti->data = NULL;
        eina_lock_release(&sti->lock);
     }
}
/* Cancel a pending async texture preload for @tex.
 *
 * If @tex is the texture the async loader is currently working on, flag
 * the upload as cancelled and release its resources (taking the render
 * lock while the loader thread is running). Otherwise just remove the
 * texture from the pending queue. */
void
evas_gl_preload_pop(Evas_GL_Texture *tex)
{
   Evas_GL_Texture_Async_Preload *async;
   Eina_List *l;

   if (!async_loader_init) return;

   eina_lock_take(&async_loader_lock);

   if (async_gl_make_current && async_current && async_current->tex == tex)
     {
        Eina_Bool running = async_loader_running;
        evas_gl_make_current_cb tmp_cb = async_gl_make_current;
        Evas_GL_Texture_Async_Preload *current = async_current;
        void *tmp_data = async_engine_data;

        async_current_cancel = EINA_TRUE;
        async_current = NULL;
        eina_lock_release(&async_loader_lock);

        if (running) evas_gl_preload_render_lock(tmp_cb, tmp_data);

        evas_gl_common_texture_free(current->tex, EINA_FALSE);
        /* BUG FIX: the three cache-entry expressions below were garbled
         * ("&current" had become mojibake), making the code invalid. */
#ifdef EVAS_CSERVE2
        if (evas_cache2_image_cached(&current->im->cache_entry))
          evas_cache2_image_close(&current->im->cache_entry);
        else
#endif
          evas_cache_image_drop(&current->im->cache_entry);
        free(current);

        if (running) evas_gl_preload_render_unlock(tmp_cb, tmp_data);
        return;
     }

   EINA_LIST_FOREACH(async_loader_tex, l, async)
     if (async->tex == tex)
       {
          async_loader_tex = eina_list_remove_list(async_loader_tex, l);

          evas_gl_common_texture_free(async->tex, EINA_FALSE);
#ifdef EVAS_CSERVE2
          if (evas_cache2_image_cached(&async->im->cache_entry))
            evas_cache2_image_close(&async->im->cache_entry);
          else
#endif
            evas_cache_image_drop(&async->im->cache_entry);
          free(async);
          break;
       }

   eina_lock_release(&async_loader_lock);
}
/* Hand the accumulated @pack over to the main loop once EIO_PACKED_TIME
 * has elapsed since *@start (returns NULL: ownership transferred);
 * otherwise keep accumulating and return @pack. Also throttles the
 * worker thread while the memory burst limit is exceeded. */
Eina_List *
eio_pack_send(Ecore_Thread *thread, Eina_List *pack, double *start)
{
   double now = ecore_time_get();

   if (now - *start > EIO_PACKED_TIME)
     {
        *start = now;
        ecore_thread_feedback(thread, pack);
        return NULL; /* ownership passed to the main loop */
     }

   /* Too much memory in flight: sleep until a consumer frees some. */
   if (memory_pool_limit > 0 && memory_pool_usage > memory_pool_limit)
     {
        eina_lock_take(&(memory_pool_mutex));
        memory_pool_suspended = EINA_TRUE;
        eina_condition_wait(&(memory_pool_cond));
        memory_pool_suspended = EINA_FALSE;
        eina_lock_release(&(memory_pool_mutex));
     }

   return pack;
}
/* Release a reference on a shared string.
 *
 * Strings of length 0 or 1 are static singletons and need no work,
 * length 2-3 live in the dedicated small-string pool, and anything
 * longer goes through the generic share backend. */
EAPI void
eina_stringshare_del(Eina_Stringshare *str)
{
   int len = 4; /* 4 means "long string, handled by the share backend" */

   if (!str) return;

   /* Cheap length probe: only the first 4 bytes matter. */
   if (str[0] == '\0') len = 0;
   else if (str[1] == '\0') len = 1;
   else if (str[2] == '\0') len = 2;
   else if (str[3] == '\0') len = 3;

   /* Singleton strings never need releasing. */
   if (len < 2) return;

   if (len < 4)
     {
        /* Small-string pool, protected by its own mutex. */
        eina_share_common_population_del(stringshare_share, len);
        eina_lock_take(&_mutex_small);
        _eina_stringshare_small_del(str, len);
        eina_lock_release(&_mutex_small);
        return;
     }

   if (!eina_share_common_del(stringshare_share, str))
     CRITICAL("EEEK trying to del non-shared stringshare \"%s\"", str);
}
/* Queue @async on the preload list under the loader lock.
 * Returns EINA_FALSE when the async loader is not initialized,
 * EINA_TRUE once the request has been queued. */
Eina_Bool
evas_gl_preload_push(Evas_GL_Texture_Async_Preload *async)
{
   if (!async_loader_init) return EINA_FALSE;

   eina_lock_take(&async_loader_lock);
   async_loader_tex = eina_list_append(async_loader_tex, async);
   eina_lock_release(&async_loader_lock);

   return EINA_TRUE;
}
/* Return the stored length of @tmpstr when it was allocated through
 * this module; otherwise fall back to strlen().
 *
 * NOTE(review): the fallback returns strlen()+1 (counting the NUL)
 * while the stored s->length does not include it — this looks
 * inconsistent but is preserved here; confirm against the documented
 * API contract before changing it. */
EAPI size_t
eina_tmpstr_strlen(Eina_Tmpstr *tmpstr)
{
   Str *it;

   if (!tmpstr) return 0;
   if (!strs) return strlen(tmpstr) + 1;

   eina_lock_take(&_mutex);
   for (it = strs; it; it = it->next)
     {
        if (it->str == tmpstr)
          {
             size_t known = it->length;

             eina_lock_release(&_mutex);
             return known;
          }
     }
   eina_lock_release(&_mutex);

   return strlen(tmpstr) + 1;
}
/* Set the memory burst limit; if a worker is suspended and current
 * usage is already below the new limit, wake it immediately. */
EAPI void
eio_memory_burst_limit_set(size_t limit)
{
   eina_lock_take(&(memory_pool_mutex));
   memory_pool_limit = limit;
   if (memory_pool_suspended && memory_pool_usage < memory_pool_limit)
     eina_condition_broadcast(&(memory_pool_cond));
   eina_lock_release(&(memory_pool_mutex));
}
/* Append @order to the pending thread-safe call list; when the list was
 * previously empty, poke the main loop through the wakeup pipe so it
 * starts draining. */
static void
_ecore_main_loop_thread_safe_call(Ecore_Safe_Call *order)
{
   Eina_Bool was_empty;

   eina_lock_take(&_thread_safety);
   was_empty = !_thread_cb;
   _thread_cb = eina_list_append(_thread_cb, order);
   if (was_empty)
     ecore_pipe_write(_thread_call, &wakeup, sizeof (int));
   eina_lock_release(&_thread_safety);
}
EAPI int ecore_thread_main_loop_begin(void) { Ecore_Safe_Call *order; if (eina_main_loop_is()) { return ++_thread_loop; } order = malloc(sizeof (Ecore_Safe_Call)); if (!order) return -1; eina_lock_take(&_thread_id_lock); order->current_id = ++_thread_id_max; if (order->current_id < 0) { _thread_id_max = 0; order->current_id = ++_thread_id_max; } eina_lock_release(&_thread_id_lock); eina_lock_new(&order->m); eina_condition_new(&order->c, &order->m); order->suspend = EINA_TRUE; _ecore_main_loop_thread_safe_call(order); eina_lock_take(&order->m); while (order->current_id != _thread_id) eina_condition_wait(&order->c); eina_lock_release(&order->m); eina_main_loop_define(); _thread_loop = 1; return EINA_TRUE; }
/* Fragment of the main-loop handler draining the pending thread-safe
 * calls. A suspend request performs the begin/end handshake with the
 * waiting thread; a sync call runs inline and signals its waiter; an
 * async call just runs and is freed. */
EINA_LIST_FREE(callback, call)
  {
     if (call->suspend)
       {
          /* Hand the main loop over to the requesting thread... */
          eina_lock_take(&_thread_mutex);

          eina_lock_take(&call->m);
          _thread_id = call->current_id;
          eina_condition_broadcast(&call->c);
          eina_lock_release(&call->m);

          /* ...and block here until it calls
           * ecore_thread_main_loop_end() (which sets _thread_id_update). */
          while (_thread_id_update != _thread_id)
            eina_condition_wait(&_thread_cond);
          eina_lock_release(&_thread_mutex);

          eina_main_loop_define();

          /* Acknowledge: mark the main loop as owned by no thread again. */
          eina_lock_take(&_thread_feedback_mutex);
          _thread_id = -1;
          eina_condition_broadcast(&_thread_feedback_cond);
          eina_lock_release(&_thread_feedback_mutex);

          _thread_safe_cleanup(call);
          free(call);
       }
     else if (call->sync)
       {
          /* Synchronous call: run it, store the result, wake the waiter.
           * NOTE(review): broadcast happens without holding call->m —
           * verify the waiting thread cannot miss this signal. */
          call->data = call->cb.sync(call->data);
          eina_condition_broadcast(&call->c);
       }
     else
       {
          /* Fire-and-forget asynchronous call. */
          call->cb.async(call->data);
          free(call);
       }
  }
/* Block until @barrier->needed threads have arrived; the last arrival
 * resets the counter and releases everyone. Always returns EINA_TRUE.
 *
 * NOTE(review): waiters do not re-check a predicate after
 * eina_condition_wait() — a spurious wakeup would release one early;
 * confirm the underlying condition implementation rules that out. */
Eina_Bool
enesim_barrier_wait(Enesim_Barrier *barrier)
{
   eina_lock_take(&(barrier->cond_lock));
   if (++barrier->called == barrier->needed)
     {
        /* Last thread in: reset for reuse and wake all waiters. */
        barrier->called = 0;
        eina_condition_broadcast(&(barrier->cond));
     }
   else
     {
        eina_condition_wait(&(barrier->cond));
     }
   eina_lock_release(&(barrier->cond_lock));
   return EINA_TRUE;
}
/* Return @ptr to the one-big mempool @data.
 *
 * Pointers inside the preallocated base slab go back on the free list
 * (trash); pointers that were overflow-allocated with malloc() are
 * unlinked from over_list and freed for real. */
static void
eina_one_big_free(void *data, void *ptr)
{
   One_Big *pool = data;

   /* A failed lock take is only tolerated when the pool is used from
    * its owning thread — asserted in debug builds. */
   if (!eina_lock_take(&pool->mutex))
     {
#ifdef EINA_HAVE_DEBUG_THREADS
        assert(eina_thread_equal(pool->self, eina_thread_self()));
#endif
     }

   /* Does @ptr live inside the preallocated slab? */
   if ((void *)pool->base <= ptr
       && ptr < (void *)(pool->base + (pool->max * pool->item_size)))
     {
        eina_trash_push(&pool->empty, ptr);
        pool->usage--;
#ifndef NVALGRIND
        VALGRIND_MEMPOOL_FREE(pool, ptr);
#endif
     }
   else
     {
#ifndef NDEBUG
        Eina_Inlist *it;
#endif
        Eina_Inlist *il;

        il = OVER_MEM_TO_LIST(pool, ptr);

#ifndef NDEBUG
        /* Debug builds verify the pointer really is one of our
         * overflow allocations before unlinking it. */
        for (it = pool->over_list; it != NULL; it = it->next)
          if (it == il) break;
        assert(it != NULL);
#endif

        pool->over_list = eina_inlist_remove(pool->over_list, il);

#ifndef NVALGRIND
        VALGRIND_MEMPOOL_FREE(pool, ptr);
#endif

        free(ptr);
        pool->over--;
     }
   eina_lock_release(&pool->mutex);
}
/* Undo one level of ecore_thread_main_loop_begin().
 *
 * Returns the remaining nesting depth (> 0 while still locked), 0 once
 * the main loop has been handed back, or -1 on misuse (not locked, or
 * called from a thread that does not currently hold the main loop). */
EAPI int
ecore_thread_main_loop_end(void)
{
   int current_id;

   if (_thread_loop == 0)
     {
        ERR("the main loop is not locked ! No matching call to ecore_thread_main_loop_begin().");
        return -1;
     }

   /* until we unlock the main loop, this thread has the main loop id */
   if (!eina_main_loop_is())
     {
        ERR("Not in a locked thread !");
        return -1;
     }

   _thread_loop--;
   if (_thread_loop > 0)
     return _thread_loop;

   current_id = _thread_id;

   /* Tell the blocked main loop it may resume... */
   eina_lock_take(&_thread_mutex);
   _thread_id_update = _thread_id;
   eina_condition_broadcast(&_thread_cond);
   eina_lock_release(&_thread_mutex);

   /* ...and wait until it acknowledges: either _thread_id becomes -1
    * (nobody owns the loop) or a different thread takes ownership. */
   eina_lock_take(&_thread_feedback_mutex);
   while (current_id == _thread_id && _thread_id != -1)
     eina_condition_wait(&_thread_feedback_cond);
   eina_lock_release(&_thread_feedback_mutex);

   return 0;
}
static void eina_one_big_shutdown(void *data) { One_Big *pool = data; if (!pool) return; if (!eina_lock_take(&pool->mutex)) { #ifdef EINA_HAVE_DEBUG_THREADS assert(eina_thread_equal(pool->self, eina_thread_self())); #endif } if (pool->over > 0) { // FIXME: should we warn here? one_big mempool exceeded its alloc and now // mempool is cleaning up the mess created. be quiet for now as we were before // but edje seems to be a big offender at the moment! bad cedric! :) // WRN( // "Pool [%s] over by %i. cleaning up for you", // pool->name, pool->over); while (pool->over_list) { Eina_Inlist *il = pool->over_list; void *ptr = OVER_MEM_FROM_LIST(pool, il); pool->over_list = eina_inlist_remove(pool->over_list, il); free(ptr); pool->over--; } } if (pool->over > 0) { WRN( "Pool [%s] still over by %i\n", pool->name, pool->over); } #ifndef NVALGRIND VALGRIND_DESTROY_MEMPOOL(pool); #endif if (pool->base) free(pool->base); eina_lock_release(&pool->mutex); eina_lock_free(&pool->mutex); free(pool); }
/* Create a temporary string holding at most @length bytes of @str.
 *
 * The Str header and the character payload share one allocation; the
 * new node is pushed on the global @strs list under @_mutex. Returns
 * the payload pointer, or NULL when @str is NULL, @length is 0, or
 * allocation fails. */
EAPI Eina_Tmpstr *
eina_tmpstr_add_length(const char *str, size_t length)
{
   Str *s;

   if (!str || !length) return NULL;

   s = malloc(sizeof(Str) + length + 1);
   if (!s) return NULL;
   s->length = length;
   s->str = ((char *)s) + sizeof(Str);
   /* BUG FIX: this used strcpy(), overflowing the (length + 1)-byte
    * buffer whenever strlen(str) > length. strncpy() copies at most
    * @length bytes and stops early at a NUL; terminate explicitly
    * since strncpy() does not guarantee termination. */
   strncpy(s->str, str, length);
   s->str[length] = '\0';

   eina_lock_take(&_mutex);
   s->next = strs;
   strs = s;
   eina_lock_release(&_mutex);

   return s->str;
}
/* Remove @tmpstr from the tracking list and free it. Unknown pointers,
 * NULL, or an empty list are silently ignored. */
EAPI void
eina_tmpstr_del(Eina_Tmpstr *tmpstr)
{
   Str *node, *prev = NULL;

   if ((!strs) || (!tmpstr)) return;

   eina_lock_take(&_mutex);
   for (node = strs; node; prev = node, node = node->next)
     {
        if (node->str != tmpstr) continue;

        /* Unlink from the singly linked list and release the node. */
        if (prev) prev->next = node->next;
        else strs = node->next;
        free(node);
        break;
     }
   eina_lock_release(&_mutex);
}
/* Share @str with an explicit length. Empty input maps to the "" value;
 * 1-char strings come from the static singleton table, 2-3 char strings
 * from the small-string pool, anything longer from the generic share
 * backend. */
EAPI Eina_Stringshare *
eina_stringshare_add_length(const char *str, unsigned int slen)
{
   const char *shared;

   if ((!str) || (slen <= 0)) return "";

   if (slen == 1)
     return (Eina_Stringshare *) _eina_stringshare_single + ((*str) << 1);

   if (slen < 4)
     {
        /* Small-string pool, protected by its own mutex. */
        eina_lock_take(&_mutex_small);
        shared = _eina_stringshare_small_add(str, slen);
        eina_lock_release(&_mutex_small);
        return shared;
     }

   return eina_share_common_add_length(stringshare_share, str,
                                       slen * sizeof(char), sizeof(char));
}
/* Run @callback(@data) on the main loop thread and block until it has
 * completed; returns whatever the callback returned. Called from the
 * main loop itself, the callback runs inline. Returns NULL when
 * @callback is NULL or allocation fails. */
EAPI void *
ecore_main_loop_thread_safe_call_sync(Ecore_Data_Cb callback, void *data)
{
   Ecore_Safe_Call *order;
   void *ret;

   if (!callback) return NULL;

   /* Already on the main loop: no handshake needed. */
   if (eina_main_loop_is())
     {
        return callback(data);
     }

   order = malloc(sizeof (Ecore_Safe_Call));
   if (!order) return NULL;

   order->cb.sync = callback;
   order->data = data;
   eina_lock_new(&order->m);
   eina_condition_new(&order->c, &order->m);
   order->sync = EINA_TRUE;
   order->suspend = EINA_FALSE;

   /* NOTE(review): the order is queued before order->m is taken below —
    * verify the main loop cannot broadcast order->c before this thread
    * reaches eina_condition_wait(), or the signal would be missed. */
   _ecore_main_loop_thread_safe_call(order);

   eina_lock_take(&order->m);
   eina_condition_wait(&order->c);
   eina_lock_release(&order->m);

   ret = order->data;

   /* Recycle the order as an async request that cleans up its own
    * lock/condition on the main loop side. */
   order->sync = EINA_FALSE;
   order->cb.async = _thread_safe_cleanup;
   order->data = order;
   _ecore_main_loop_thread_safe_call(order);

   return ret;
}
/* Take an extra reference on @str. Singleton strings (length 0/1) only
 * update population stats; 2-3 char strings go through the small-string
 * pool; anything longer uses the generic share backend. A NULL @str is
 * forwarded so the backend can account for it. */
EAPI Eina_Stringshare *
eina_stringshare_ref(Eina_Stringshare *str)
{
   int len;

   if (!str) return eina_share_common_ref(stringshare_share, str);

   /* Cheap length probe: only the first 4 bytes need inspecting before
    * falling back to a full strlen() past them. */
   if (str[0] == '\0') len = 0;
   else if (str[1] == '\0') len = 1;
   else if (str[2] == '\0') len = 2;
   else if (str[3] == '\0') len = 3;
   else len = 3 + (int)strlen(str + 3);

   if (len < 2)
     {
        /* Static singletons: nothing to reference, just count. */
        eina_share_common_population_add(stringshare_share, len);
        return str;
     }

   if (len < 4)
     {
        const char *shared;

        eina_share_common_population_add(stringshare_share, len);
        eina_lock_take(&_mutex_small);
        shared = _eina_stringshare_small_add(str, len);
        eina_lock_release(&_mutex_small);
        return shared;
     }

   return eina_share_common_ref(stringshare_share, str);
}
/* Grab a buffer for @pool: reuse a cached one from the trash when
 * available, otherwise malloc() a fresh one and account for it in the
 * global memory usage counter. May return NULL if malloc() fails. */
static void *
_eio_pool_malloc(Eio_Alloc_Pool *pool)
{
   void *buf = NULL;

   if (pool->count)
     {
        eina_lock_take(&(pool->lock));
        buf = eina_trash_pop(&pool->trash);
        if (buf) pool->count--;
        eina_lock_release(&(pool->lock));
     }

   if (buf) return buf;

   /* Cache miss: allocate and account under the spinlock. */
   buf = malloc(pool->mem_size);
   eina_spinlock_take(&memory_pool_lock);
   if (buf) memory_pool_usage += pool->mem_size;
   eina_spinlock_release(&memory_pool_lock);

   return buf;
}
/* Reset ecore state after fork(): recreate the thread-wakeup pipe and
 * run the registered fork callbacks.
 * NOTE(review): this definition is truncated in this chunk — the rest
 * of the function body is not visible here. */
EAPI void
ecore_fork_reset(void)
{
   Eina_List *l, *ln;
   Ecore_Fork_Cb *fcb;

   eina_lock_take(&_thread_safety);

   /* The inherited pipe fds are not usable in the child: rebuild them. */
   ecore_pipe_del(_thread_call);
   _thread_call = ecore_pipe_add(_thread_callback, NULL);
   /* If there was something in the pipe, trigger a wakeup again */
   if (_thread_cb) ecore_pipe_write(_thread_call, &wakeup, sizeof (int));

   eina_lock_release(&_thread_safety);

   // should this be done within the eina lock stuff?

   fork_cbs_walking++;
   EINA_LIST_FOREACH(fork_cbs, l, fcb)
     {
        fcb->func(fcb->data);
     }
/* Allocate one item from the one-big mempool.
 *
 * Fast paths: pop a recycled slot from the trash, or carve the next
 * never-used slot out of the preallocated base slab. When the slab is
 * exhausted (or its allocation failed), fall back to an individual
 * malloc() tracked in over_list. Returns NULL only if that fallback
 * malloc fails too. */
static void *
eina_one_big_malloc(void *data, EINA_UNUSED unsigned int size)
{
   One_Big *pool = data;
   unsigned char *mem = NULL;

   /* A failed lock take is only tolerated when the pool is used from
    * its owning thread — asserted in debug builds. */
   if (!eina_lock_take(&pool->mutex))
     {
#ifdef EINA_HAVE_DEBUG_THREADS
        assert(eina_thread_equal(pool->self, eina_thread_self()));
#endif
     }

   /* 1) Reuse a previously freed slot, if any. */
   if (pool->empty)
     {
#ifndef NVALGRIND
        VALGRIND_MAKE_MEM_DEFINED(pool->empty, pool->item_size);
#endif
        mem = eina_trash_pop(&pool->empty);
        pool->usage++;
        goto on_exit;
     }

   /* 2) Lazily allocate the base slab on first use. */
   if (!pool->base)
     {
        pool->base = malloc(pool->item_size * pool->max);
        if (!pool->base) goto retry_smaller;
#ifndef NVALGRIND
        VALGRIND_MAKE_MEM_NOACCESS(pool->base, pool->item_size * pool->max);
#endif
     }

   /* 3) Carve the next never-used slot out of the slab. */
   if (pool->served < pool->max)
     {
        mem = pool->base + (pool->served++ *pool->item_size);
        pool->usage++;
        goto on_exit;
     }

retry_smaller:
   /* 4) Slab full or unavailable: overflow to a tracked malloc(). */
   mem = malloc(sizeof(Eina_Inlist) + pool->offset_to_item_inlist);
   if (mem)
     {
        Eina_Inlist *node = OVER_MEM_TO_LIST(pool, mem);
        pool->over++;
        /* Only need to zero list elements and not the payload here */
        memset(node, 0, sizeof(Eina_Inlist));
        pool->over_list = eina_inlist_append(pool->over_list, node);
     }
#ifndef NVALGRIND
   VALGRIND_MAKE_MEM_NOACCESS(mem, pool->item_size);
#endif

on_exit:
   eina_lock_release(&pool->mutex);

#ifndef NVALGRIND
   VALGRIND_MEMPOOL_ALLOC(pool, mem, pool->item_size);
#endif
   return mem;
}