/**
 * Set up a cache object.
 *
 * @name, @opaque and the callbacks are recorded as-is.  If @external_lock
 * is NULL, a private mutex is initialized inside the cache and used instead.
 *
 * Returns 0 on success, or -1 if the private mutex can not be created
 * (errno carries the mutex_init error code).
 */
int
smcache_init2(smcache_t *smc, const char *name, int nlimit_cache,
	OSPX_pthread_mutex_t *external_lock, long lflags, void *opaque,
	void *(*creater)(void *opaque), void (*destroy)(void *obj, void *opaque),
	int (*need_destroy)(void *obj, void *opaque))
{
	smc->desc = name;

	/* Start with an empty object queue */
	smc->xq.n = 0;
	smc->xq.qfirst = smc->xq.qlast = NULL;

	/* A zero limit is recorded as -1 */
	smc->nlimit_cache = nlimit_cache ? nlimit_cache : -1;
	smc->lock = external_lock;
	smc->lflags = lflags;
	smc->opaque = opaque;
	smc->creater = creater;
	smc->destroy = destroy;
	smc->need_destroy = need_destroy;

	MSG_log(M_CACHE, LOG_INFO,
		"Initializing cache (\"%s\"/%p) ...\n",
		smc->desc, smc);

	/* @need_destroy makes no sense without a @destroy callback */
	assert (!smc->need_destroy || smc->destroy);

	if (!smc->lock) {
		/* No external lock given: fall back on our own mutex */
		if ((errno = OSPX_pthread_mutex_init(&smc->stack_lock, 0))) {
			MSG_log(M_CACHE, LOG_ERR,
				"mutex_init:%s\n",
				OSPX_sys_strerror(errno));
			return -1;
		}
		smc->lock = &smc->stack_lock;
	}

	return 0;
}
/**
 * Allocate and initialize a new task object from the global task cache.
 *
 * If @pool is given, the task is bound to it immediately; the pool must
 * support the extended task interface (eFUNC_F_TASK_EX).
 *
 * Returns the new task, or NULL on error (unsupported pool, cache
 * exhausted, or binding failure).
 */
EXPORT struct sttask *
stpool_task_new(stpool_t *pool, const char *name,
	void (*run)(struct sttask *ptask),
	void (*err_handler)(struct sttask *ptask, long reasons),
	void *arg)
{
	int e;
	ctask_t *ptask;

	/**
	 * Does the pool support the Waitable tasks ?
	 */
	if (pool && !(eFUNC_F_TASK_EX & pool->efuncs)) {
		MSG_log(M_POOL, LOG_ERR,
			"Only ROUTINE tasks are supported by this pool(%s/%p) efuncs(%p).\n",
			pool->desc, pool, pool->efuncs);
		return NULL;
	}

	if (!(ptask = __stpool_cache_get(NULL)))
		return NULL;

	__stpool_task_INIT(ptask, name, run, err_handler, arg);

	if (pool && (e = __stpool_task_set_p(ptask, pool))) {
		MSG_log2(M_POOL, LOG_ERR, "__task_set_p: code(%d).", e);
		/* FIX: return the task object to the cache; the original code
		 * leaked it on this error path */
		__stpool_cache_put(NULL, ptask);
		return NULL;
	}

	return TASK_CAST_UP(ptask);
}
/**
 * Reset the cache: flush every destroyable object and, optionally,
 * hand the remaining queued objects over to the caller via @oq.
 * The flushed objects are destroyed outside of the cache lock.
 */
void
smcache_reset(smcache_t *smc, smlink_q_t *oq)
{
	smlink_q_t rmq = {0, NULL, NULL};
	int nflushed = 0;

	MSG_log(M_CACHE, LOG_INFO,
		"Reseting cache (\"%s\"/%p) ...\n",
		smc->desc, smc);

	smcache_lock(smc);
	/* Move every object that can be destroyed onto the private queue */
	if (smcache_flushablel(smc, 0))
		nflushed = smcache_get_flush_ql(smc, 0, &rmq);

	/* Dump what is left to the caller and empty the cache */
	if (oq) {
		*oq = smc->xq;
		smc->xq.n = 0;
		smc->xq.qfirst = smc->xq.qlast = NULL;
	}
	smcache_unlock(smc);

	/* Release the flushed objects with the lock dropped */
	if (nflushed)
		smcache_destroy_q(smc, &rmq);
}
static cpool_t * __fac_gp_common_ctor(long efuncs, const cpool_method_t *me, const char *desc, int maxthreads, int minthreads, int priq_num, int suspend) { int e; cpool_t *pool = calloc(1, sizeof(cpool_t) + strlen(desc) + 1); if (!pool) return NULL; pool->desc = (char *)(pool + 1); strcpy(pool->desc, desc); pool->efuncs = efuncs; pool->me = me; pool->free = __fac_gp_common_dtor; /** * Create the group pool instance */ if ((e=cpool_gp_create_instance((cpool_gp_t **)&pool->ins, pool->desc, maxthreads, minthreads, priq_num, suspend, efuncs))) { MSG_log(M_GROUP, LOG_ERR, "Failed to create gp pool. code(%d)\n", e); free(pool); return NULL; } return pool; }
/**
 * Bind @ptask to @pool (its future scheduling destination).
 *
 * If the task is currently bound to a DIFFERENT pool, the old binding is
 * torn down first; a task that is still in progress can not be re-bound
 * (POOL_ERR_BUSY), and a task whose reference count is non-zero must be
 * synchronized with its old pool first (task_wsync).
 *
 * Returns 0 on success, POOL_ERR_BUSY, or a translated library error.
 */
EXPORT int
stpool_task_set_p(struct sttask *ptask, stpool_t *pool)
{
	int e = 0;
	ctask_t *ptask0 = TASK_CAST_DOWN(ptask);

	if (ptask0->pool != pool) {
		if (ptask0->ref || ptask0->f_stat) {
			/* NOTE(review): this declaration SHADOWS the @pool parameter;
			 * inside this scope "pool" is the task's CURRENT pool. */
			cpool_t *pool = ptask0->pool;

			assert (pool);
			if (ptask0->f_stat) {
				/* The task is still scheduled/running in its old pool */
				MSG_log(M_POOL, LOG_WARN,
					"@%s:Task(%s/%p) is in progress. ref(%hhd) stat(%p)\n",
					__FUNCTION__, ptask0->task_desc, ptask0, ptask0->ref,
					pool ? ME_CALL(pool, task_stat)(pool->ins, ptask0, NULL) : (long)NULL);

				return POOL_ERR_BUSY;
			}

			/* Wait for the old pool to drop its references on the task;
			 * fail if the pool can not synchronize */
			if (!ME_EX_HAS(pool, task_wsync) ||
				(e = ME_EX_CALL(pool, task_wsync)(pool->ins, ptask0))) {
				MSG_log(M_POOL, LOG_WARN,
					"It is not safe to change the task's pool since its reference "
					"is not zero. task(%s/%p) ref(%hhd) stat(%p)\n",
					ptask->task_name, ptask, ptask0->ref,
					ME_CALL(pool, task_stat)(pool->ins, ptask0, NULL));

				return __stpool_liberror(e);
			}
			assert (!ptask0->ref && !ptask0->f_stat);
		}

		/**
		 * We initialize it if the task has not been initialized before
		 */
		/* (review: this branch actually DE-initializes the task's old
		 * pool binding before the new one is installed) */
		if (ptask0->pool && ME_HAS(ptask0->pool, task_deinit)) {
			ME_CALL(ptask0->pool, task_deinit)(ptask0->pool->ins, ptask0);
			ptask0->pool = NULL;
		}

		return __stpool_task_set_p(ptask0, pool);
	}

	return 0;
}
/**
 * Shrink the cache down to at most @ncached_limit objects
 * (a full flush is logged when the limit is zero).
 */
int
smcache_flush(smcache_t *smc, int ncached_limit)
{
	if (!ncached_limit)
		MSG_log(M_CACHE, LOG_INFO,
			"Flushing cache (\"%s\"/%p) ...\n",
			smc->desc, smc);

	return smcache_add_limit(smc, NULL, ncached_limit);
}
/**
 * Wake the pool up from its suspended state.
 */
EXPORT void
stpool_resume(stpool_t *pool)
{
	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} resume ... \n",
		pool->desc, pool);

	/* Every pool implementation must provide @resume */
	assert (ME_HAS(pool, resume));
	ME_CALL(pool, resume)(pool->ins);
}
/**
 * Remove all tasks from the pool; returns the underlying method's
 * result, or 0 when the pool does not support the operation.
 */
EXPORT int
stpool_remove_all(stpool_t *pool, int dispatched_by_pool)
{
	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} remove all tasks ... (%d)\n",
		pool->desc, pool, dispatched_by_pool);

	if (!ME_HAS(pool, remove_all))
		return 0;

	return ME_CALL(pool, remove_all)(pool->ins, dispatched_by_pool);
}
/**
 * Destroy the fast object pool: deinitialize its cache and release the
 * block array.  Every block must have been drained before this call
 * (asserted below).
 */
void
objpool_dtor(objpool_t *p)
{
	MSG_log(M_FOBJP, LOG_INFO,
		"Destroying fast objpool(\"%s\"/%p) ...\n",
		objpool_name(p), p);

	smcache_deinit(&p->smc);

	assert (p->iblocks == 0);

	/* free(NULL) is a no-op — the NULL guard the original carried
	 * was redundant */
	free(p->blocks);
}
/**
 * Turn the pool's throttle on (@enable != 0) or off.
 * Returns 0 on success, POOL_ERR_NSUPPORT if the pool has no throttle.
 */
EXPORT int
stpool_throttle_enable(stpool_t *pool, int enable)
{
	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} %s the throttle ...\n",
		pool->desc, pool, enable ? "ENABLING" : "DISABLING");

	if (!ME_EX_HAS(pool, throttle_enable))
		return POOL_ERR_NSUPPORT;

	ME_EX_CALL(pool, throttle_enable)(pool->ins, enable);

	return 0;
}
/**
 * Wait on the throttle of the pool @ptask is bound to.
 * The task must have a destination pool (see @stpool_task_set_p);
 * otherwise POOL_TASK_ERR_DESTINATION is returned.
 */
EXPORT int
stpool_task_pthrottle_wait(struct sttask *ptask, long ms)
{
	stpool_t *dest = TASK_CAST_DOWN(ptask)->pool;

	if (!dest) {
		MSG_log(M_POOL, LOG_WARN,
			"tsk(%s/%p): Firstly, you should call @stpool_task_set_p to specify its destination\n",
			ptask->task_name, ptask);

		return POOL_TASK_ERR_DESTINATION;
	}

	return stpool_throttle_wait(dest, ms);
}
/**
 * Mark every task in the pool with @lflags.
 *
 * Pools lacking @mark_all fall back on @remove_all when a removal
 * mark was requested; otherwise 0 is returned.
 */
EXPORT long
stpool_mark_all(stpool_t *pool, long lflags)
{
	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} Marking all tasks with %p ...\n",
		pool->desc, pool, lflags);

	if (ME_HAS(pool, mark_all))
		return ME_CALL(pool, mark_all)(pool->ins, lflags);

	/* Fallback: honour a removal mark through @remove_all */
	if (ME_HAS(pool, remove_all) && TASK_VMARK_REMOVE & lflags)
		return ME_CALL(pool, remove_all)(pool->ins,
			lflags & TASK_VMARK_REMOVE_BYPOOL);

	return 0;
}
/**
 * Block until every task in the pool is done, or @ms milliseconds pass.
 * Returns 0 on success, POOL_ERR_NSUPPORT, or a translated library error.
 */
EXPORT int
stpool_wait_all(stpool_t *pool, long ms)
{
	int err;

	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} start waiting for all tasks's being done ... (%ld ms)\n",
		pool->desc, pool, ms);

	if (!ME_HAS(pool, wait_all))
		return POOL_ERR_NSUPPORT;

	err = ME_CALL(pool, wait_all)(pool->ins, ms);

	return err ? __stpool_liberror(err) : 0;
}
/**
 * Suspend the pool, waiting at most @ms milliseconds.
 * Returns 0 on success, POOL_ERR_NSUPPORT, or a translated library error.
 */
EXPORT int
stpool_suspend(stpool_t *pool, long ms)
{
	int err;

	MSG_log(M_POOL, LOG_INFO,
		"{\"%s\"/%p} suspend ... (%ld ms)\n",
		pool->desc, pool, ms);

	if (!ME_HAS(pool, suspend))
		return POOL_ERR_NSUPPORT;

	err = ME_CALL(pool, suspend)(pool->ins, ms);

	return err ? __stpool_liberror(err) : 0;
}
/**
 * Initialize a fast object pool for objects of @objlen bytes.
 *
 * @nreserved objects are pre-allocated on request; @nlimit_cache bounds
 * the cache (defaulting to one block's worth of objects); @cache_lock
 * may be NULL, in which case the cache uses its own mutex.
 *
 * Returns 0 on success, -1 if the cache can not be initialized.
 */
int
objpool_ctor2(objpool_t *p, const char *name, size_t objlen, size_t nreserved,
	int nlimit_cache, OSPX_pthread_mutex_t *cache_lock)
{
	/**
	 * A block must can store at least 20 objects
	 */
	int n = 20, page_size = 4096, dummy = 50;

	if (objlen >= 256) {
		/* Big objects: fewer objects per block, bigger rounding quantum.
		 * NOTE(review): 8096 looks like a typo for 8192 — kept as-is to
		 * preserve the existing block layout; confirm with the author. */
		n = 8;
		page_size = 8096;
	}
	p->objlen = objlen;
	p->blocks = NULL;
	p->iblocks = p->ialloc = 0;

	/* FIX: the block must hold @n objects of @objlen bytes each; the
	 * original expression omitted the "* objlen" factor, so a block was
	 * always a single page and could hold far fewer than @n large
	 * objects, breaking the invariant stated above. */
	p->block_size = (n * objlen + sizeof(obj_block_t) + dummy + page_size - 1)
		/ page_size * page_size;
	p->block_nobjs = (p->block_size - sizeof(obj_block_t)) / objlen;
	p->ntotal = p->nput = 0;

	MSG_log(M_FOBJP, LOG_INFO,
		"Initializing fast objpool(\"%s\"/%p) ...\n",
		name, p);

	/**
	 * Initialize the cache object
	 */
	if (smcache_init2(&p->smc, name,
			!nlimit_cache ? p->block_nobjs : nlimit_cache,
			cache_lock, CACHE_F_LOCK_CREATER, p, objpool_get, objpool_put,
			FUNC_ALWAYS_NEED_DESTROY)) {
		MSG_log2(M_FOBJP, LOG_ERR, "cache_init error");
		return -1;
	}

	/**
	 * Reserve some objects for the app if it has been
	 * requested by user
	 */
	if (nreserved > 0)
		smcache_reserve(&p->smc, nreserved);

	return 0;
}
/**
 * Tear the cache down: destroy all flushable objects and release the
 * mutex the cache created for itself (an external lock is left alone —
 * the caller owns it).
 */
void
smcache_deinit(smcache_t *smc)
{
	SMLINK_Q_HEAD(q);

	MSG_log(M_CACHE, LOG_INFO,
		"Destroying cache (\"%s\"/%p) ...\n",
		smc->desc, smc);

	/* NOTE:
	 * We do not hold the lock
	 */
	if (smcache_flushablel(smc, 0)) {
		smcache_get_flush_ql(smc, 0, &q);
		smcache_destroy_q(smc, &q);
	}

	/* FIX: destroy only the mutex this cache owns — the private
	 * @stack_lock that @smcache_init2 creates when no external lock is
	 * passed.  The original condition was inverted: it destroyed the
	 * CALLER's external mutex (not owned by the cache) and leaked the
	 * private one.
	 * NOTE(review): if CACHE_F_LOCK_CREATER is meant to transfer
	 * ownership of an external lock to the cache, that flag should be
	 * honoured here too — confirm its intended semantics. */
	if (smc->lock == &smc->stack_lock)
		OSPX_pthread_mutex_destroy(smc->lock);
}
/**
 * Give a task object back to the global task cache.
 *
 * The task must not carry the cache-ownership flag (asserted), and it
 * should be idle; if it is still busy or referenced, the function tries
 * to synchronize with its pool (task_wsync) and logs an error when that
 * is not possible.
 */
EXPORT void
stpool_task_delete(struct sttask *ptask)
{
	assert (ptask && ___smc);
	/* The cache must not already own this task */
	assert (!(eTASK_VM_F_CACHE & TASK_CAST_DOWN(ptask)->f_vmflags));

	if (TASK_CAST_DOWN(ptask)->f_stat || TASK_CAST_DOWN(ptask)->ref) {
		ctask_t *ptask0 = TASK_CAST_DOWN(ptask);
		cpool_t *pool = ptask0->pool;

		assert (pool);
		/* Busy or referenced: try to wait for the pool to release the
		 * task; if the task is active or the wait fails, warn loudly —
		 * destroying it now is unsafe */
		if (TASK_CAST_DOWN(ptask)->f_stat ||
			(!ME_EX_HAS(pool, task_wsync) ||
			 ME_EX_CALL(pool, task_wsync)(pool->ins, ptask0))) {
			MSG_log(M_POOL, LOG_ERR,
				"It is not safe to destroy the task now. task(%s/%p) ref(%hhd) code(%d) stat:%p\n",
				ptask0->task_desc, ptask0, ptask0->ref, ptask0->task_code,
				pool ? ME_CALL(pool, task_stat)(pool->ins, ptask0, NULL) : ptask0->f_stat);
		}
		assert (!ptask0->ref);
	}

	__stpool_cache_put(NULL, TASK_CAST_DOWN(ptask));
}
/**
 * Common constructor for group-pool factories: allocate the pool header,
 * its core and the group-pool instance in ONE calloc'ed chunk, then
 * create the instance.
 *
 * Memory layout: [cpool_t][cpool_core_t][cpool_gp_t]
 *
 * NOTE(review): unlike the sibling ctor that copies @desc behind the
 * pool object, this version stores the @desc POINTER directly — the
 * caller must keep the string alive for the pool's lifetime; confirm.
 *
 * Returns the new pool, or NULL on allocation/instance failure.
 */
static cpool_t *
__fac_gp_common_ctor(long efuncs, const cpool_method_t *me, const char *desc,
	int maxthreads, int minthreads, int priq_num, int suspend)
{
	int e;
	cpool_gp_t *gpool;
	/* One allocation holds the pool header, the core and the gp instance */
	cpool_t *pool = calloc(1, sizeof(cpool_t) + sizeof(cpool_core_t) + sizeof(cpool_gp_t));

	if (!pool)
		return NULL;
	pool->desc = desc;
	pool->efuncs = efuncs;
	pool->me = me;
	/* The core sits right behind the cpool_t header */
	pool->ins = (cpool_core_t *)(pool + 1);
	pool->destroy = __fac_gp_common_dtor;

	/**
	 * Retreive the memory address of the rt pool and set its core
	 */
	gpool = (cpool_gp_t *)(pool->ins + 1);
	gpool->core = pool->ins;

	/**
	 * Create the rt pool instance
	 */
	if ((e=cpool_gp_create_instance(gpool, desc, maxthreads, minthreads, priq_num, suspend, efuncs))) {
		MSG_log(M_GROUP, LOG_ERR,
			"Failed to create gp pool. code(%d)\n", e);
		free(pool);
		return NULL;
	}

	return pool;
}
/**
 * Create a task pool whose capabilities satisfy @eCAPs.
 *
 * Every registered factory is scanned; the ones that support ALL of the
 * requested capabilities are collected (up to 20), sorted by scores and
 * then by the number of supported functions, and tried in order until
 * one of them creates a pool.
 *
 * Returns the new pool, or NULL if no suitable factory exists or every
 * candidate failed.
 */
EXPORT stpool_t *
stpool_create(const char *desc, long eCAPs, int maxthreads, int minthreads,
	int suspend, int pri_q_num)
{
	cpool_t *pool = NULL;
	long elibCAPs;
	int nfuncs;
	const char *fac_desc;
	const cpool_factory_t *fac;
	char eCAPs_buffer[400];
	struct fac_candidate {
		const char *fac_desc;
		int nfuncs;
		long eCAPs;
		const cpool_factory_t *fac;
	} fac_sel[20];
	int idx, sel_idx = 0;

	/**
	 * It does not need to load the ospx library since
	 * we do not call any APIs who must use the TLS datas.
	 */
	MSG_log(M_POOL, LOG_INFO,
		"Request creating a pool(\"%s\") efuncs(%s) ...\n",
		desc, __eCAPs_desc(eCAPs, eCAPs_buffer));

	/**
	 * Select the best templates to create the pool
	 */
	for (fac = first_factory(&fac_desc); fac; fac = next_factory(&fac_desc)) {
		elibCAPs = __enum_CAPs(fac, &nfuncs);

		/* The factory must support every capability the user asked for */
		if ((elibCAPs & eCAPs) == eCAPs) {
			MSG_log(M_POOL, LOG_DEBUG,
				"Find a Factory(\"%s\" scores(%d), nfuns(%d)): %s\n\n",
				fac_desc, fac->scores, nfuncs, __eCAPs_desc(elibCAPs, eCAPs_buffer));

			/**
			 * We skip it if the entry is full
			 */
			if (sel_idx == sizeof(fac_sel)/sizeof(*fac_sel))
				continue;

			/**
			 * Add the factory into our candidate entries
			 * (kept sorted by scores, then by nfuncs, descending)
			 */
			for (idx = 0; idx < sel_idx; idx++) {
				if (fac->scores > fac_sel[idx].fac->scores ||
					(fac->scores == fac_sel[idx].fac->scores && nfuncs > fac_sel[idx].nfuncs)) {
					memmove(fac_sel + idx + 1, fac_sel + idx,
						(sel_idx - idx) * sizeof(struct fac_candidate));
					break;
				}
			}
			fac_sel[idx].fac_desc = fac_desc;
			fac_sel[idx].nfuncs = nfuncs;
			fac_sel[idx].eCAPs = elibCAPs;
			fac_sel[idx].fac = fac;
			++ sel_idx;
		}
	}

	if (!sel_idx) {
		MSG_log(M_POOL, LOG_ERR,
			"Can not find any pool template to satify user. eCAPs(%p) (%s)\n",
			eCAPs, stpool_version());
		return NULL;
	}

	/**
	 * Try the candidates in order until one of them creates a pool
	 */
	for (idx = 0; idx < sel_idx; idx++) {
		/* FIX: this log message was a string literal broken across a
		 * physical line in the original source (invalid C); re-joined
		 * into a single literal. */
		MSG_log(M_POOL, LOG_INFO,
			"Factory(\"%s\" scores(%d) nfuns(%d)) try to service us. lib_eCAPs(%p) user_eCAPs(%p)\n",
			fac_sel[idx].fac_desc, fac_sel[idx].fac->scores, fac_sel[idx].nfuncs,
			fac_sel[idx].eCAPs, eCAPs);

		if ((pool = fac_sel[idx].fac->create(desc, maxthreads, minthreads, pri_q_num, suspend)))
			break;

		MSG_log2(M_POOL, LOG_ERR,
			"Failed to create the pool: Factory(\"%s\"/%p).",
			fac_sel[idx].fac_desc, fac_sel[idx].fac);
	}

	/* Register the exit hook only when a pool was actually created */
	if (idx != sel_idx && ME_HAS(pool, atexit))
		ME_CALL(pool, atexit)(pool->ins, __stpool_atexit, pool);

	return pool;
}