gf_timer_t *
gf_timer_call_after (glusterfs_ctx_t *ctx, struct timespec delta,
                     gf_timer_cbk_t callbk, void *data)
{
        gf_timer_registry_t *reg = NULL;
        gf_timer_t          *event = NULL;
        gf_timer_t          *trav = NULL;
        uint64_t             at = 0;

        if (ctx == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        reg = gf_timer_registry_init (ctx);
        if (!reg) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, 0,
                                  LG_MSG_TIMER_REGISTER_ERROR, "!reg");
                return NULL;
        }

        event = GF_CALLOC (1, sizeof (*event), gf_common_mt_gf_timer_t);
        if (!event) {
                return NULL;
        }
        timespec_now (&event->at);
        timespec_adjust_delta (&event->at, delta);
        at = TS (event->at);
        event->callbk = callbk;
        event->data = data;
        event->xl = THIS;

        LOCK (&reg->lock);
        {
                trav = reg->active.prev;
                while (trav != &reg->active) {
                        if (TS (trav->at) < at)
                                break;
                        trav = trav->prev;
                }
                event->prev = trav;
                event->next = event->prev->next;
                event->prev->next = event;
                event->next->prev = event;
        }
        UNLOCK (&reg->lock);

        return event;
}
int32_t
quota_conf_read_header (int fd, char *buf)
{
        int header_len = 0;
        int ret        = 0;

        header_len = strlen (QUOTA_CONF_HEADER);
        ret = gf_nread (fd, buf, header_len);
        if (ret <= 0) {
                goto out;
        } else if (ret > 0 && ret != header_len) {
                ret = -1;
                goto out;
        }

        /* overwrite the last header byte with NUL so buf is a usable
         * C string */
        buf[header_len - 1] = 0;

out:
        if (ret < 0)
                gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
                                  LG_MSG_QUOTA_CONF_ERROR,
                                  "failed to read header from a quota conf");

        return ret;
}
int32_t
gf_timer_call_cancel (glusterfs_ctx_t *ctx, gf_timer_t *event)
{
        gf_timer_registry_t *reg = NULL;

        if (ctx == NULL || event == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return 0;
        }

        reg = gf_timer_registry_init (ctx);
        if (!reg) {
                gf_msg ("timer", GF_LOG_ERROR, 0, LG_MSG_INIT_TIMER_FAILED,
                        "!reg");
                GF_FREE (event);
                return 0;
        }

        pthread_mutex_lock (&reg->lock);
        {
                event->next->prev = event->prev;
                event->prev->next = event->next;
        }
        pthread_mutex_unlock (&reg->lock);

        GF_FREE (event);
        return 0;
}
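/* Illustrative usage sketch (not part of the tree): a caller arms a
 * one-shot timer with gf_timer_call_after() and then either lets the
 * callback fire or removes the pending event with gf_timer_call_cancel().
 * The callback, the 5-second delay, and the wrapper names below are
 * hypothetical. */
static void
example_timer_cbk (void *data)
{
        /* invoked once from the timer thread with the registered data */
}

static gf_timer_t *
example_arm_timer (glusterfs_ctx_t *ctx)
{
        struct timespec delta = {.tv_sec = 5, .tv_nsec = 0, };

        /* returns NULL on invalid ctx, failed registry init, or OOM */
        return gf_timer_call_after (ctx, delta, example_timer_cbk, NULL);
}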
void
runner_log (runner_t *runner, const char *dom, gf_loglevel_t lvl,
            const char *msg)
{
        char   *buf = NULL;
        size_t  len = 0;
        int     i = 0;

        if (runner->runerr)
                return;

        /* total length of all argv strings, each followed by a space */
        for (i = 0;; i++) {
                if (runner->argv[i] == NULL)
                        break;
                len += (strlen (runner->argv[i]) + 1);
        }

        buf = GF_CALLOC (1, len + 1, gf_common_mt_run_logbuf);
        if (!buf) {
                runner->runerr = errno;
                return;
        }

        for (i = 0;; i++) {
                if (runner->argv[i] == NULL)
                        break;
                strcat (buf, runner->argv[i]);
                strcat (buf, " ");
        }
        /* drop the trailing space */
        if (len > 0)
                buf[len - 1] = '\0';

        gf_msg_callingfn (dom, lvl, 0, LG_MSG_RUNNER_LOG,
                          "%s: %s", msg, buf);
        GF_FREE (buf);
}
int32_t
quota_conf_read_gfid (int fd, void *buf, char *type, float version)
{
        int ret = 0;

        ret = gf_nread (fd, buf, 16);
        if (ret <= 0)
                goto out;

        if (ret != 16) {
                ret = -1;
                goto out;
        }

        if (version >= 1.2f) {
                ret = gf_nread (fd, type, 1);
                if (ret != 1) {
                        ret = -1;
                        goto out;
                }
                ret = 17;
        } else {
                *type = GF_QUOTA_CONF_TYPE_USAGE;
        }

out:
        if (ret < 0)
                gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
                                  LG_MSG_QUOTA_CONF_ERROR,
                                  "failed to read gfid from a quota conf");

        return ret;
}
int32_t
quota_conf_read_version (int fd, float *version)
{
        int    ret           = 0;
        char   buf[PATH_MAX] = "";
        char  *tail          = NULL;
        float  value         = 0.0f;

        ret = quota_conf_read_header (fd, buf);
        if (ret == 0) {
                /* quota.conf is empty */
                value = GF_QUOTA_CONF_VERSION;
                goto out;
        } else if (ret < 0) {
                goto out;
        }

        /* the version is encoded in the last three bytes of the header */
        value = strtof ((buf + strlen (buf) - 3), &tail);
        if (tail[0] != '\0') {
                ret = -1;
                gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
                                  LG_MSG_QUOTA_CONF_ERROR,
                                  "invalid quota conf version");
                goto out;
        }

        ret = 0;

out:
        if (ret >= 0)
                *version = value;
        else
                gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
                                  LG_MSG_QUOTA_CONF_ERROR,
                                  "failed to read version from a quota "
                                  "conf header");

        return ret;
}
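/* Illustrative sketch (not from the tree) of the read sequence the helpers
 * above imply: the version comes from the header first, then 16-byte gfids,
 * each followed by a one-byte type for conf versions >= 1.2.  The function
 * and variable names here are hypothetical. */
static int
example_read_quota_conf (int fd)
{
        float version  = 0.0f;
        char  gfid[16] = {0};
        char  type     = 0;
        int   ret      = 0;

        ret = quota_conf_read_version (fd, &version);
        if (ret < 0)
                return -1;

        /* quota_conf_read_gfid() returns 0 at EOF and negative on error */
        while ((ret = quota_conf_read_gfid (fd, gfid, &type, version)) > 0) {
                /* handle one (gfid, type) record here */
        }

        return ret;
}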
/* hold lock while calling this function */
int
__cb_add_entry_buffer (buffer_t *buffer, void *item)
{
        circular_buffer_t *ptr = NULL;
        int                ret = -1;

        /* do we really need the assert here? */
        GF_ASSERT (buffer->used_len <= buffer->size_buffer);

        if (buffer->use_once == _gf_true &&
            buffer->used_len == buffer->size_buffer) {
                gf_msg ("circ-buff", GF_LOG_WARNING, 0, LG_MSG_BUFFER_ERROR,
                        "buffer %p is use once buffer", buffer);
                return -1;
        } else {
                if (buffer->used_len == buffer->size_buffer) {
                        /* buffer full: evict the entry we are about to
                         * overwrite */
                        if (buffer->cb[buffer->w_index]) {
                                ptr = buffer->cb[buffer->w_index];
                                if (ptr->data) {
                                        cb_destroy_data (ptr,
                                                buffer->destroy_buffer_data);
                                        ptr->data = NULL;
                                        GF_FREE (ptr);
                                }
                                buffer->cb[buffer->w_index] = NULL;
                                ptr = NULL;
                        }
                }

                buffer->cb[buffer->w_index] =
                        GF_CALLOC (1, sizeof (circular_buffer_t),
                                   gf_common_mt_circular_buffer_t);
                if (!buffer->cb[buffer->w_index])
                        return -1;

                buffer->cb[buffer->w_index]->data = item;
                ret = gettimeofday (&buffer->cb[buffer->w_index]->tv, NULL);
                if (ret == -1)
                        gf_msg_callingfn ("circ-buff", GF_LOG_WARNING, 0,
                                          LG_MSG_GETTIMEOFDAY_FAILED,
                                          "getting time of the day failed");

                buffer->w_index++;
                buffer->w_index %= buffer->size_buffer;

                /* used_len cannot exceed the total buffer size */
                if (buffer->used_len < buffer->size_buffer)
                        buffer->used_len++;

                return buffer->w_index;
        }
}
void
cb_buffer_dump (buffer_t *buffer, void *data,
                int (fn) (circular_buffer_t *buffer, void *data))
{
        int                index = 0;
        circular_buffer_t *entry = NULL;
        int                entries = 0;
        int                ul = 0;
        int                w_ind = 0;
        int                size_buff = 0;
        int                i = 0;

        ul = buffer->used_len;
        w_ind = buffer->w_index;
        size_buff = buffer->size_buffer;

        pthread_mutex_lock (&buffer->lock);
        {
                if (buffer->use_once == _gf_false) {
                        /* start from the oldest live entry */
                        index = (size_buff + (w_ind - ul)) % size_buff;
                        for (entries = 0; entries < buffer->used_len;
                             entries++) {
                                entry = buffer->cb[index];
                                if (entry)
                                        fn (entry, data);
                                else
                                        gf_msg_callingfn ("circ-buff",
                                                          GF_LOG_WARNING, 0,
                                                          LG_MSG_NULL_PTR,
                                                          "Null entry in "
                                                          "circular buffer "
                                                          "at index %d.",
                                                          index);
                                index++;
                                index %= buffer->size_buffer;
                        }
                } else {
                        for (i = 0; i < buffer->used_len; i++) {
                                entry = buffer->cb[i];
                                fn (entry, data);
                        }
                }
        }
        pthread_mutex_unlock (&buffer->lock);
}
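/* Illustrative sketch: a dump callback of the shape cb_buffer_dump()
 * expects.  It is invoked once per live entry; the fields read below
 * (data, tv) are the ones filled in by __cb_add_entry_buffer().  The
 * fprintf sink is hypothetical (assumes <stdio.h>); a real caller would
 * log or serialize instead. */
static int
example_dump_entry (circular_buffer_t *entry, void *data)
{
        fprintf (stderr, "entry %p stored at %ld\n",
                 entry->data, (long) entry->tv.tv_sec);
        return 0;
}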
int32_t
gf_timer_call_cancel (glusterfs_ctx_t *ctx, gf_timer_t *event)
{
        gf_timer_registry_t *reg = NULL;
        gf_boolean_t         fired = _gf_false;

        if (ctx == NULL || event == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return 0;
        }

        LOCK (&ctx->lock);
        {
                reg = ctx->timer;
        }
        UNLOCK (&ctx->lock);

        if (!reg) {
                gf_msg ("timer", GF_LOG_ERROR, 0, LG_MSG_INIT_TIMER_FAILED,
                        "!reg");
                GF_FREE (event);
                return 0;
        }

        LOCK (&reg->lock);
        {
                fired = event->fired;
                if (fired)
                        goto unlock;
                event->next->prev = event->prev;
                event->prev->next = event->next;
        }
unlock:
        UNLOCK (&reg->lock);

        if (!fired) {
                GF_FREE (event);
                return 0;
        }
        return -1;
}
static int
gf_client_clienttable_expand (clienttable_t *clienttable, uint32_t nr)
{
        cliententry_t *oldclients = NULL;
        uint32_t       oldmax_clients = -1;
        int            ret = -1;

        if (clienttable == NULL || nr <= clienttable->max_clients) {
                gf_msg_callingfn ("client_t", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                ret = EINVAL;
                goto out;
        }

        oldclients = clienttable->cliententries;
        oldmax_clients = clienttable->max_clients;

        clienttable->cliententries = GF_CALLOC (nr, sizeof (cliententry_t),
                                                gf_common_mt_cliententry_t);
        if (!clienttable->cliententries) {
                /* restore the old table and report the failure rather
                 * than returning success with no expansion done */
                clienttable->cliententries = oldclients;
                ret = ENOMEM;
                goto out;
        }
        clienttable->max_clients = nr;

        if (oldclients) {
                uint32_t cpy = oldmax_clients * sizeof (cliententry_t);
                memcpy (clienttable->cliententries, oldclients, cpy);
        }

        gf_client_chain_client_entries (clienttable->cliententries,
                                        oldmax_clients,
                                        clienttable->max_clients);

        /* Now that expansion is done, we must update the client list
         * head pointer so that the client allocation functions can continue
         * using the expanded table.
         */
        clienttable->first_free = oldmax_clients;
        GF_FREE (oldclients);
        ret = 0;
out:
        return ret;
}
/* Returns 1 if ptr is a chunk belonging to pool, 0 if it lies outside the
 * pool's address range, and -1 on invalid arguments or a pointer that is
 * inside the range but not aligned to a chunk boundary. */
static int
__is_member (struct mem_pool *pool, void *ptr)
{
        if (!pool || !ptr) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return -1;
        }

        if (ptr < pool->pool || ptr >= pool->pool_end)
                return 0;

        if ((mem_pool_ptr2chunkhead (ptr) - pool->pool) %
            pool->padded_sizeof_type)
                return -1;

        return 1;
}
void *
mem_get0 (struct mem_pool *mem_pool)
{
        void *ptr = NULL;

        if (!mem_pool) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        ptr = mem_get (mem_pool);
        if (ptr)
                memset (ptr, 0, mem_pool->real_sizeof_type);

        return ptr;
}
int32_t
gf_timer_call_stale (gf_timer_registry_t *reg, gf_timer_t *event)
{
        if (reg == NULL || event == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return 0;
        }

        /* unlink from the active list and park on the stale list */
        event->next->prev = event->prev;
        event->prev->next = event->next;
        event->next = &reg->stale;
        event->prev = event->next->prev;
        event->next->prev = event;
        event->prev->next = event;

        return 0;
}
int32_t
quota_data_to_meta (data_t *data, char *key, quota_meta_t *meta)
{
        int32_t        ret   = -1;
        quota_meta_t  *value = NULL;
        int64_t       *size  = NULL;

        if (!data || !key || !meta)
                goto out;

        if (data->len > sizeof (int64_t)) {
                value = (quota_meta_t *) data->data;
                meta->size = ntoh64 (value->size);
                meta->file_count = ntoh64 (value->file_count);
                if (data->len > (sizeof (int64_t)) * 2)
                        meta->dir_count = ntoh64 (value->dir_count);
                else
                        meta->dir_count = 0;
        } else {
                size = (int64_t *) data->data;
                meta->size = ntoh64 (*size);
                meta->file_count = 0;
                meta->dir_count = 0;
                /* This can happen during software upgrade.
                 * Older versions of glusterfs will not have the inode
                 * count.  Return failure; this will be healed as part
                 * of lookup.
                 */
                gf_msg_callingfn ("quota", GF_LOG_DEBUG, 0,
                                  LG_MSG_QUOTA_XATTRS_MISSING,
                                  "Object quota xattrs missing: len = %d",
                                  data->len);
                ret = -2;
                goto out;
        }

        ret = 0;
out:
        return ret;
}
static int
gf_client_chain_client_entries (cliententry_t *entries, uint32_t startidx,
                                uint32_t endcount)
{
        uint32_t i = 0;

        if (!entries) {
                gf_msg_callingfn ("client_t", GF_LOG_WARNING, EINVAL,
                                  LG_MSG_INVALID_ARG, "!entries");
                return -1;
        }

        /* Chain only till the second-to-last entry because we want to
         * ensure that the last entry has GF_CLIENTTABLE_END.
         */
        for (i = startidx; i < (endcount - 1); i++)
                entries[i].next_free = i + 1;

        /* i has already been incremented up to the last entry. */
        entries[i].next_free = GF_CLIENTTABLE_END;

        return 0;
}
int32_t
quota_dict_set_meta (dict_t *dict, char *key, const quota_meta_t *meta,
                     ia_type_t ia_type)
{
        int32_t        ret   = -ENOMEM;
        quota_meta_t  *value = NULL;

        value = GF_CALLOC (1, sizeof (quota_meta_t), gf_common_quota_meta_t);
        if (value == NULL) {
                goto out;
        }

        value->size = hton64 (meta->size);
        value->file_count = hton64 (meta->file_count);
        value->dir_count = hton64 (meta->dir_count);

        if (ia_type == IA_IFDIR) {
                ret = dict_set_bin (dict, key, value, sizeof (*value));
        } else {
                /* For a file we don't need to store dir_count in the
                 * quota size xattr, so we set the len of the data in the
                 * dict as 128 bits, so when the posix xattrop reads the
                 * dict, it only performs operations on size and file_count.
                 */
                ret = dict_set_bin (dict, key, value,
                                    sizeof (*value) - sizeof (int64_t));
        }

        if (ret < 0) {
                gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
                                  LG_MSG_DICT_SET_FAILED, "dict set failed");
                GF_FREE (value);
        }
out:
        return ret;
}
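/* Illustrative sketch (not from the tree): packing a quota_meta_t for a
 * directory with quota_dict_set_meta().  The counts are hypothetical; the
 * helper handles the hton64() conversion and, for non-directories, picks
 * the shorter encoding without dir_count. */
static int
example_set_quota_meta (dict_t *dict, char *key)
{
        quota_meta_t meta = {
                .size       = 4096,
                .file_count = 2,
                .dir_count  = 1,
        };

        return quota_dict_set_meta (dict, key, &meta, IA_IFDIR);
}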
void
gf_client_clienttable_destroy (clienttable_t *clienttable)
{
        client_t      *client = NULL;
        cliententry_t *cliententries = NULL;
        uint32_t       client_count = 0;
        int32_t        i = 0;

        if (!clienttable) {
                gf_msg_callingfn ("client_t", GF_LOG_WARNING, EINVAL,
                                  LG_MSG_INVALID_ARG, "!clienttable");
                return;
        }

        LOCK (&clienttable->lock);
        {
                client_count = clienttable->max_clients;
                clienttable->max_clients = 0;
                cliententries = clienttable->cliententries;
                clienttable->cliententries = NULL;
        }
        UNLOCK (&clienttable->lock);

        if (cliententries != NULL) {
                for (i = 0; i < client_count; i++) {
                        client = cliententries[i].client;
                        if (client != NULL) {
                                gf_client_unref (client);
                        }
                }

                GF_FREE (cliententries);
                LOCK_DESTROY (&clienttable->lock);
                GF_FREE (clienttable);
        }
}
static void
fill_defaults (xlator_t *xl)
{
        if (xl == NULL) {
                gf_msg_callingfn ("xlator", GF_LOG_WARNING, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return;
        }

        SET_DEFAULT_FOP (create);
        SET_DEFAULT_FOP (open);
        SET_DEFAULT_FOP (stat);
        SET_DEFAULT_FOP (readlink);
        SET_DEFAULT_FOP (mknod);
        SET_DEFAULT_FOP (mkdir);
        SET_DEFAULT_FOP (unlink);
        SET_DEFAULT_FOP (rmdir);
        SET_DEFAULT_FOP (symlink);
        SET_DEFAULT_FOP (rename);
        SET_DEFAULT_FOP (link);
        SET_DEFAULT_FOP (truncate);
        SET_DEFAULT_FOP (readv);
        SET_DEFAULT_FOP (writev);
        SET_DEFAULT_FOP (statfs);
        SET_DEFAULT_FOP (flush);
        SET_DEFAULT_FOP (fsync);
        SET_DEFAULT_FOP (setxattr);
        SET_DEFAULT_FOP (getxattr);
        SET_DEFAULT_FOP (fsetxattr);
        SET_DEFAULT_FOP (fgetxattr);
        SET_DEFAULT_FOP (removexattr);
        SET_DEFAULT_FOP (fremovexattr);
        SET_DEFAULT_FOP (opendir);
        SET_DEFAULT_FOP (readdir);
        SET_DEFAULT_FOP (readdirp);
        SET_DEFAULT_FOP (fsyncdir);
        SET_DEFAULT_FOP (access);
        SET_DEFAULT_FOP (ftruncate);
        SET_DEFAULT_FOP (fstat);
        SET_DEFAULT_FOP (lk);
        SET_DEFAULT_FOP (inodelk);
        SET_DEFAULT_FOP (finodelk);
        SET_DEFAULT_FOP (entrylk);
        SET_DEFAULT_FOP (fentrylk);
        SET_DEFAULT_FOP (lookup);
        SET_DEFAULT_FOP (rchecksum);
        SET_DEFAULT_FOP (xattrop);
        SET_DEFAULT_FOP (fxattrop);
        SET_DEFAULT_FOP (setattr);
        SET_DEFAULT_FOP (fsetattr);
        SET_DEFAULT_FOP (fallocate);
        SET_DEFAULT_FOP (discard);
        SET_DEFAULT_FOP (zerofill);
        SET_DEFAULT_FOP (ipc);
        SET_DEFAULT_FOP (getspec);

        SET_DEFAULT_CBK (release);
        SET_DEFAULT_CBK (releasedir);
        SET_DEFAULT_CBK (forget);

        if (!xl->notify)
                xl->notify = default_notify;

        if (!xl->mem_acct_init)
                xl->mem_acct_init = default_mem_acct_init;

        return;
}
void
mem_put (void *ptr)
{
        struct list_head  *list = NULL;
        int               *in_use = NULL;
        void              *head = NULL;
        struct mem_pool  **tmp = NULL;
        struct mem_pool   *pool = NULL;

        if (!ptr) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return;
        }

        list = head = mem_pool_ptr2chunkhead (ptr);
        tmp = mem_pool_from_ptr (head);
        if (!tmp) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, 0,
                                  LG_MSG_PTR_HEADER_CORRUPTED,
                                  "ptr header is corrupted");
                return;
        }

        pool = *tmp;
        if (!pool) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, 0,
                                  LG_MSG_MEMPOOL_PTR_NULL,
                                  "mem-pool ptr is NULL");
                return;
        }

        LOCK (&pool->lock);
        {
                switch (__is_member (pool, ptr)) {
                case 1:
                        in_use = (head + GF_MEM_POOL_LIST_BOUNDARY +
                                  GF_MEM_POOL_PTR);
                        if (!is_mem_chunk_in_use (in_use)) {
                                gf_msg_callingfn ("mem-pool",
                                                  GF_LOG_CRITICAL, 0,
                                                  LG_MSG_MEMPOOL_INVALID_FREE,
                                                  "mem_put called on freed "
                                                  "ptr %p of mem pool %p",
                                                  ptr, pool);
                                break;
                        }
                        pool->hot_count--;
                        pool->cold_count++;
                        *in_use = 0;
                        list_add (list, &pool->list);
                        break;
                case -1:
                        /* For some reason, the address given is within
                         * the address range of the mem-pool but does not
                         * align with the expected start of a chunk that
                         * includes the list headers also. Sounds like a
                         * problem in layers of clouds up above us. ;)
                         */
                        abort ();
                        break;
                case 0:
                        /* The address is outside the range of the mem-pool.
                         * We assume here that this address was allocated at
                         * a point when the mem-pool was out of chunks in
                         * mem_get or the programmer has made a mistake by
                         * calling the wrong de-allocation interface. We do
                         * not have enough info to distinguish between the
                         * two situations.
                         */
                        pool->curr_stdalloc--;
                        GF_FREE (list);
                        break;
                default:
                        /* log error */
                        break;
                }
        }
        UNLOCK (&pool->lock);
}
static void *
gf_timer_proc (void *data)
{
        gf_timer_registry_t   *reg = data;
        const struct timespec  sleepts = {.tv_sec = 1, .tv_nsec = 0, };
        gf_timer_t            *event = NULL;
        xlator_t              *old_THIS = NULL;

        while (!reg->fin) {
                uint64_t        now;
                struct timespec now_ts;

                timespec_now (&now_ts);
                now = TS (now_ts);
                while (1) {
                        uint64_t at;
                        char     need_cbk = 0;

                        LOCK (&reg->lock);
                        {
                                event = reg->active.next;
                                at = TS (event->at);
                                if (event != &reg->active && now >= at) {
                                        need_cbk = 1;
                                        event->next->prev = event->prev;
                                        event->prev->next = event->next;
                                        event->fired = _gf_true;
                                }
                        }
                        UNLOCK (&reg->lock);
                        if (need_cbk) {
                                old_THIS = NULL;
                                if (event->xl) {
                                        old_THIS = THIS;
                                        THIS = event->xl;
                                }
                                event->callbk (event->data);
                                GF_FREE (event);
                                if (old_THIS) {
                                        THIS = old_THIS;
                                }
                        } else {
                                break;
                        }
                }
                nanosleep (&sleepts, NULL);
        }

        LOCK (&reg->lock);
        {
                /* Do not call gf_timer_call_cancel();
                 * it will lead to deadlock.
                 */
                while (reg->active.next != &reg->active) {
                        event = reg->active.next;
                        /* cannot call list_del as the event doesn't have
                         * a list_head */
                        __delete_entry (event);
                }
        }
        UNLOCK (&reg->lock);
        LOCK_DESTROY (&reg->lock);

        return NULL;
}

static gf_timer_registry_t *
gf_timer_registry_init (glusterfs_ctx_t *ctx)
{
        gf_timer_registry_t *reg = NULL;

        if (ctx == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        if (ctx->cleanup_started) {
                gf_msg_callingfn ("timer", GF_LOG_INFO, 0,
                                  LG_MSG_CTX_CLEANUP_STARTED,
                                  "ctx cleanup started");
                return NULL;
        }

        LOCK (&ctx->lock);
        {
                reg = ctx->timer;
        }
        UNLOCK (&ctx->lock);

        if (!reg) {
                reg = GF_CALLOC (1, sizeof (*reg),
                                 gf_common_mt_gf_timer_registry_t);
                if (!reg)
                        return NULL;

                LOCK_INIT (&reg->lock);
                reg->active.next = &reg->active;
                reg->active.prev = &reg->active;

                LOCK (&ctx->lock);
                {
                        ctx->timer = reg;
                }
                UNLOCK (&ctx->lock);

                gf_thread_create (&reg->th, NULL, gf_timer_proc, reg);
        }

        return reg;
}
struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name)
{
        struct mem_pool  *mem_pool = NULL;
        unsigned long     padded_sizeof_type = 0;
        GF_UNUSED void   *pool = NULL;
        GF_UNUSED int     i = 0;
        int               ret = 0;
        GF_UNUSED struct list_head *list = NULL;
        glusterfs_ctx_t  *ctx = NULL;

        if (!sizeof_type || !count) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }
        padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;

        mem_pool = GF_CALLOC (sizeof (*mem_pool), 1, gf_common_mt_mem_pool);
        if (!mem_pool)
                return NULL;

        ret = gf_asprintf (&mem_pool->name, "%s:%s", THIS->name, name);
        if (ret < 0) {
                /* free mem_pool here instead of leaking it on the error
                 * path */
                GF_FREE (mem_pool);
                return NULL;
        }

        if (!mem_pool->name) {
                GF_FREE (mem_pool);
                return NULL;
        }

        LOCK_INIT (&mem_pool->lock);
        INIT_LIST_HEAD (&mem_pool->list);
        INIT_LIST_HEAD (&mem_pool->global_list);

        mem_pool->padded_sizeof_type = padded_sizeof_type;
        mem_pool->real_sizeof_type = sizeof_type;

#ifndef DEBUG
        mem_pool->cold_count = count;
        pool = GF_CALLOC (count, padded_sizeof_type, gf_common_mt_long);
        if (!pool) {
                GF_FREE (mem_pool->name);
                GF_FREE (mem_pool);
                return NULL;
        }

        for (i = 0; i < count; i++) {
                list = pool + (i * (padded_sizeof_type));
                INIT_LIST_HEAD (list);
                list_add_tail (list, &mem_pool->list);
        }

        mem_pool->pool = pool;
        mem_pool->pool_end = pool + (count * (padded_sizeof_type));
#endif

        /* add this pool to the global list */
        ctx = THIS->ctx;
        if (!ctx)
                goto out;

        list_add (&mem_pool->global_list, &ctx->mempool_list);

out:
        return mem_pool;
}
void *
gf_timer_proc (void *ctx)
{
        gf_timer_registry_t   *reg = NULL;
        const struct timespec  sleepts = {.tv_sec = 1, .tv_nsec = 0, };
        gf_timer_t            *event = NULL;
        xlator_t              *old_THIS = NULL;

        if (ctx == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        reg = gf_timer_registry_init (ctx);
        if (!reg) {
                gf_msg ("timer", GF_LOG_ERROR, 0, LG_MSG_INIT_TIMER_FAILED,
                        "!reg");
                return NULL;
        }

        while (!reg->fin) {
                uint64_t        now;
                struct timespec now_ts;

                timespec_now (&now_ts);
                now = TS (now_ts);
                while (1) {
                        uint64_t at;
                        char     need_cbk = 0;

                        pthread_mutex_lock (&reg->lock);
                        {
                                event = reg->active.next;
                                at = TS (event->at);
                                if (event != &reg->active && now >= at) {
                                        need_cbk = 1;
                                        event->next->prev = event->prev;
                                        event->prev->next = event->next;
                                        event->fired = _gf_true;
                                }
                        }
                        pthread_mutex_unlock (&reg->lock);
                        if (need_cbk) {
                                old_THIS = NULL;
                                if (event->xl) {
                                        old_THIS = THIS;
                                        THIS = event->xl;
                                }
                                event->callbk (event->data);
                                GF_FREE (event);
                                if (old_THIS) {
                                        THIS = old_THIS;
                                }
                        } else {
                                break;
                        }
                }
                nanosleep (&sleepts, NULL);
        }

        pthread_mutex_lock (&reg->lock);
        {
                /* Do not call gf_timer_call_cancel();
                 * it will lead to deadlock.
                 */
                while (reg->active.next != &reg->active) {
                        event = reg->active.next;
                        /* cannot call list_del as the event doesn't have
                         * a list_head */
                        __delete_entry (event);
                }
        }
        pthread_mutex_unlock (&reg->lock);
        pthread_mutex_destroy (&reg->lock);
        GF_FREE (((glusterfs_ctx_t *) ctx)->timer);

        return NULL;
}

gf_timer_registry_t *
gf_timer_registry_init (glusterfs_ctx_t *ctx)
{
        if (ctx == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        if (!ctx->timer) {
                gf_timer_registry_t *reg = NULL;

                reg = GF_CALLOC (1, sizeof (*reg),
                                 gf_common_mt_gf_timer_registry_t);
                if (!reg)
                        goto out;

                pthread_mutex_init (&reg->lock, NULL);
                reg->active.next = &reg->active;
                reg->active.prev = &reg->active;

                ctx->timer = reg;
                gf_thread_create (&reg->th, NULL, gf_timer_proc, ctx);
        }
out:
        return ctx->timer;
}

void
gf_timer_registry_destroy (glusterfs_ctx_t *ctx)
{
        pthread_t            thr_id;
        gf_timer_registry_t *reg = NULL;

        if (ctx == NULL)
                return;

        reg = ctx->timer;
        /* guard against destroy before any timer was ever registered */
        if (reg == NULL)
                return;

        thr_id = reg->th;
        reg->fin = 1;
        pthread_join (thr_id, NULL);
}
gf_timer_t *
gf_timer_call_after (glusterfs_ctx_t *ctx, struct timespec delta,
                     gf_timer_cbk_t callbk, void *data)
{
        gf_timer_registry_t *reg = NULL;
        gf_timer_t          *event = NULL;
        gf_timer_t          *trav = NULL;
        uint64_t             at = 0;

        if (ctx == NULL) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        /* ctx and its fields are not accessed inside the mutex!?
         * TODO: even with this there is a possibility of a race
         * when cleanup_started is set after checking for it.
         */
        if (ctx->cleanup_started) {
                gf_msg_callingfn ("timer", GF_LOG_INFO, 0,
                                  LG_MSG_CTX_CLEANUP_STARTED,
                                  "ctx cleanup started");
                return NULL;
        }

        reg = gf_timer_registry_init (ctx);
        if (!reg) {
                gf_msg_callingfn ("timer", GF_LOG_ERROR, 0,
                                  LG_MSG_TIMER_REGISTER_ERROR, "!reg");
                return NULL;
        }

        event = GF_CALLOC (1, sizeof (*event), gf_common_mt_gf_timer_t);
        if (!event) {
                return NULL;
        }
        timespec_now (&event->at);
        timespec_adjust_delta (&event->at, delta);
        at = TS (event->at);
        event->callbk = callbk;
        event->data = data;
        event->xl = THIS;

        pthread_mutex_lock (&reg->lock);
        {
                /* the active list is kept sorted by expiry; walk backwards
                 * to find the insertion point */
                trav = reg->active.prev;
                while (trav != &reg->active) {
                        if (TS (trav->at) < at)
                                break;
                        trav = trav->prev;
                }
                event->prev = trav;
                event->next = event->prev->next;
                event->prev->next = event;
                event->next->prev = event;
        }
        pthread_mutex_unlock (&reg->lock);

        return event;
}
void *
mem_get (struct mem_pool *mem_pool)
{
        struct list_head  *list = NULL;
        void              *ptr = NULL;
        int               *in_use = NULL;
        struct mem_pool  **pool_ptr = NULL;

        if (!mem_pool) {
                gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
                                  LG_MSG_INVALID_ARG, "invalid argument");
                return NULL;
        }

        LOCK (&mem_pool->lock);
        {
                mem_pool->alloc_count++;
                if (mem_pool->cold_count) {
                        list = mem_pool->list.next;
                        list_del (list);

                        mem_pool->hot_count++;
                        mem_pool->cold_count--;

                        if (mem_pool->max_alloc < mem_pool->hot_count)
                                mem_pool->max_alloc = mem_pool->hot_count;

                        ptr = list;
                        in_use = (ptr + GF_MEM_POOL_LIST_BOUNDARY +
                                  GF_MEM_POOL_PTR);
                        *in_use = 1;

                        goto fwd_addr_out;
                }

                /* This is a problem area. If we've run out of
                 * chunks in our slab above, we need to allocate
                 * enough memory to service this request.
                 * The problem is, these individual chunks will fail
                 * the first address range check in __is_member. Now, since
                 * we're not allocating a full second slab, we won't have
                 * enough info to perform the range check in __is_member.
                 *
                 * I am working around this by performing a regular
                 * allocation, just the way the caller would've done when
                 * not using the mem-pool. That also means, we're not
                 * padding the size with the list_head structure because,
                 * this will not be added to the list of chunks that belong
                 * to the mem-pool allocated initially.
                 *
                 * This is the best we can do without adding functionality
                 * for managing multiple slabs. That does not interest us
                 * at present because it is too much work knowing that a
                 * better slab allocator is coming RSN.
                 */
                mem_pool->pool_misses++;
                mem_pool->curr_stdalloc++;
                if (mem_pool->max_stdalloc < mem_pool->curr_stdalloc)
                        mem_pool->max_stdalloc = mem_pool->curr_stdalloc;
                ptr = GF_CALLOC (1, mem_pool->padded_sizeof_type,
                                 gf_common_mt_mem_pool);

                /* Memory coming from the heap need not be transformed from
                 * a chunkhead to a usable pointer since it is not coming
                 * from the pool.
                 */
        }
fwd_addr_out:
        pool_ptr = mem_pool_from_ptr (ptr);
        *pool_ptr = (struct mem_pool *) mem_pool;
        ptr = mem_pool_chunkhead2ptr (ptr);
        UNLOCK (&mem_pool->lock);

        return ptr;
}
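/* Illustrative sketch (not from the tree) of a full pool lifecycle around
 * mem_get0()/mem_put().  mem_pool_new() is assumed to be the usual
 * sizeof-wrapping macro over mem_pool_new_fn(), and mem_pool_destroy() the
 * matching teardown from the mem-pool API; the item struct and the count
 * of 64 are hypothetical. */
struct example_item {
        int id;
};

static void
example_pool_usage (void)
{
        struct mem_pool     *pool = NULL;
        struct example_item *item = NULL;

        pool = mem_pool_new (struct example_item, 64);
        if (!pool)
                return;

        item = mem_get0 (pool);      /* zeroed chunk, or heap fallback */
        if (item) {
                item->id = 1;
                mem_put (item);      /* return the chunk to the pool */
        }

        mem_pool_destroy (pool);
}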