static bool
ckh_grow(ckh_t *ckh)
{
	bool ret;
	ckhc_t *tab, *ttab;
	size_t lg_curcells;
	unsigned lg_prevbuckets;

#ifdef CKH_COUNT
	ckh->ngrows++;
#endif

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table will have to be doubled more than once in order to create a
	 * usable table.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
	while (true) {
		size_t usize;

		lg_curcells++;
		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
		if (usize == 0) {
			ret = true;
			goto label_return;
		}
		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
		if (tab == NULL) {
			ret = true;
			goto label_return;
		}
		/* Swap in new table. */
		ttab = ckh->tab;
		ckh->tab = tab;
		tab = ttab;
		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

		if (ckh_rebuild(ckh, tab) == false) {
			idalloc(tab);
			break;
		}

		/* Rebuilding failed, so back out partially rebuilt table. */
		idalloc(ckh->tab);
		ckh->tab = tab;
		ckh->lg_curbuckets = lg_prevbuckets;
	}

	ret = false;
label_return:
	return (ret);
}
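/*
 * Note: ckh_grow() swaps the new table in *before* attempting the rebuild,
 * so a failed rebuild is undone by swapping the old table back and retrying
 * one size larger.  Below is a minimal self-contained sketch of that
 * swap/rebuild/rollback pattern; grow_table() and try_rebuild() are
 * hypothetical stand-ins, not jemalloc APIs.
 */
#include <stdbool.h>
#include <stdlib.h>

struct table {
	void	**cells;
	size_t	lg_cells;
};

/* Stand-in for cuckoo re-insertion of old's elements; false on success. */
static bool
try_rebuild(struct table *t, const struct table *old)
{

	(void)t; (void)old;
	return (false);
}

static bool
grow_table(struct table *t)
{
	struct table old = *t;
	size_t lg = t->lg_cells;

	while (true) {
		void **cells;

		lg++;
		cells = (void **)calloc((size_t)1 << lg, sizeof(void *));
		if (cells == NULL)
			return (true);	/* OOM; original table untouched. */
		/* Swap the new (empty) table in, keeping the old one aside. */
		t->cells = cells;
		t->lg_cells = lg;
		if (try_rebuild(t, &old) == false) {
			free(old.cells);	/* Success; drop old table. */
			return (false);
		}
		/* Rebuild failed; back out and retry one size larger. */
		free(t->cells);
		*t = old;
	}
}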
void
quarantine(tsd_t *tsd, void *ptr)
{
	quarantine_t *quarantine;
	size_t usize = isalloc(ptr, config_prof);

	cassert(config_fill);
	assert(opt_quarantine);

	if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
		idalloc(tsd, ptr);
		return;
	}
	/*
	 * Drain one or more objects if the quarantine size limit would be
	 * exceeded by appending ptr.
	 */
	if (quarantine->curbytes + usize > opt_quarantine) {
		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
		    - usize : 0;
		quarantine_drain(tsd, quarantine, upper_bound);
	}
	/* Grow the quarantine ring buffer if it's full. */
	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
		quarantine = quarantine_grow(tsd, quarantine);
	/* quarantine_grow() must free a slot if it fails to grow. */
	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
	/* Append ptr if its size doesn't exceed the quarantine size. */
	if (quarantine->curbytes + usize <= opt_quarantine) {
		size_t offset = (quarantine->first + quarantine->curobjs) &
		    ((ZU(1) << quarantine->lg_maxobjs) - 1);
		quarantine_obj_t *obj = &quarantine->objs[offset];
		obj->ptr = ptr;
		obj->usize = usize;
		quarantine->curbytes += usize;
		quarantine->curobjs++;
		if (config_fill && unlikely(opt_junk_free)) {
			/*
			 * Only do redzone validation if Valgrind isn't in
			 * operation.
			 */
			if ((!config_valgrind || likely(!in_valgrind)) &&
			    usize <= SMALL_MAXCLASS)
				arena_quarantine_junk_small(ptr, usize);
			else
				memset(ptr, 0x5a, usize);
		}
	} else {
		assert(quarantine->curbytes == 0);
		idalloc(tsd, ptr);
	}
}
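/*
 * Worked example of the drain accounting above (illustrative numbers, not
 * from the source): with opt_quarantine = 16384 bytes, curbytes = 15360, and
 * usize = 4096, the upper bound passed to quarantine_drain() is
 * 16384 - 4096 = 12288, so objects are freed oldest-first until
 * curbytes <= 12288 and ptr fits.  If usize exceeded opt_quarantine
 * entirely, the bound would clamp to 0 and the quarantine would drain
 * completely, after which ptr is freed immediately by the else branch.
 */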
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
	quarantine_t *ret;

	ret = quarantine_init(quarantine->lg_maxobjs + 1);
	if (ret == NULL) {
		quarantine_drain_one(quarantine);
		return (quarantine);
	}

	ret->curbytes = quarantine->curbytes;
	ret->curobjs = quarantine->curobjs;
	if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
	    quarantine->lg_maxobjs)) {
		/* objs ring buffer data are contiguous. */
		memcpy(ret->objs, &quarantine->objs[quarantine->first],
		    quarantine->curobjs * sizeof(quarantine_obj_t));
	} else {
		/* objs ring buffer data wrap around. */
		size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
		    quarantine->first;
		size_t ncopy_b = quarantine->curobjs - ncopy_a;

		memcpy(ret->objs, &quarantine->objs[quarantine->first],
		    ncopy_a * sizeof(quarantine_obj_t));
		memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
		    sizeof(quarantine_obj_t));
	}
	idalloc(quarantine);

	return (ret);
}
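/*
 * The two-branch copy above is the usual way to linearize a power-of-two
 * ring buffer: one memcpy suffices if the live region does not cross the end
 * of the backing array, otherwise the tail and head segments are copied
 * separately.  A self-contained sketch with plain ints (illustrative helper,
 * not a jemalloc API):
 */
#include <string.h>

static void
ring_linearize(int *dst, const int *ring, size_t lg_cap, size_t first,
    size_t count)
{
	size_t cap = (size_t)1 << lg_cap;

	if (first + count <= cap) {
		/* Live elements are contiguous. */
		memcpy(dst, &ring[first], count * sizeof(int));
	} else {
		/* Live elements wrap: copy [first, cap), then [0, rest). */
		size_t ncopy_a = cap - first;

		memcpy(dst, &ring[first], ncopy_a * sizeof(int));
		memcpy(&dst[ncopy_a], ring, (count - ncopy_a) * sizeof(int));
	}
}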
void
quarantine_cleanup(void *arg)
{
	quarantine_t *quarantine = *(quarantine_t **)arg;

	if (quarantine == QUARANTINE_STATE_REINCARNATED) {
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset quarantine to QUARANTINE_STATE_PURGATORY
		 * in order to receive another callback.
		 */
		quarantine = QUARANTINE_STATE_PURGATORY;
		quarantine_tsd_set(&quarantine);
	} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to QUARANTINE_STATE_PURGATORY so that other destructors
		 * wouldn't cause re-creation of the quarantine.  This time, do
		 * nothing, so that the destructor will not be called again.
		 */
	} else if (quarantine != NULL) {
		quarantine_drain(quarantine, 0);
		idalloc(quarantine);
		quarantine = QUARANTINE_STATE_PURGATORY;
		quarantine_tsd_set(&quarantine);
	}
}
static void
ckh_shrink(ckh_t *ckh)
{
	ckhc_t *tab, *ttab;
	size_t lg_curcells, usize;
	unsigned lg_prevbuckets;

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table rebuild will fail.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
	if (usize == 0)
		return;
	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
	if (tab == NULL) {
		/*
		 * An OOM error isn't worth propagating, since it doesn't
		 * prevent this or future operations from proceeding.
		 */
		return;
	}
	/* Swap in new table. */
	ttab = ckh->tab;
	ckh->tab = tab;
	tab = ttab;
	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

	if (ckh_rebuild(ckh, tab) == false) {
		idalloc(tab);
#ifdef CKH_COUNT
		ckh->nshrinks++;
#endif
		return;
	}

	/* Rebuilding failed, so back out partially rebuilt table. */
	idalloc(ckh->tab);
	ckh->tab = tab;
	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
	ckh->nshrinkfails++;
#endif
}
static void
quarantine_drain_one(quarantine_t *quarantine)
{
	quarantine_obj_t *obj = &quarantine->objs[quarantine->first];

	assert(obj->usize == isalloc(obj->ptr, config_prof));
	idalloc(obj->ptr);
	quarantine->curbytes -= obj->usize;
	quarantine->curobjs--;
	quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
	    quarantine->lg_maxobjs) - 1);
}
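/*
 * Because the ring capacity is a power of two, advancing `first` uses a mask
 * instead of a modulo.  A runnable demonstration with illustrative values:
 */
#include <stdio.h>

int
main(void)
{
	/* With lg_maxobjs == 3 the capacity is 8 and the mask is 0x7, so a
	 * head index starting at 5 wraps 5, 6, 7, 0, 1 as objects drain. */
	size_t mask = ((size_t)1 << 3) - 1;
	size_t i;

	for (i = 0; i < 5; i++)
		printf("%zu ", (5 + i) & mask);	/* prints: 5 6 7 0 1 */
	printf("\n");
	return (0);
}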
void
quarantine_cleanup(tsd_t *tsd)
{
	quarantine_t *quarantine;

	if (!config_fill)
		return;

	quarantine = tsd_quarantine_get(tsd);
	if (quarantine != NULL) {
		quarantine_drain(tsd, quarantine, 0);
		idalloc(tsd, quarantine);
		tsd_quarantine_set(tsd, NULL);
	}
}
void
quarantine_alloc_hook_work(tsd_t *tsd)
{
	quarantine_t *quarantine;

	if (!tsd_nominal(tsd))
		return;

	quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
	/*
	 * Check again whether quarantine has been initialized, because
	 * quarantine_init() may have triggered recursive initialization.
	 */
	if (tsd_quarantine_get(tsd) == NULL)
		tsd_quarantine_set(tsd, quarantine);
	else
		idalloc(tsd, quarantine);
}
void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
	if (config_debug)
		memset(ckh, 0x5a, sizeof(ckh_t));
}
void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);
	dassert(ckh->magic == CKH_MAGIC);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
#ifdef JEMALLOC_DEBUG
	memset(ckh, 0x5a, sizeof(ckh_t));
#endif
}
void
quarantine(void *ptr)
{
	quarantine_t *quarantine;
	size_t usize = isalloc(ptr, config_prof);

	cassert(config_fill);
	assert(opt_quarantine);

	quarantine = *quarantine_tsd_get();
	if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
		if (quarantine == QUARANTINE_STATE_PURGATORY) {
			/*
			 * Make a note that quarantine() was called after
			 * quarantine_cleanup() was called.
			 */
			quarantine = QUARANTINE_STATE_REINCARNATED;
			quarantine_tsd_set(&quarantine);
		}
		idalloc(ptr);
		return;
	}
	/*
	 * Drain one or more objects if the quarantine size limit would be
	 * exceeded by appending ptr.
	 */
	if (quarantine->curbytes + usize > opt_quarantine) {
		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
		    - usize : 0;
		quarantine_drain(quarantine, upper_bound);
	}
	/* Grow the quarantine ring buffer if it's full. */
	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
		quarantine = quarantine_grow(quarantine);
	/* quarantine_grow() must free a slot if it fails to grow. */
	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
	/* Append ptr if its size doesn't exceed the quarantine size. */
	if (quarantine->curbytes + usize <= opt_quarantine) {
		size_t offset = (quarantine->first + quarantine->curobjs) &
		    ((ZU(1) << quarantine->lg_maxobjs) - 1);
		quarantine_obj_t *obj = &quarantine->objs[offset];
		obj->ptr = ptr;
		obj->usize = usize;
		quarantine->curbytes += usize;
		quarantine->curobjs++;
		if (config_fill && opt_junk) {
			/*
			 * Only do redzone validation if Valgrind isn't in
			 * operation.
			 */
			if ((config_valgrind == false || in_valgrind == false)
			    && usize <= SMALL_MAXCLASS)
				arena_quarantine_junk_small(ptr, usize);
			else
				memset(ptr, 0x5a, usize);
		}
	} else {
		assert(quarantine->curbytes == 0);
		idalloc(ptr);
	}
}
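/*
 * This older, TSD-destructor-driven variant packs small sentinel states into
 * the slot that normally holds the quarantine pointer, telling them apart
 * from real pointers by comparing against QUARANTINE_STATE_MAX.  A minimal
 * sketch of that tagging idea with hypothetical names (the exact values and
 * ordering of jemalloc's sentinels are not reproduced here):
 */
#include <stdbool.h>
#include <stdint.h>

#define	STATE_PURGATORY		((void *)(uintptr_t)1)
#define	STATE_REINCARNATED	((void *)(uintptr_t)2)
#define	STATE_MAX		STATE_REINCARNATED

/* Small integers can never alias a heap pointer, so values <= STATE_MAX are
 * lifecycle states and anything larger is a live object pointer. */
static bool
slot_is_state(void *slot)
{

	return ((uintptr_t)slot <= (uintptr_t)STATE_MAX);
}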
// Create an LDPC instance.
LDPC *generate_LDPC(int weight_row, int weight_col, int n_code, char *exename)
{
    // Loop counters.
    int i, j, k;

    LDPC *ldpc = (LDPC *)malloc(sizeof(LDPC));
    ldpc->weight_col = weight_col;
    ldpc->weight_row = weight_row;
    ldpc->n_code = n_code;

    // If the code length is not a multiple of the row weight, pad the
    // column count up to the next multiple.
    if(n_code%weight_row!=0){
        ldpc->n_col = n_code + weight_row - (n_code%weight_row);
    }else{
        ldpc->n_col = n_code;
    }

    char str[256];
    sprintf(str, "%s_IMG/G_%d_%d_%d.png", exename, ldpc->weight_row,
            ldpc->weight_col, ldpc->n_code);

    // If a saved matrix image exists, load it; otherwise construct the
    // matrices from scratch.
    if(!load_imagemat(ldpc)){
        // Rows per unit of column weight.
        int n_row_perblock = ldpc->n_col/ldpc->weight_row;
        // Total number of rows.
        ldpc->n_row = n_row_perblock*ldpc->weight_col;

        PEG(ldpc);

        // Allocate the parity-check matrix.
        ldpc->H = idalloc(ldpc->n_row, ldpc->n_col);
        for(i=0;i<ldpc->n_col;i++){
            for(j=0;j<ldpc->VC_1p[i];j++){
                ldpc->H[ldpc->VC_1[i][j]][i] = 1;
            }
        }
        ifree2(ldpc->VC_1, ldpc->n_col);
        ifree2(ldpc->CV_1, ldpc->n_row);
        free(ldpc->VC_1p);
        free(ldpc->CV_1p);

        // Reduce to row-echelon (trapezoidal canonical) form.
        int **H_irr = idalloc(ldpc->n_row, ldpc->n_col);
        int *idx_col = isalloc(ldpc->n_col);
        init_ivec_order(idx_col, ldpc->n_col);
        ldpc->n_parity = irreduce_binmat(ldpc->H, H_irr, ldpc->n_row,
                                         ldpc->n_col, idx_col);

        // Allocate the generator matrix.
        ldpc->n_inf = ldpc->n_col - ldpc->n_parity;
        ldpc->n_effinf = ldpc->n_inf - (ldpc->n_col-ldpc->n_code);
        ldpc->G = idalloc(ldpc->n_inf, ldpc->n_col);
        ldpc->coderate = ldpc->n_effinf/(double)ldpc->n_code;

        // Fill in the generator matrix.
        for(i=0;i<ldpc->n_inf;i++){
            ldpc->G[i][i+ldpc->n_parity] = 1;
        }
        for(i=0;i<ldpc->n_inf;i++){
            for(j=0;j<ldpc->n_parity;j++){
                ldpc->G[i][j] = H_irr[j][i+ldpc->n_parity];
            }
        }
        for(i=0;i<ldpc->n_row;i++){
            for(j=0;j<ldpc->n_col;j++){
                H_irr[i][j] = ldpc->H[i][j];
            }
        }
        for(i=0;i<ldpc->n_row;i++){
            for(j=0;j<ldpc->n_col;j++){
                ldpc->H[i][j] = H_irr[i][idx_col[j]];
            }
        }
        print_matimage(ldpc);

        idfree(H_irr, ldpc->n_row);
        free(idx_col);
    }

    // For each column of G, record only the rows whose entry is 1.
    int *tmp_1 = isalloc(10000);
    ldpc->G_1 = isalloc_p(ldpc->n_col);
    ldpc->G_1p = isalloc(ldpc->n_col);
    for(i=0;i<ldpc->n_col;i++){
        for(j=0;j<ldpc->n_inf;j++){
            if(ldpc->G[j][i]){
                tmp_1[ldpc->G_1p[i]] = j;
                ldpc->G_1p[i]++;
            }
        }
        ldpc->G_1[i] = isalloc(ldpc->G_1p[i]);
        for(j=0;j<ldpc->G_1p[i];j++){
            ldpc->G_1[i][j] = tmp_1[j];
        }
    }

    // Check-node / variable-node adjacency lists.
    ldpc->CV_1 = isalloc_p(ldpc->n_row);
    ldpc->CV_1p = isalloc(ldpc->n_row);
    for(i=0;i<ldpc->n_row;i++){
        for(j=0;j<ldpc->n_col;j++){
            if(ldpc->H[i][j]){
                tmp_1[ldpc->CV_1p[i]] = j;
                ldpc->CV_1p[i]++;
            }
        }
        ldpc->CV_1[i] = isalloc(ldpc->CV_1p[i]);
        for(j=0;j<ldpc->CV_1p[i];j++){
            ldpc->CV_1[i][j] = tmp_1[j];
        }
    }
    ldpc->VC_1 = isalloc_p(ldpc->n_col);
    ldpc->VC_1p = isalloc(ldpc->n_col);
    for(i=0;i<ldpc->n_col;i++){
        for(j=0;j<ldpc->n_row;j++){
            if(ldpc->H[j][i]){
                tmp_1[ldpc->VC_1p[i]] = j;
                ldpc->VC_1p[i]++;
            }
        }
        ldpc->VC_1[i] = isalloc(ldpc->VC_1p[i]);
        for(j=0;j<ldpc->VC_1p[i];j++){
            ldpc->VC_1[i][j] = tmp_1[j];
        }
    }

    // Reverse-lookup tables.
    ldpc->CV_1_inv = isalloc_p(ldpc->n_row);
    for(i=0;i<ldpc->n_row;i++){
        ldpc->CV_1_inv[i] = isalloc(ldpc->CV_1p[i]);
        for(j=0;j<ldpc->CV_1p[i];j++){
            for(k=0;k<ldpc->VC_1p[ldpc->CV_1[i][j]];k++){
                if(ldpc->VC_1[ldpc->CV_1[i][j]][k] == i){
                    ldpc->CV_1_inv[i][j] = k;
                    break;
                }
            }
        }
    }
    ldpc->VC_1_inv = isalloc_p(ldpc->n_col);
    for(i=0;i<ldpc->n_col;i++){
        ldpc->VC_1_inv[i] = isalloc(ldpc->VC_1p[i]);
        for(j=0;j<ldpc->VC_1p[i];j++){
            for(k=0;k<ldpc->CV_1p[ldpc->VC_1[i][j]];k++){
                if(ldpc->CV_1[ldpc->VC_1[i][j]][k] == i){
                    ldpc->VC_1_inv[i][j] = k;
                    break;
                }
            }
        }
    }

    ldpc->codebits = isalloc(ldpc->n_col);
    ldpc->infbits_in = isalloc(ldpc->n_inf);
    ldpc->infbits_out = isalloc(ldpc->n_inf);
    ldpc->checkbits = isalloc(ldpc->n_row);
    ldpc->variablebits = isalloc(ldpc->n_col);

    ldpc->C_llr = dsalloc_p(ldpc->n_row);
    for(i=0;i<ldpc->n_row;i++){
        ldpc->C_llr[i] = dsalloc(ldpc->CV_1p[i]);
    }
    ldpc->V_llr = dsalloc_p(ldpc->n_col);
    for(i=0;i<ldpc->n_col;i++){
        ldpc->V_llr[i] = dsalloc(ldpc->VC_1p[i]);
    }

    // The trailing bits of the LLR vector correspond to shortened bits, so
    // pin their LLR to a large value (standing in for +infinity).
    ldpc->llr = dsalloc(ldpc->n_col);
    for(i=0;i<ldpc->n_inf-ldpc->n_effinf;i++){
        ldpc->llr[ldpc->n_col-i-1] = 12.5;
    }

    return ldpc;
}
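// Worked example of the dimension bookkeeping above (illustrative numbers,
// not from any particular run): with weight_row = 6, weight_col = 3, and
// n_code = 1000, the column count pads to n_col = 1002 (the next multiple of
// 6), so n_row_perblock = 1002/6 = 167 and n_row = 167*3 = 501.  If row
// reduction yields n_parity = 501 independent rows, then
// n_inf = 1002 - 501 = 501, the two padding columns give
// n_effinf = 501 - 2 = 499, and the effective code rate is
// 499/1000 = 0.499.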
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment != 0)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}

	return (ret);
}
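/*
 * The mremap(2) fast path above relocates the old pages onto the freshly
 * allocated destination instead of copying them.  A minimal Linux-only
 * sketch of the same call pattern on plain mmap regions (not jemalloc
 * chunks):
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	size_t oldsize = 1 << 20, newsize = 1 << 21;
	/* Source mapping, standing in for the old huge allocation. */
	char *src = mmap(NULL, oldsize, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	/* Destination, standing in for the newly allocated chunk. */
	char *dst = mmap(NULL, newsize, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

	if (src == MAP_FAILED || dst == MAP_FAILED)
		return (1);
	memset(src, 42, oldsize);
	/*
	 * Move src's pages onto dst without copying; MREMAP_FIXED discards
	 * whatever was previously mapped at dst, and src ends up unmapped.
	 */
	if (mremap(src, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
	    dst) == MAP_FAILED) {
		perror("mremap");
		return (1);
	}
	printf("dst[0] == %d\n", dst[0]);	/* prints: dst[0] == 42 */
	munmap(dst, newsize);
	return (0);
}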
static bool
ctl_grow(void)
{
	ctl_arena_stats_t *astats;
	arena_t **tarenas;

	/* Allocate extended arena stats and arenas arrays. */
	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);
	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
	    sizeof(arena_t *));
	if (tarenas == NULL) {
		idalloc(astats);
		return (true);
	}

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		idalloc(tarenas);
		idalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
	/* Initialize the new arenas element. */
	tarenas[ctl_stats.narenas] = NULL;
	{
		arena_t **arenas_old = arenas;
		/*
		 * Swap extended arenas array into place.  Although ctl_mtx
		 * protects this function from other threads extending the
		 * array, it does not protect from other threads mutating it
		 * (i.e. initializing arenas and setting array elements to
		 * point to them).  Therefore, array copying must happen under
		 * the protection of arenas_lock.
		 */
		malloc_mutex_lock(&arenas_lock);
		arenas = tarenas;
		memcpy(arenas, arenas_old, ctl_stats.narenas *
		    sizeof(arena_t *));
		narenas_total++;
		arenas_extend(narenas_total - 1);
		malloc_mutex_unlock(&arenas_lock);
		/*
		 * Deallocate arenas_old only if it came from imalloc() (not
		 * base_alloc()).
		 */
		if (ctl_stats.narenas != narenas_auto)
			idalloc(arenas_old);
	}
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;

	return (false);
}
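/*
 * The same allocate-outside, copy-and-publish-under-lock shape in isolation:
 * readers holding the lock never observe a half-copied array.  A hedged
 * sketch with hypothetical globals, using pthreads instead of jemalloc's
 * malloc_mutex; the unlocked sizing read assumes a single grower, as ctl_mtx
 * guarantees in ctl_grow().
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t	table_lock = PTHREAD_MUTEX_INITIALIZER;
static void		**table;
static size_t		table_len;

/* Grow the shared table by one slot; nonzero on OOM. */
static int
table_grow_one(void)
{
	void **grown, **old;

	grown = (void **)malloc((table_len + 1) * sizeof(void *));
	if (grown == NULL)
		return (1);
	pthread_mutex_lock(&table_lock);
	old = table;
	if (old != NULL)
		memcpy(grown, old, table_len * sizeof(void *));
	grown[table_len] = NULL;
	table = grown;
	table_len++;
	pthread_mutex_unlock(&table_lock);
	free(old);	/* Old array is unreachable once unpublished. */
	return (0);
}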