/*!
 * Destroys the resources owned by a pmem_context.
 *
 * @param bc  owning cache instance (validated via ASSERT_BITTERN_CACHE).
 * @param ctx context to tear down; magic numbers must be intact.
 *
 * The context must be idle on entry: no attached buffer/page, no flags,
 * busy count zero. If pmem_context_setup() allocated the double buffer,
 * it is returned to its kmem slab here and the dbi pointers are cleared.
 */
void pmem_context_destroy(struct bittern_cache *bc, struct pmem_context *ctx)
{
	struct data_buffer_info *dbi;

	ASSERT_BITTERN_CACHE(bc);
	ASSERT(ctx != NULL);
	M_ASSERT(ctx->magic1 == PMEM_CONTEXT_MAGIC1);
	M_ASSERT(ctx->magic2 == PMEM_CONTEXT_MAGIC2);
	dbi = &ctx->dbi;
	/*
	 * this code copied from pagebuf_free_dbi()
	 * in bittern_cache_main.h.
	 * we also add conditional free for the cases we didn't allocate
	 * the buffer yet.
	 */
	/* context must not be in active use */
	ASSERT(dbi->di_buffer == NULL);
	ASSERT(dbi->di_page == NULL);
	ASSERT(dbi->di_flags == 0x0);
	ASSERT(atomic_read(&dbi->di_busy) == 0);
	if (dbi->di_buffer_vmalloc_buffer != NULL) {
		/*
		 * buffer was allocated by setup; the slab pointer must
		 * identify one of the two caches it could have come from
		 */
		ASSERT(dbi->di_buffer_vmalloc_page != NULL);
		ASSERT(dbi->di_buffer_slab != NULL);
		ASSERT(dbi->di_buffer_slab == bc->bc_kmem_map ||
		       dbi->di_buffer_slab == bc->bc_kmem_threads);
		kmem_cache_free(dbi->di_buffer_slab,
				dbi->di_buffer_vmalloc_buffer);
		dbi->di_buffer_vmalloc_buffer = NULL;
		dbi->di_buffer_vmalloc_page = NULL;
		dbi->di_buffer_slab = NULL;
	}
}
// Looks up `key` in the B+ tree and returns the associated item via `item`.
// On a hit, the leaf latch (presumably taken by find_leaf — release_latch is
// called here on the hit path) is released, and the leaf pointer plus slot
// index are cached per thread (cur_leaf_per_thd / cur_idx_per_thd).
// A miss is treated as a fatal error (M_ASSERT(false)), so the trailing
// `return rc` (Abort) is effectively unreachable.
RC index_btree::index_read(idx_key_t key, itemid_t *& item, uint64_t thd_id, int64_t part_id) {
	RC rc = Abort;
	glob_param params;
	assert(part_id != -1);
	params.part_id = part_id;
	bt_node * leaf;
	find_leaf(params, key, INDEX_READ, leaf);
	if (leaf == NULL)
		M_ASSERT(false, "the leaf does not exist!");
	// linear scan of the leaf's keys
	for (UInt32 i = 0; i < leaf->num_keys; i++)
		if (leaf->keys[i] == key) {
			item = (itemid_t *)leaf->pointers[i];
			release_latch(leaf);
			// remember where we found it for subsequent index_next calls
			(*cur_leaf_per_thd[thd_id]) = leaf;
			*cur_idx_per_thd[thd_id] = i;
			return RCOK;
		}
	// release the latch after reading the node
	// NOTE(review): on this miss path the leaf latch is NOT released before
	// the fatal assert below — moot while the assert aborts, but must be
	// fixed if misses ever become recoverable.
	printf("key = %ld\n", key);
	M_ASSERT(false, "the key does not exist!");
	return rc;
}
// Copy constructor: copies the occupied prefix of `o` into this array.
// NOTE(review): uses ::memcpy, which is only valid when value_type is
// trivially copyable — presumably guaranteed by this container's users;
// confirm before storing non-trivial element types.
fixed_array(const fixed_array& o)
{
	M_ASSERT(_CAPA >= o.m_size); // destination capacity must fit the source
	m_size = o.m_size;
	::memcpy(m_data, o.m_data, m_size*sizeof(value_type));
}
/*
 * Allocates a per-thread statistics record, appends it to the manager's
 * doubly-linked list of thread stats (under the manager mutex), and
 * initializes its summary statset and per-thread hash table.
 *
 * @param statsmgr    owning stats manager.
 * @param tid         thread id recorded in the new record.
 * @param threadstatp out: the newly created record.
 * @return M_R_SUCCESS (allocation failure aborts via M_ASSERT, consistent
 *         with the other allocation sites in this file).
 */
m_result_t m_stats_threadstat_create(m_statsmgr_t *statsmgr, unsigned int tid, m_stats_threadstat_t **threadstatp)
{
	m_stats_threadstat_t *threadstat;

	threadstat = (m_stats_threadstat_t *) MALLOC(sizeof(m_stats_threadstat_t));
	/* check the allocation like every other alloc site does */
	M_ASSERT(threadstat);
	M_MUTEX_LOCK(&(statsmgr->mutex));
	if (statsmgr->alloc_threadstat_list_head == NULL) {
		/* empty list: tail must agree */
		M_ASSERT(statsmgr->alloc_threadstat_list_tail == NULL);
		statsmgr->alloc_threadstat_list_head = threadstat;
		statsmgr->alloc_threadstat_list_tail = threadstat;
		threadstat->next = NULL;
		threadstat->prev = NULL;
	} else {
		/* append at tail */
		statsmgr->alloc_threadstat_list_tail->next = threadstat;
		threadstat->prev = statsmgr->alloc_threadstat_list_tail;
		/*
		 * BUGFIX: next was previously left uninitialized on this
		 * path; any forward traversal from the tail would follow
		 * garbage.
		 */
		threadstat->next = NULL;
		statsmgr->alloc_threadstat_list_tail = threadstat;
	}
	statsmgr->alloc_threadstat_num++;
	M_MUTEX_UNLOCK(&(statsmgr->mutex));
	m_stats_statset_init(&(threadstat->summary_statset), NULL);
	threadstat->tid = tid;
	m_chhash_create(&threadstat->stats_table, M_STATS_THREADSTAT_HASHTABLE_SIZE, false);
	*threadstatp = threadstat;
	return M_R_SUCCESS;
}
/// Installs the OSAL instance shared by all timers.
/// @param osal platform abstraction layer; must not be null.
void Timer::StaticInit(Osal * osal)
{
	// A null OSAL would break every subsequent timer operation.
	M_ASSERT(osal);
	Timer::osal = osal;
}
// Serializes the current shape — and the current solve, if one exists —
// into a compact bit-packed binary string (format version 0x01).
// NOTE(review): BinWriter presumably writes into `ret` through the
// reference passed to its constructor and m_buf is (or aliases) that
// buffer — confirm; otherwise `ret` is dead.
string CubeDocBase::serializeMinBin()
{
	string ret;
	BinWriter wr(ret);
	wr.addBits(0x01, 8); // version
	wr.addBits(m_shp->fcn, 8); // face count
	for (int i = 0; i < m_shp->fcn; ++i) {
		const auto& face = m_shp->faces[i];
		// coordinates packed /4 into 6 bits each — assumes they are
		// multiples of 4 in [0, 252]; TODO confirm against shape invariants
		wr.addBits(face.ex.x / 4, 6);
		wr.addBits(face.ex.y / 4, 6);
		wr.addBits(face.ex.z / 4, 6);
		wr.addBits(face.dr, 2); // face direction, 2 bits
	}
	auto slv = getCurrentSolve();
	if (slv != nullptr) {
		// a solve must have exactly one entry per face
		M_ASSERT(slv->dt.size() == m_shp->fcn);
		for (int i = 0; i < m_shp->fcn; ++i) {
			wr.addBits(slv->dt[i].abs_sc, 8);
			wr.addBits(slv->dt[i].abs_rt, 3);
		}
	}
	wr.flush(); // emit any partially filled byte
	return wr.m_buf;
}
// Intrinsic: file_write(filename, content) — writes `content` to the file
// named by `filename`. Always evaluates to the undefined node.
Node * methodFileWrite(Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 2);
	String * target = String::cast(args[0]);
	String * text = String::cast(args[1]);
	path::writeFileContents(*target, *text);
	return &Node::UNDEFINED_NODE;
}
// Intrinsic: file_copy(source, dest) — copies the file at `source` to
// `dest`. Always evaluates to the undefined node.
Node * methodFileCopy(Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 2);
	String * src = String::cast(args[0]);
	String * dst = String::cast(args[1]);
	path::copyFile(*src, *dst);
	return &Node::UNDEFINED_NODE;
}
// Attempts to upgrade a shared (SH) latch the caller already holds on
// `node` to exclusive (EX). Returns RCOK on success, Abort if other
// sharers are still present (no blocking, no retry here).
// node->latch is a spin bit (taken via ATOM_CAS) protecting latch_type
// and share_cnt while they are inspected and updated.
RC index_btree::upgrade_latch(bt_node * node) {
	if (!ENABLE_LATCH)
		return RCOK;
	bool success = false;
	// spin until we own the node's internal latch bit
	// if ( g_cc_alg != HSTORE )
	while ( !ATOM_CAS(node->latch, false, true) ) {}
	// pthread_mutex_lock(&node->locked);
	// while (!ATOM_CAS(node->locked, false, true)) {}

	// caller must currently hold the latch in shared mode
	M_ASSERT( (node->latch_type == LATCH_SH), "Error" );
	if (node->share_cnt > 1)
		// other readers present: cannot take EX
		success = false;
	else { // share_cnt == 1: we are the sole sharer, take EX
		success = true;
		node->latch_type = LATCH_EX;
		node->share_cnt = 0;
	}
	// release the internal latch bit
	// if ( g_cc_alg != HSTORE )
	bool ok = ATOM_CAS(node->latch, true, false);
	assert(ok);
	// pthread_mutex_unlock(&node->locked);
	// assert( ATOM_CAS(node->locked, true, false) );
	if (success)
		return RCOK;
	else
		return Abort;
}
// Tries to acquire `latch_type` (SH or EX) on `node` without blocking.
// Returns true on success. Compatible cases: node unlatched (any request
// granted), or node already in SH with an SH request (share_cnt bumped).
// Any other combination fails immediately.
// node->latch is a spin bit (taken via ATOM_CAS) protecting latch_type
// and share_cnt during the check-and-set.
bool index_btree::latch_node(bt_node * node, latch_t latch_type) {
	// TODO latch is disabled
	if (!ENABLE_LATCH)
		return true;
	bool success = false;
	// printf("%s : %d\n", __FILE__, __LINE__);
	// spin until we own the node's internal latch bit
	// if ( g_cc_alg != HSTORE )
	while ( !ATOM_CAS(node->latch, false, true) ) {}
	// pthread_mutex_lock(&node->locked);
	// printf("%s : %d\n", __FILE__, __LINE__);

	latch_t node_latch = node->latch_type;
	if (node_latch == LATCH_NONE ||
	    (node_latch == LATCH_SH && latch_type == LATCH_SH)) {
		node->latch_type = latch_type;
		if (node_latch == LATCH_NONE)
			// an unlatched node must have no sharers recorded
			M_ASSERT( (node->share_cnt == 0), "share cnt none 0!" );
		if (node->latch_type == LATCH_SH)
			node->share_cnt ++;
		success = true;
	}
	else // latch_type incompatible
		success = false;
	// release the internal latch bit
	// if ( g_cc_alg != HSTORE )
	bool ok = ATOM_CAS(node->latch, true, false);
	assert(ok);
	// pthread_mutex_unlock(&node->locked);
	// assert(ATOM_CAS(node->locked, true, false));
	return success;
}
/*
 * Delayed-work handler that periodically persists the pmem header.
 * Skips the update once the cache has entered an error state, but always
 * re-arms itself to run again in 30 seconds. On a failed update the cache
 * is moved to ES_ERROR_FAIL_ALL so all future requests fail.
 */
static void pmem_header_update_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct bittern_cache *bc;
	int ret;

	/* recover the owning cache from the embedded work item */
	bc = container_of(dwork, struct bittern_cache, bc_pmem_update_work);
	ASSERT(bc != NULL);
	ASSERT_BITTERN_CACHE(bc);
	M_ASSERT(bc->bc_pmem_update_workqueue != NULL);
	if (bc->error_state == ES_NOERROR) {
		BT_TRACE(BT_LEVEL_TRACE2, bc, NULL, NULL, NULL, NULL, "bc=%p", bc);
		ret = pmem_header_update(bc, 0);
		/* should make this a common function */
		if (ret != 0) {
			printk_err("%s: cannot update header: %d. will fail all future requests\n", bc->bc_name, ret);
			/* header can no longer be persisted: fail everything */
			bc->error_state = ES_ERROR_FAIL_ALL;
		}
	}
	/* re-arm: run again in 30 seconds */
	schedule_delayed_work(&bc->bc_pmem_update_work, msecs_to_jiffies(30000));
}
/*
 * Starts the periodic pmem header update: initializes the delayed work
 * item and schedules its first run 30 seconds from now.
 */
void pmem_header_update_start_workqueue(struct bittern_cache *bc)
{
	struct delayed_work *dwork = &bc->bc_pmem_update_work;

	printk_debug("%s: pmem_header_start_workqueue\n", bc->bc_name);
	M_ASSERT(bc->bc_pmem_update_workqueue != NULL);
	INIT_DELAYED_WORK(dwork, pmem_header_update_worker);
	/* first update fires after 30 seconds; the worker re-arms itself */
	schedule_delayed_work(dwork, msecs_to_jiffies(30000));
}
/*!
 * Sets up resources for pmem context.
 * Later this will be split into implementation specific code,
 * one for pmem_block, one for pmem_mem.
 * The pmem_block implementation will allocate a double buffer,
 * the pmem_mem implementation will call DAX to retrieve the virtual
 * addresses for data and metadata for "cache_block" and "cloned_cache_block".
 *
 * @param bc                 owning cache (validated).
 * @param kmem_slab          slab to allocate the double buffer from; must be
 *                           one of bc->bc_kmem_map / bc->bc_kmem_threads.
 * @param cache_block        block this context is for (only used in traces here).
 * @param cloned_cache_block clone, currently unused in this function.
 * @param ctx                context to populate; must be freshly initialized
 *                           (all dbi fields NULL/zero).
 * @return 0 on success, -ENOMEM if the slab allocation fails.
 */
int pmem_context_setup(struct bittern_cache *bc, struct kmem_cache *kmem_slab, struct cache_block *cache_block, struct cache_block *cloned_cache_block, struct pmem_context *ctx)
{
	struct data_buffer_info *dbi;

	ASSERT_BITTERN_CACHE(bc);
	ASSERT(kmem_slab == bc->bc_kmem_map || kmem_slab == bc->bc_kmem_threads);
	ASSERT(ctx != NULL);
	M_ASSERT(ctx->magic1 == PMEM_CONTEXT_MAGIC1);
	M_ASSERT(ctx->magic2 == PMEM_CONTEXT_MAGIC2);
	dbi = &ctx->dbi;
	/*
	 * this code copied from pagebuf_allocate_dbi()
	 * in bittern_cache_main.h
	 */
	/* context must be pristine before we attach a buffer */
	ASSERT(dbi->di_buffer_vmalloc_buffer == NULL);
	ASSERT(dbi->di_buffer_vmalloc_page == NULL);
	ASSERT(dbi->di_buffer_slab == NULL);
	ASSERT(dbi->di_buffer == NULL);
	ASSERT(dbi->di_page == NULL);
	ASSERT(dbi->di_flags == 0x0);
	ASSERT(atomic_read(&dbi->di_busy) == 0);
	dbi->di_buffer_vmalloc_buffer = kmem_cache_alloc(kmem_slab, GFP_NOIO);
	/*TODO_ADD_ERROR_INJECTION*/
	if (dbi->di_buffer_vmalloc_buffer == NULL) {
		BT_DEV_TRACE(BT_LEVEL_ERROR, bc, NULL, cache_block, NULL, NULL, "kmem_cache_alloc kmem_slab failed");
		printk_err("%s: kmem_cache_alloc kmem_slab failed\n", bc->bc_name);
		return -ENOMEM;
	}
	/* slab objects are page-sized, so the buffer must be page aligned */
	ASSERT(PAGE_ALIGNED(dbi->di_buffer_vmalloc_buffer));
	dbi->di_buffer_vmalloc_page = virtual_to_page(dbi->di_buffer_vmalloc_buffer);
	ASSERT(dbi->di_buffer_vmalloc_page != NULL);
	/* remember which slab to free back to in pmem_context_destroy() */
	dbi->di_buffer_slab = kmem_slab;
	return 0;
}
/* Allocates a new, empty object (no names). Aborts via M_ASSERT on OOM. */
obj_t* object_init()
{
	obj_t* obj = (obj_t*)malloc(sizeof(obj_t));
	M_ASSERT(obj);
	obj->names = NULL;
	return obj;
}
/* Boxes an int for the interpreter. Aborts via M_ASSERT on OOM. */
void* var_data_alloc_G_INT(int value)
{
	var_data_int* boxed = (var_data_int*)malloc(sizeof(var_data_int));
	M_ASSERT(boxed);
	boxed->v = value;
	return boxed;
}
/* Boxes a char for the interpreter. Aborts via M_ASSERT on OOM. */
void* var_data_alloc_G_CHAR(char value)
{
	var_data_char* boxed = (var_data_char*)malloc(sizeof(var_data_char));
	M_ASSERT(boxed);
	boxed->v = value;
	return boxed;
}
/* Boxes a floating-point value for the interpreter. Aborts via M_ASSERT on OOM. */
void* var_data_alloc_G_FLOAT(double value)
{
	var_data_float* boxed = (var_data_float*)malloc(sizeof(var_data_float));
	M_ASSERT(boxed);
	boxed->v = value;
	return boxed;
}
/* Boxes a type tag for the interpreter. Aborts via M_ASSERT on OOM. */
void* var_data_alloc_TYPE(b_type type)
{
	var_data_type* boxed = (var_data_type*)malloc(sizeof(var_data_type));
	M_ASSERT(boxed);
	boxed->v = type;
	return boxed;
}
//! @brief Constructor for an exponential filter. //! @param [in] smoothingConstant The smoothing constant for the filter. This is commonly //! labelled as 'a'. This has to be between 0 and 1 (inclusive). //! @param [in] initialValue The initial value you want the output set to. This is also //! provided when you call the Reset() method. ExponentialFilter( float smoothingConstant, float initialValue) : smoothingConstant(smoothingConstant), output(initialValue) { // Make sure the smoothing constant is between 0 and 1 (inclusive) M_ASSERT((smoothingConstant >= 0.0) && (smoothingConstant <= 1.0)); }
/*
 * Allocates a boxed parameter-type list with room for `size` entries.
 * Aborts via M_ASSERT on OOM — for both the wrapper and the backing
 * array (the array allocation was previously unchecked, unlike every
 * sibling allocator in this file).
 */
void* var_data_alloc_PLIST(size_t size)
{
	var_data_plist* rv = (var_data_plist*)malloc(sizeof(var_data_plist));
	M_ASSERT(rv);
	rv->v = (b_type*)malloc(sizeof(b_type)*size);
	/* malloc(0) may legally return NULL, so only check when size > 0 */
	M_ASSERT(size == 0 || rv->v);
	return rv;
}
/*
 * Stops the periodic pmem header update.
 *
 * The worker re-arms itself (see pmem_header_update_worker), so a plain
 * cancel_delayed_work() can race with a running instance that reschedules
 * right after the cancel. cancel_delayed_work_sync() waits for a running
 * instance and correctly handles self-requeueing work. Note the work is
 * queued via schedule_delayed_work() (system workqueue), so the
 * flush_workqueue() below on the private workqueue did not cover it.
 */
void pmem_header_update_stop_workqueue(struct bittern_cache *bc)
{
	printk_debug("%s: pmem_header_stop_workqueue\n", bc->bc_name);
	M_ASSERT(bc->bc_pmem_update_workqueue != NULL);
	printk_debug("%s: cancel_delayed_work\n", bc->bc_name);
	cancel_delayed_work_sync(&bc->bc_pmem_update_work);
	printk_debug("%s: flush_workqueue\n", bc->bc_name);
	flush_workqueue(bc->bc_pmem_update_workqueue);
}
// Paints the solved cube: either every piece (singleChoise < 0) or a single
// piece (singleChoise >= 0, used e.g. for picking/preview).
//   fTargets   - when true, render for target/selection buffers (no lines).
//   upToStep   - if >= 0, only pieces with index < upToStep are drawn
//                (step-by-step playback support).
//   cfgLines   - line drawing mode; LINES_NONE suppresses edge lines.
void SlvPainter::paint(BaseGLWidget* context, bool fTargets, int singleChoise, int upToStep, ELinesDraw cfgLines) const
{
	M_ASSERT(m_scube != nullptr);
	M_ASSERT(m_scube->shape != nullptr);
	context->model.translate(0,0,+1);
	//cout << "*****" << endl;
	if (singleChoise < 0)
	{
		// NOTE(review): `f` is int vs dt.size() (presumably unsigned) —
		// harmless at realistic sizes but a signed/unsigned mix.
		for (int f = 0; f < m_scube->dt.size(); ++f)
		{
			if (upToStep >= 0 && f >= upToStep) // step by step support
				break;
			if (m_scube->dt[f].abs_sc == -1) // piece not there (solution transformed)
				continue;
			mglCheckErrorsC("x3");
			paintPiece(f, context, fTargets);
			mglCheckErrorsC("x4");
			if ((!fTargets) && (cfgLines != LINES_NONE))
			{
				bool linesSingle = false;
				if (upToStep >= 0) // step by step support
				{
					// this is somewhat of a PATCH that doesn't work completely well to make the edges have somewhat proper lines
					// since we don't want to regenerate to IFS for every stage (and even that doesn't work so well, see flat10x10)
					// a face is drawn "single" when any neighbor is not yet shown
					const int *knei = m_scube->shape->faces[f].nei;
					linesSingle = (knei[0] > upToStep) || (knei[1] > upToStep) || (knei[2] > upToStep) || (knei[3] > upToStep);
				}
				paintLines(f, linesSingle, context, cfgLines);
				mglCheckErrorsC("x5");
			}
		}
	}
	else
	{
		paintPiece(singleChoise, context, fTargets);
		if ((!fTargets) && (cfgLines == LINES_ALL)) // in single choise, do lines only if ALL (and not if BLACK)
			paintLines(singleChoise, true, context, cfgLines);
	}
}
// Intrinsic: file_read(filename) — returns the file's contents as a string,
// or the undefined node if the file cannot be read.
Node * methodFileRead(Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 1);
	String * name = String::cast(args[0]);
	SmallString<0> contents;
	if (!path::readFileContents(name->value(), contents)) {
		return &Node::UNDEFINED_NODE;
	}
	return String::create(contents);
}
/*
 * Allocates a boxed string with a `size`-byte character buffer.
 * Aborts via M_ASSERT on OOM — for both the wrapper and the character
 * buffer (the buffer allocation was previously unchecked, unlike every
 * sibling allocator in this file).
 */
void* var_data_alloc_G_STR(size_t size)
{
	var_data_str* rv = (var_data_str*)malloc(sizeof(var_data_str));
	M_ASSERT(rv);
	rv->size = size;
	rv->v = (char*)malloc(sizeof(char)*size);
	/* malloc(0) may legally return NULL, so only check when size > 0 */
	M_ASSERT(size == 0 || rv->v);
	return rv;
}
void Row_silo::write(row_t * data, uint64_t tid) { _row->copy(data); #if ATOMIC_WORD uint64_t v = _tid_word; M_ASSERT(tid > (v & (~LOCK_BIT)) && (v & LOCK_BIT), "tid=%ld, v & LOCK_BIT=%ld, v & (~LOCK_BIT)=%ld\n", tid, (v & LOCK_BIT), (v & (~LOCK_BIT))); _tid_word = (tid | LOCK_BIT); #else _tid = tid; #endif }
// Intrinsic: path_tempname() — returns a fresh temporary file name, or the
// undefined node (after reporting an error) if one cannot be generated.
// NOTE(review): ::tmpnam only *names* a file without creating it, so the
// name can be claimed by another process before use (TOCTOU race); consider
// a create-and-open primitive if this ever becomes security sensitive.
Node * methodPathTempName(
		Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 0);
	char nameBuf[L_tmpnam+1];
	if (::tmpnam(nameBuf) == NULL) {
		diag::error(loc) << "Unable to generate temporary file name.";
		return &Node::UNDEFINED_NODE;
	}
	return String::create(loc, StringRef(nameBuf, ::strlen(nameBuf)));
}
// Intrinsic: path_change_ext(path, ext) — returns `path` with its extension
// replaced by `ext`. If the result equals the input, the original string
// node is reused instead of allocating a new one.
Node * methodPathChangeExt(Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 2);
	String * input = String::cast(args[0]);
	String * newExt = String::cast(args[1]);
	SmallString<64> changed(input->value());
	path::changeExtension(changed, newExt->value());
	if (input->value() == changed) {
		return input; // no change: avoid allocating a duplicate
	}
	return String::create(changed);
}
void * GC::alloc(size_t size) { M_ASSERT(_initialized) << "Garbage collector has not been initialized!"; GC * gc = reinterpret_cast<GC *>(malloc(size)); #if GC_DEBUG memset((void *)gc, 0xDB, size); #endif gc->_next = _allocList; gc->_cycle = _cycleIndex; _allocList = gc; return gc; }
// Looks up `key` in this bucket's node chain and returns its item list.
// A missing key is a fatal error reported via M_ASSERT.
void BucketHeader::read_item(idx_key_t key, itemid_t * &item, const char * tname) {
	BucketNode * cur_node = first_node;
	while (cur_node != NULL) {
		if (cur_node->key == key)
			break;
		cur_node = cur_node->next;
	}
	// BUGFIX: assert the pointer itself. The old check dereferenced
	// cur_node->key, which segfaulted (instead of reporting the message)
	// whenever the key was absent and cur_node was NULL.
	M_ASSERT(cur_node != NULL, "Key does not exist!");
	item = cur_node->items;
}
// Intrinsic: path_add_ext(path, ext) — returns `path` with ".ext" appended.
// An empty extension leaves the path untouched (the input node is reused).
Node * methodPathAddExt(Location loc, Evaluator * ex, Function * fn, Node * self, NodeArray args) {
	M_ASSERT(args.size() == 2);
	String * input = String::cast(args[0]);
	String * extension = String::cast(args[1]);
	if (extension->value().empty()) {
		return input; // nothing to append
	}
	SmallString<64> joined(input->value());
	joined.push_back('.');
	joined.append(extension->value());
	return String::create(joined);
}