/*--------------------------------------------------------------*
 *  ss_m::_create_mrbt_file()                                   *
 *--------------------------------------------------------------*/
// Creates an MRBT file as a PAIR of stores on the given volume:
// the first (returned in `fid`) for the file proper, the second
// for large-record pages.  NOTE: this definition continues past
// the end of this excerpt.
rc_t
ss_m::_create_mrbt_file(
    vid_t              vid,           // in:  volume to create the file on
    stid_t&            fid,           // out: store id of the new file store
    store_property_t   property,      // in:  mapped to a store flag below
    shpid_t            cluster_hint // = 0
)
{
    FUNC(ss_m::_create_mrbt_file);
    DBG( << "Attempting to create a file on volume " << vid.vol );

    store_flag_t st_flag = _make_store_flag(property);
    // Convert the page-id hint into an extent number so the io layer can
    // try to place the store near the hinted page; 0 means "no hint".
    extnum_t first_extent = extnum_t(cluster_hint? cluster_hint / ss_m::ext_sz : 0);

    DBGTHRD(<<"about to create a store starting about extent "
            << first_extent);
    // First store: holds the file's regular (small-record) pages.
    W_DO( io->create_store(vid, 100/*unused*/, st_flag, fid, first_extent) );

    DBGTHRD(<<"created first store " << fid << " now create 2nd...");

    /*
    // create a store for holding large record pages
    // always allocates 1 extent -- otherwise
    // asserts fail elsewhere
    // If this fails, we have to by-hand back out the creation
    // of the first store
    */
    stid_t lg_stid;
    w_rc_t rc= io->create_store(vid, 100/*unused*/,
                                st_flag, lg_stid, first_extent, 1);
    if(rc.is_error()) {
        // it would be a problem if this didn't work, but
        // if all else fails, abort should work.
        DBGTHRD(<<"2nd create failed; destroying first= " << fid);
        // Manual compensation: undo the first create so we don't leak
        // a store on the error path, then propagate the original error.
        W_DO( io->destroy_store(fid) );
        return rc;
    }
/*--------------------------------------------------------------*
 *  ss_m::_create_index()                                       *
 *--------------------------------------------------------------*/
// Creates a B-tree index on volume `vid` and registers it with the
// directory.  Returns the new store id in `stid`.
//
// Errors: eBADCCLEVEL for an unsupported concurrency level,
//         eBADNDXTYPE for an index type other than (uni_)btree,
//         plus whatever the io/lock/btree/dir layers return.
rc_t
ss_m::_create_index(
    vid_t                 vid,        // in:  volume to create the index on
    ndx_t                 ntype,      // in:  index type (t_btree/t_uni_btree)
    store_property_t      property,   // in:  mapped to a store flag
    const char*           key_desc,   // in:  textual key-type description
    concurrency_t         cc, // = t_cc_kvl
    stid_t&               stid        // out: store id of the new index
)
{
    FUNC(ss_m::_create_index);
    DBG(<<" vid " << vid);
    uint4_t count = max_keycomp;
    key_type_s kcomp[max_keycomp];
    lpid_t root;

    W_DO( key_type_s::parse_key_type(key_desc, count, kcomp) );

    // Validate the arguments BEFORE creating the store.  Previously these
    // checks ran after io->create_store(), so an invalid cc or ntype
    // returned an error while leaving the just-created store allocated
    // (leaked until transaction abort).  Checking first avoids that.
    if( (cc != t_cc_none) && (cc != t_cc_file) &&
        (cc != t_cc_kvl) && (cc != t_cc_modkvl) &&
        (cc != t_cc_im)
        ) return RC(eBADCCLEVEL);

    switch (ntype)  {
    case t_btree:
    case t_uni_btree:
        break;
    default:
        return RC(eBADNDXTYPE);
    }

    {
        DBG(<<"vid " << vid);
        W_DO( io->create_store(vid, 100/*unused*/,
                               _make_store_flag(property), stid) );
        DBG(<<" stid " << stid);
    }

    // Note: theoretically, some other thread could destroy
    // the above store before the following lock request
    // is granted.  The only forseeable way for this to
    // happen would be due to a bug in a vas causing
    // it to destroy the wrong store.  We make no attempt
    // to prevent this.
    W_DO(lm->lock(stid, EX, t_long, WAIT_SPECIFIED_BY_XCT));

    // Both btree flavors are created the same way here;
    // compress prefixes only if the first part is compressed.
    W_DO( bt->create(stid, root, kcomp[0].compressed != 0) );

    // Register the index with the directory so it can be found later.
    sinfo_s sinfo(stid.store, t_index, 100/*unused*/,
                  ntype, cc,
                  root.page,
                  count, kcomp);
    W_DO( dir->insert(stid, sinfo) );
    return RCOK;
}
/*--------------------------------------------------------------*
 *  ss_m::_create_md_index()                                    *
 *--------------------------------------------------------------*/
// Creates a multi-dimensional (R-tree) index of dimensionality `dim`
// on volume `vid` and registers it with the directory.  Returns the
// new store id in `stid`.
//
// Errors: eBADNDXTYPE for any index type other than t_rtree,
//         plus whatever the io/lock/rtree/dir layers return.
rc_t
ss_m::_create_md_index(
    vid_t                 vid,        // in:  volume to create the index on
    ndx_t                 ntype,      // in:  must be t_rtree
    store_property_t      property,   // in:  mapped to a store flag
    stid_t&               stid,       // out: store id of the new index
    int2_t                dim         // in:  dimensionality of the rtree
)
{
    // Validate ntype BEFORE creating the store.  Previously the check
    // ran after io->create_store(), so an invalid ntype returned
    // eBADNDXTYPE while leaving the just-created store allocated
    // (leaked until transaction abort).
    switch (ntype)  {
    case t_rtree:
        break;
    default:
        return RC(eBADNDXTYPE);
    }

    W_DO( io->create_store(vid, 100/*unused*/,
                           _make_store_flag(property), stid) );
    lpid_t root;

    // Note: theoretically, some other thread could destroy
    // the above store before the following lock request
    // is granted.  The only forseeable way for this to
    // happen would be due to a bug in a vas causing
    // it to destroy the wrong store.  We make no attempt
    // to prevent this.
    W_DO(lm->lock(stid, EX, t_long, WAIT_SPECIFIED_BY_XCT));

    W_DO( rt->create(stid, root, dim) );

    // Register the index with the directory so it can be found later.
    sinfo_s sinfo(stid.store, t_index, 100/*unused*/,
                  ntype, t_cc_none, // cc not used for md indexes
                  root.page, 0, 0);
    W_DO( dir->insert(stid, sinfo) );
    return RCOK;
}