// Construct a JSONDB rooted at basePath. The base path is either an
// "http://host[:port][/path]" URL (records are then fetched over HTTP)
// or a local directory (created and permission-restricted on first use).
JSONDB::JSONDB(const std::string &basePath) : _basePath(basePath), _ready(false)
{
	if ((_basePath.length() > 7)&&(_basePath.substr(0,7) == "http://")) {
		// TODO: this doesn't yet support IPv6 since bracketed address notation isn't supported.
		// Typically it's used with 127.0.0.1 anyway.
		std::string hn = _basePath.substr(7); // "host[:port][/path...]" with the scheme stripped
		std::size_t hnend = hn.find_first_of('/'); // start of the path portion, if present
		if (hnend != std::string::npos)
			hn = hn.substr(0,hnend);
		std::size_t hnsep = hn.find_last_of(':');
		if (hnsep != std::string::npos)
			hn[hnsep] = '/'; // NOTE(review): presumably fromString() expects "addr/port" form -- confirm against InetAddress
		_httpAddr.fromString(hn);
		// Reduce _basePath to just the URL's path component, normalized to start with '/'.
		if (hnend != std::string::npos)
			_basePath = _basePath.substr(7 + hnend);
		if (_basePath.length() == 0)
			_basePath = "/";
		if (_basePath[0] != '/')
			_basePath = std::string("/") + _basePath;
	} else {
		OSUtils::mkdir(_basePath.c_str());
		OSUtils::lockDownFile(_basePath.c_str(),true); // networks might contain auth tokens, etc., so restrict directory permissions
	}
	// Initial load; if it fails, _ready stays false and accessors retry (see get()/filter()).
	_ready = _reload(_basePath,std::string());
}
// Return the ChunkManager for the given namespace, optionally refreshing
// the cached collection metadata first. Asserts (massert 10181) unless the
// collection is sharded or was dropped.
ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) {
    scoped_lock lk( _lock );

    if ( shouldReload ) {
        _reload();
    }

    CollectionInfo& info = _collections[ns];
    massert( 10181 , (string)"not sharded:" + ns , info.isSharded() || info.wasDropped() );
    return info.getCM();
}
/* Remove the reference to a meta1 service from the meta0 base.
 * The meta1 must already be disabled (reference state "0"); destroying a
 * meta1 that is still available for prefix allocation is refused.
 *
 * @param m0    the meta0 backend (must not be NULL)
 * @param meta1 textual address of the meta1 to remove (must not be NULL)
 * @return NULL on success, an owned GError otherwise
 */
GError* meta0_backend_destroy_meta1_ref(struct meta0_backend_s *m0, gchar *meta1)
{
	GError *err = NULL;
	struct sqlx_sqlite3_s *sq3 = NULL;
	struct sqlx_repctx_s *repctx = NULL;
	GPtrArray *result;
	gchar *v, *addr, *ref, *nb;
	guint i, max, cmpaddr, cmpstate;
	gboolean found = FALSE;

	EXTRA_ASSERT(m0 != NULL);
	EXTRA_ASSERT(meta1 != NULL);

	/* check if meta1 is disable */
	if (NULL != (err = _reload(m0, TRUE))) {
		g_prefix_error(&err, "Reload error: ");
		return err;
	}

	/* Work on a private copy of the references so the lock is not held
	 * while scanning or touching the database. */
	g_rw_lock_reader_lock(&(m0->rwlock));
	EXTRA_ASSERT(m0->array_meta1_ref != NULL);
	result = meta0_utils_array_meta1ref_dup(m0->array_meta1_ref);
	g_rw_lock_reader_unlock(&(m0->rwlock));

	for (i=0,max=result->len; i<max ;i++) {
		if (!(v = result->pdata[i]))
			continue;
		meta0_utils_unpack_meta1ref(v,&addr,&ref,&nb);
		cmpaddr = g_ascii_strcasecmp(addr,meta1);
		cmpstate = g_ascii_strcasecmp(ref,"0");
		g_free(addr);
		g_free(ref);
		g_free(nb);
		if ( cmpaddr == 0) {
			found = TRUE;
			if (cmpstate != 0) {
				/* Still enabled: refuse to destroy it. */
				err = NEWERROR(EINVAL, "meta1 always available to prefix allocation");
				break;
			}
			err = _open_and_lock(m0, M0V2_OPENBASE_MASTERONLY, &sq3);
			if (NULL != err)
				break;
			err = sqlx_transaction_begin(sq3, &repctx);
			if (NULL == err) {
				err = _delete_meta1_ref(sq3->db, meta1);
				err = sqlx_transaction_end(repctx, err);
			}
			_unlock_and_close(sq3);
			break;
		}
	}

	if (!found)
		err = NEWERROR(EINVAL, "UNKNOWN meta1");

	/* FIX: the duplicated array was previously leaked on every exit path.
	 * Elements are plain strings from the dup -- free them, then the array. */
	for (i=0; i<result->len ;i++)
		g_free(result->pdata[i]);
	g_ptr_array_free(result, TRUE);

	return err;
}
// Invoke func for every record whose name starts with prefix; records for
// which func returns false are erased from the database. Spins (250ms
// sleep + reload retry) until the initial load has succeeded.
// NOTE(review): assumes F is callable as bool(const std::string &,record) -- confirm against callers.
inline void filter(const std::string &prefix,F func)
{
	// Busy-wait until the backing store has been loaded at least once.
	while (!_ready) {
		Thread::sleep(250);
		_ready = _reload(_basePath,std::string());
	}
	// In an ordered map all keys sharing a prefix are contiguous starting
	// at lower_bound(prefix), so we can stop at the first non-match.
	for(std::map<std::string,_E>::iterator i(_db.lower_bound(prefix));i!=_db.end();) {
		if ((i->first.length() >= prefix.length())&&(!memcmp(i->first.data(),prefix.data(),prefix.length()))) {
			if (!func(i->first,get(i->first))) {
				// Save the successor before erasing so iteration stays valid.
				std::map<std::string,_E>::iterator i2(i);
				++i2;
				this->erase(i->first);
				i = i2;
			} else ++i;
		} else break; // past the last key with this prefix
	}
}
// Return the ChunkManager for ns, optionally rebuilding it from config
// metadata. The lock is deliberately dropped while the (possibly slow) new
// ChunkManager is constructed, then re-acquired to swap it in.
ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) {
    BSONObj key;
    bool unique;
    {
        scoped_lock lk( _lock );
        CollectionInfo& ci = _collections[ns];

        // If we don't yet see the collection as sharded, a reload may reveal it.
        bool earlyReload = ! ci.isSharded() && shouldReload;
        if ( earlyReload ) {
            // this is to catch cases where there this is a new sharded collection
            _reload();
            ci = _collections[ns];
        }
        massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() || ci.wasDropped() );
        assert( ci.wasDropped() || ! ci.key().isEmpty() );

        // Cached manager is good enough unless a (non-early) reload was requested.
        if ( ! shouldReload || earlyReload )
            return ci.getCM();

        // Copy what we need so the new manager can be built outside the lock.
        key = ci.key().copy();
        unique = ci.unique();
    }

    assert( ! key.isEmpty() );

    // we are not locked now, and want to load a new ChunkManager
    auto_ptr<ChunkManager> temp( new ChunkManager( ns , key , unique ) );
    if ( temp->numChunks() == 0 ) {
        // maybe we're not sharded any more
        reload(); // this is a full reload
        return getChunkManager( ns , false );
    }

    // Re-acquire the lock and install the freshly built manager.
    scoped_lock lk( _lock );
    CollectionInfo& ci = _collections[ns];
    // The state may have changed while we were unlocked; verify before swapping.
    massert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() || ci.wasDropped() );
    ci.resetCM( temp.release() );
    return ci.getCM();
}
/* Fetch a private copy of the whole meta1 reference array.
 *
 * @param m0     the meta0 backend (must not be NULL)
 * @param result output: duplicated array, owned by the caller
 * @return NULL on success, an owned GError otherwise
 */
GError* meta0_backend_get_all_meta1_ref(struct meta0_backend_s *m0, GPtrArray **result)
{
	EXTRA_ASSERT(m0 != NULL);
	EXTRA_ASSERT(result != NULL);

	GError *err = _reload(m0, TRUE);
	if (err != NULL) {
		g_prefix_error(&err, "Reload error: ");
		return err;
	}

	/* Duplicate under the read lock so callers never see the live array. */
	g_rw_lock_reader_lock(&(m0->rwlock));
	EXTRA_ASSERT(m0->array_meta1_ref != NULL);
	*result = meta0_utils_array_meta1ref_dup(m0->array_meta1_ref);
	g_rw_lock_reader_unlock(&(m0->rwlock));

	return NULL;
}
/* Resolve the meta1 URL list associated with one 2-byte prefix.
 *
 * @param m0     the meta0 backend (must not be NULL)
 * @param prefix the 2-byte prefix to look up (dereferenced for tracing --
 *               NOTE(review): not NULL-checked, unlike m0/u)
 * @param u      output: NULL-terminated URL vector, owned by the caller
 * @return NULL on success, an owned GError otherwise
 */
GError* meta0_backend_get_one(struct meta0_backend_s *m0, const guint8 *prefix, gchar ***u)
{
	EXTRA_ASSERT(m0 != NULL);
	EXTRA_ASSERT(u != NULL);
	GRID_TRACE("%s(%p,%02X%02X,%p)", __FUNCTION__, m0, prefix[0], prefix[1], u);

	GError *err = _reload(m0, TRUE);
	if (err != NULL) {
		g_prefix_error(&err, "Reload error: ");
		return err;
	}

	g_rw_lock_reader_lock(&(m0->rwlock));
	EXTRA_ASSERT(m0->array_by_prefix != NULL);
	*u = meta0_utils_array_get_urlv(m0->array_by_prefix, prefix);
	g_rw_lock_reader_unlock(&(m0->rwlock));

	if (*u == NULL)
		return NEWERROR(EINVAL, "META0 partially missing");
	return NULL;
}
// Return the JSON record named n, loading it into the in-memory cache from
// HTTP or disk on a miss. Returns _EMPTY_JSON for invalid names, fetch
// failures, or unparseable content. Spins until the initial load succeeds.
const nlohmann::json &JSONDB::get(const std::string &n)
{
	// Busy-wait until the backing store has been loaded at least once.
	while (!_ready) {
		Thread::sleep(250);
		_ready = _reload(_basePath,std::string());
	}

	if (!_isValidObjectName(n)) return _EMPTY_JSON;

	// Cache hit: serve the already-parsed object.
	std::map<std::string,_E>::iterator e(_db.find(n));
	if (e != _db.end()) return e->second.obj;

	// Cache miss: fetch raw bytes, via HTTP if configured, else local file.
	std::string buf;
	if (_httpAddr) {
		std::map<std::string,std::string> headers;
		const unsigned int sc = Http::GET(1048576,ZT_JSONDB_HTTP_TIMEOUT,reinterpret_cast<const struct sockaddr *>(&_httpAddr),(_basePath+"/"+n).c_str(),_ZT_JSONDB_GET_HEADERS,headers,buf);
		if (sc != 200) return _EMPTY_JSON;
	} else {
		const std::string path(_genPath(n,false));
		if (!path.length()) return _EMPTY_JSON;
		if (!OSUtils::readFile(path.c_str(),buf)) return _EMPTY_JSON;
	}

	try {
		// Insert the cache slot first, then parse into it; on parse failure
		// the half-initialized slot is removed again below.
		_E &e2 = _db[n];
		e2.obj = OSUtils::jsonParse(buf);
		return e2.obj;
	} catch ( ... ) {
		_db.erase(n);
		return _EMPTY_JSON;
	}
}
/* Public entry point forcing a refresh of the backend's cached view.
 *
 * @param m0 the meta0 backend (must not be NULL)
 * @return NULL on success, an owned GError otherwise
 */
GError *
meta0_backend_reload(struct meta0_backend_s *m0)
{
	EXTRA_ASSERT(m0 != NULL);
	return _reload(m0, FALSE);
}
// Refresh this database's cached config metadata under the lock.
// Returns whatever _reload() reports.
bool DBConfig::reload() {
    scoped_lock lk( _lock );
    return _reload();
}