// Reads a FASTA database at `uri` into `result`, optionally backed by an
// on-disk binary index ("<uri>.index").  Falls back gracefully:
//   writable index stream -> BinaryIndexStream on that stream
//   read-only index       -> use it if fresh, else in-memory MemoryIndex
//   no index at all       -> serializer's default MemoryIndex
PWIZ_API_DECL void Reader_FASTA::read(const std::string& uri, shared_ptr<istream> uriStreamPtr, ProteomeData& result) const
{
    result.id = uri;

    Serializer_FASTA::Config config;

    if (config_.indexed) // override default MemoryIndex with a BinaryIndexStream
    {
        // Touch the index file so the read/write open below can succeed.
        {ofstream((uri + ".index").c_str(), ios::app);} // make sure the file exists

        shared_ptr<iostream> isPtr(new fstream((uri + ".index").c_str(), ios::in | ios::out | ios::binary));

        // indexes smaller than 200mb are loaded entirely into memory
        // (copied into a stringstream that replaces the file stream)
        boost::uintmax_t indexSize = bfs::file_size(uri + ".index");
        if (indexSize > 0 && indexSize < 200000000)
        {
            stringstream* indexFileStream = new stringstream();
            bio::copy(*isPtr, *indexFileStream);
            isPtr.reset(indexFileStream);
        }

        if (!*isPtr) // stream is unavailable or read only
        {
            // Retry with a read-only open.
            isPtr.reset(new fstream((uri + ".index").c_str(), ios::in | ios::binary));

            bool canOpenReadOnly = !!*isPtr;

            if (canOpenReadOnly)
            {
                // check that the index is up to date;
                // if it isn't, a read only index is worthless
                config.indexPtr.reset(new data::BinaryIndexStream(isPtr));
                Serializer_FASTA serializer(config);
                serializer.read(uriStreamPtr, result);
                // Probe the first protein: success means the read-only
                // index is usable, so we are done.
                if (result.proteinListPtr->size() > 0)
                    try
                    {
                        result.proteinListPtr->protein(0);
                        return;
                    }
                    catch (exception&)
                    {
                        // TODO: log warning about stale read only index
                        canOpenReadOnly = false;
                    }
            }

            // TODO: try opening an index in other locations, e.g.:
            // * current working directory (may be read only)
            // * executing directory (may be read only)
            // * temp directory (pretty much guaranteed to be writable)

            if (!canOpenReadOnly)
            {
                // fall back to in-memory index
                config.indexPtr.reset(new data::MemoryIndex);
            }
        }
        else // stream is ready and writable
            config.indexPtr.reset(new data::BinaryIndexStream(isPtr));
    }

    // Final read with whichever index configuration survived above.
    Serializer_FASTA serializer(config);
    serializer.read(uriStreamPtr, result);
}
void partition_t::intern() { assert(fedTable); if (isPtr()) { entry_t** start = fedTable->getTable(); entry_t** end = start + fedTable->getSize(); for(; start < end; ++start) { if (*start) (*start)->fed.intern(); } } }
/*
 * Runs a full mark/walk/free garbage-collection cycle over the database
 * heap, logging per-phase timings.  `stats` is currently unused.
 * Always returns 0.
 */
int _db_gc(pgctx_t *ctx, gcstats_t *stats)
{
    int64_t t0, t1, t2, t3, t4, t5;
    memheap_t *heap = _ptr(ctx, ctx->root->heap);

    /* Phase 1: mark every object in the heap as garbage candidates. */
    t0 = utime_now();
    pmem_gc_mark(&ctx->mm, heap, 0);
    t1 = utime_now();

    // Synchronize here.  All this does is make sure anyone who was
    // in the database during the mark phase is out before we do the
    // walk phase.
    _dblockop(ctx, MLCK_WR, ctx->root->lock);
    _dblockop(ctx, MLCK_UN, ctx->root->lock);

    // Eliminate the structures used by the memory subsystem itself
    gc_keep(ctx, heap);
    gc_keep(ctx, _ptr(ctx, heap->pool));

    // Eliminate references in the meta table
    if (isPtr(ctx->root->meta.id.type)) {
        gc_keep(ctx, dbptr(ctx, ctx->root->meta.id));
    }
    t2 = utime_now();

    /* Phase 2: walk live roots, un-marking everything reachable. */
    gc_walk(ctx, ctx->cache);
    t3 = utime_now();

    // Eliminate references that have parents that extend back to
    // the root "data" objects
    gc_walk(ctx, ctx->data);
    // Also any references owned by all currently running processes
    gc_walk(ctx, ctx->root->pidcache);
    t4 = utime_now();

    // Free everything that remains marked (i.e. unreachable).
    //pmem_gc_free(&ctx->mm, heap, 0, (gcfreecb_t)dbcache_del, ctx);
    pmem_gc_free(&ctx->mm, heap, 0, NULL, ctx);
    t5 = utime_now();

    log_debug("GC timing:");
    log_debug(" mark: %lldus", t1-t0);
    log_debug(" sync: %lldus", t2-t1);
    log_debug(" cache: %lldus", t3-t2);
    log_debug(" walk: %lldus", t4-t3);
    log_debug(" free: %lldus", t5-t4);
    log_debug(" total: %lldus", t5-t0);
    return 0;
}
// Converts this Type to its RuntimeType representation, preserving any
// array-kind or known-class specialization.  Not valid for Ptr types.
RuntimeType Type::toRuntimeType() const {
  assert(!isPtr());
  DataType outer;
  DataType inner;
  if (isBoxed()) {
    outer = KindOfRef;
    inner = innerType().toDataType();
  } else {
    outer = toDataType();
    inner = KindOfNone;
  }
  auto rtt = RuntimeType{outer, inner};
  if (!isSpecialized()) return rtt;
  if (subtypeOf(Type::Arr)) return rtt.setArrayKind(getArrayKind());
  if (subtypeOf(Type::Obj)) return rtt.setKnownClass(getClass());
  return rtt;
}
/*
 * mark - conservative depth-first marking for the garbage collector.
 *
 * p is a candidate pointer value.  isPtr() maps it to the start of a
 * heap block (NULL when it does not point into the heap).  A newly
 * reached allocated block is marked and its payload words are scanned
 * recursively for further pointers.
 *
 * Fix: the original recursed into the block's words even when the block
 * was already marked, so any pointer cycle in the heap caused unbounded
 * recursion (stack overflow).  An already-marked block now terminates
 * the walk, which is the standard mark-phase invariant.
 */
void mark(int *p) {
    int *ptr = isPtr(p);
    if (ptr == NULL || !blockAllocated(ptr))
        return;
    if (blockMarked(ptr))
        return;             /* already visited: cycles stop here */
    markBlock(ptr);

    /* Word count of the block; length() appears to be in bytes and the
       divisor assumes 4 == sizeof(int) -- TODO confirm against length(). */
    int size = length(ptr) / 4;

    /* Scan words 1..size-1, matching the original's range (word 0 is
       skipped -- presumably a header word; verify allocator layout). */
    int i = 0;
    while (++i < size) {
        mark(ptr + i);
    }
}
void partition_t::add(uintptr_t id, fed_t& fed) { assert(fedTable); if (!isPtr()) { fedTable = fedtable_t::create(edim(), eflag()); // Now it is mutable. } else { checkMutable(); } if (fedTable->add(id, fed)) { fedTable = fedTable->larger(); } }
// Maps this Type onto the single DataType that represents it at runtime.
// Only valid for non-Ptr types that correspond to exactly one DataType
// (enforced by isKnownDataType()).
DataType Type::toDataType() const {
  assert(!isPtr());
  assert(isKnownDataType());

  // Order is important here: types must progress from more specific
  // to less specific to return the most specific DataType.
  if (subtypeOf(Uninit)) return KindOfUninit;
  if (subtypeOf(InitNull)) return KindOfNull;
  if (subtypeOf(Bool)) return KindOfBoolean;
  if (subtypeOf(Int)) return KindOfInt64;
  if (subtypeOf(Dbl)) return KindOfDouble;
  if (subtypeOf(StaticStr)) return KindOfStaticString;  // before Str: more specific
  if (subtypeOf(Str)) return KindOfString;
  if (subtypeOf(Arr)) return KindOfArray;
  if (subtypeOf(Obj)) return KindOfObject;
  if (subtypeOf(Res)) return KindOfResource;
  if (subtypeOf(BoxedCell)) return KindOfRef;
  if (subtypeOf(Cls)) return KindOfClass;
  // Unreachable for well-formed inputs given the asserts above.
  always_assert_flog(false, "Bad Type {} in Type::toDataType()", *this);
}
// Maps this Type onto its runtime DataType.  Boxed types collapse to
// KindOfRef up front; the remaining checks run most-specific-first.
// Not valid for Ptr types.
DataType Type::toDataType() const {
  assert(!isPtr());
  if (isBoxed()) {
    return KindOfRef;
  }

  // Order is important here: types must progress from more specific
  // to less specific to return the most specific DataType.
  if (subtypeOf(Uninit)) return KindOfUninit;
  if (subtypeOf(Null)) return KindOfNull;
  if (subtypeOf(Bool)) return KindOfBoolean;
  if (subtypeOf(Int)) return KindOfInt64;
  if (subtypeOf(Dbl)) return KindOfDouble;
  if (subtypeOf(StaticStr)) return KindOfStaticString;  // before Str: more specific
  if (subtypeOf(Str)) return KindOfString;
  if (subtypeOf(Arr)) return KindOfArray;
  if (subtypeOf(Obj)) return KindOfObject;
  if (subtypeOf(Res)) return KindOfResource;
  if (subtypeOf(Cls)) return KindOfClass;
  if (subtypeOf(UncountedInit)) return KindOfUncountedInit;
  if (subtypeOf(Uncounted)) return KindOfUncounted;
  if (subtypeOf(Gen)) return KindOfAny;
  not_reached();
}
/// Returns the pointer payload stored in this tagged word.
/// Precondition: isPtr() (asserted).  The tag bits outside VALUE_MASK
/// are stripped before the remaining bits are reinterpreted as a T*.
/// Fix: replaced the C-style cast with reinterpret_cast, the named cast
/// that makes the integer-to-pointer conversion explicit and greppable.
T* getPtr() const noexcept {
    assert(isPtr());
    return reinterpret_cast<T*>(raw() & VALUE_MASK);
}
/*
 * Recursively walks the object graph reachable from `root`, calling
 * gc_keep() on every visited heap object so the collector retains it.
 * Non-pointer and zero values terminate the walk.  List/Object headers
 * deliberately fall through into their internal-storage cases after
 * keeping the storage block itself.
 */
static void gc_walk(pgctx_t *ctx, dbtype_t root)
{
    int i;
    _list_t *list;
    _obj_t *obj;

    /* Immediate values (and all-zero "nothing") carry no references. */
    if (root.all == 0 || !isPtr(root.type))
        return;

    /* Resolve the db reference to a real pointer and retain it. */
    root.ptr = dbptr(ctx, root);
    gc_keep(ctx, root.ptr);
    switch(root.ptr->type) {
        case List:
            /* Keep the backing storage, then scan it below. */
            root.ptr = dbptr(ctx, root.ptr->list);
            gc_keep(ctx, root.ptr);
            // Fall-thru is intentional
        case _InternalList:
            list = (_list_t*)root.ptr;
            if (list) {
                for(i=0; i<list->len; i++)
                    gc_walk(ctx, list->item[i]);
            }
            break;
        case Object:
            /* Keep the backing storage, then scan it below. */
            root.ptr = dbptr(ctx, root.ptr->obj);
            gc_keep(ctx, root.ptr);
            // Fall-thru is intentional
        case _InternalObj:
            obj = (_obj_t*)root.ptr;
            if (obj) {
                for(i=0; i<obj->len; i++) {
                    gc_walk(ctx, obj->item[i].key);
                    gc_walk(ctx, obj->item[i].value);
                }
            }
            break;
        case Collection:
        case MultiCollection:
            gc_walk(ctx, root.ptr->obj);
            break;
        case Cache:
            gc_walk_cache(ctx, root.ptr->cache);
            break;
        case _BonsaiNode:
            /* Balanced-tree node: children plus key/value payload. */
            gc_walk(ctx, root.ptr->left);
            gc_walk(ctx, root.ptr->key);
            gc_walk(ctx, root.ptr->value);
            gc_walk(ctx, root.ptr->right);
            break;
        case _BonsaiMultiNode:
            /* As above, but with an array of values per key. */
            gc_walk(ctx, root.ptr->left);
            gc_walk(ctx, root.ptr->key);
            for(i=0; i<root.ptr->nvalue; i++) {
                gc_walk(ctx, root.ptr->values[i]);
            }
            gc_walk(ctx, root.ptr->right);
            break;
        default:
            // Nothing to do
            break;
    }
}
// Reads (x, value) samples from the CSV file fName_ into this->table_.
// Skips nHeaderLine_ header lines, then splits each data line on
// separator_ (optionally merging runs of separators), takes column
// refColumn_ as the scalar abscissa and readValue(split) as the value.
// Reading stops at the first line that yields <= 1 fields.
// NOTE(review): the template header for this member is above this view.
void Foam::Function1Types::CSV<Type>::read()
{
    fileName expandedFile(fName_);
    autoPtr<ISstream> isPtr(fileHandler().NewIFstream(expandedFile.expand()));
    ISstream& is = isPtr();

    if (!is.good())
    {
        FatalIOErrorInFunction(is)
            << "Cannot open CSV file for reading."
            << exit(FatalIOError);
    }

    DynamicList<Tuple2<scalar, Type>> values;

    // Skip header
    for (label i = 0; i < nHeaderLine_; i++)
    {
        string line;
        is.getLine(line);
    }

    // Highest column index we must tokenize up to.
    const label nEntries = max(refColumn_, max(componentColumns_));

    // Read data
    while (is.good())
    {
        string line;
        is.getLine(line);

        label n = 0;
        std::size_t pos = 0;
        DynamicList<string> split;

        if (mergeSeparators_)
        {
            std::size_t nPos = 0;

            while ((pos != std::string::npos) && (n <= nEntries))
            {
                // Advance pos past any run of consecutive separators.
                bool found = false;
                while (!found)
                {
                    nPos = line.find(separator_, pos);

                    if ((nPos != std::string::npos) && (nPos - pos == 0))
                    {
                        pos = nPos + 1;
                    }
                    else
                    {
                        found = true;
                    }
                }

                // Extract the next field (to the next separator, or to
                // end-of-line when no separator remains).
                nPos = line.find(separator_, pos);

                if (nPos == std::string::npos)
                {
                    split.append(line.substr(pos));
                    pos = nPos;
                    n++;
                }
                else
                {
                    split.append(line.substr(pos, nPos - pos));
                    pos = nPos + 1;
                    n++;
                }
            }
        }
        else
        {
            // Plain split: every separator delimits a (possibly empty) field.
            while ((pos != std::string::npos) && (n <= nEntries))
            {
                std::size_t nPos = line.find(separator_, pos);

                if (nPos == std::string::npos)
                {
                    split.append(line.substr(pos));
                    pos = nPos;
                    n++;
                }
                else
                {
                    split.append(line.substr(pos, nPos - pos));
                    pos = nPos + 1;
                    n++;
                }
            }
        }

        // A line with at most one field ends the data section
        // (e.g. a trailing blank line).
        if (split.size() <= 1)
        {
            break;
        }

        scalar x = readScalar(IStringStream(split[refColumn_])());
        Type value = readValue(split);

        values.append(Tuple2<scalar,Type>(x, value));
    }

    this->table_.transfer(values);
}
/*
 * Converts a database value `db` into a new Python object reference.
 *
 * Immediate values (Boolean/Int/Float/Datetime and short inline strings)
 * are converted directly; pointer types are first resolved via dbptr().
 * For container types, `flags` selects between returning a live proxy
 * object (TP_PROXY) and eagerly materialising a native Python
 * list/dict; TP_PROXYCHLD defers proxying by one level.  Bonsai tree
 * nodes yield key, value, or (key, value) depending on
 * TP_NODEKEY/TP_NODEVAL.  Returns a new reference, Py_None for the
 * all-zero value, or sets a Python exception for unknown types (in
 * which case ob may be NULL).
 */
PyObject *
to_python(pgctx_t *ctx, dbtype_t db, int flags)
{
    dbtag_t type;
    dbval_t *dv = NULL;
    PyObject *ob = NULL;
    PyObject *k, *v;
    epstr_t ea;
    epfloat_t fa;
    char *ma = NULL;
    uint32_t len = 0;
    _list_t *list;
    _obj_t *obj;
    struct tm tm;
    time_t time;
    long usec;
    int i;
    int64_t ival;
    tphelper_t h;

    if (db.all == 0)
        Py_RETURN_NONE;

    type = db.type;
    if (type == ByteBuffer || type == String) {
        /* Short string packed inline in the value word. */
        ea.all = db.all;
        len = ea.len;
        ea.val[len] = 0;
        ma = (char*)ea.val;
    } else if (isPtr(type)) {
        /* Pointer value: dereference and re-read the real type tag. */
        dv = dbptr(ctx, db);
        type = dv->type;
        if (type == ByteBuffer || type == String) {
            len = dv->len;
            ma = (char*)dv->sval;
        }
    }

    switch(type) {
        case Boolean:
            ob = db.val ? Py_True : Py_False;
            Py_INCREF(ob);
            break;
        case Int:
            /* Use PyLong only when the value does not fit a C long. */
            ival = db.val;
            if (ival < LONG_MIN || ival > LONG_MAX) {
                ob = PyLong_FromLongLong(ival);
            } else {
                ob = PyInt_FromLong((long)ival);
            }
            break;
        case Float:
            /* Reconstruct the double from its shifted storage format. */
            fa.ival = (int64_t)db.val << 4;
            ob = PyFloat_FromDouble(fa.fval);
            break;
#ifdef WANT_UUID_TYPE
        case Uuid:
            ob = PyObject_CallFunction(uuid_constructor, "Os#", Py_None, dv->uuval, 16);
            break;
#endif
        case ByteBuffer:
            ob = PyString_FromStringAndSize(ma, len);
            break;
        case String:
            ob = PyUnicode_FromStringAndSize(ma, len);
            break;
        case Datetime:
            /* Stored as microseconds since the epoch. */
            time = db.val / 1000000LL;
            usec = db.val % 1000000LL;
#ifdef WIN32
            memcpy(&tm, gmtime(&time), sizeof(tm));
#else
            gmtime_r(&time, &tm);
#endif
            /* NOTE(review): passes tm.tm_mon unadjusted -- struct tm
               months are 0-11 while the datetime constructor expects
               1-12; verify against callers. */
            ob = PyDateTime_FromDateAndTime(
                    tm.tm_year+1900, tm.tm_mon, tm.tm_mday,
                    tm.tm_hour, tm.tm_min, tm.tm_sec, usec);
            break;
        case List:
            if (flags & TP_PROXY) {
                ob = PongoList_Proxy(ctx, db);
                pidcache_put(ctx, ob, db);
            } else {
                if (flags & TP_PROXYCHLD)
                    flags = (flags & ~TP_PROXYCHLD) | TP_PROXY;
                list = dbptr(ctx, dv->list);
                ob = PyList_New(list->len);
                for(i=0; i<list->len; i++) {
                    v = to_python(ctx, list->item[i], flags);
                    PyList_SET_ITEM(ob, i, v);
                    // Don't need to decref v since SET_ITEM steals the reference
                }
            }
            break;
        case Object:
            if (flags & TP_PROXY) {
                ob = PongoDict_Proxy(ctx, db);
                pidcache_put(ctx, ob, db);
            } else {
                if (flags & TP_PROXYCHLD)
                    flags = (flags & ~TP_PROXYCHLD) | TP_PROXY;
                obj = dbptr(ctx, dv->obj);
                ob = PyDict_New();
                for(i=0; i<obj->len; i++) {
                    k = to_python(ctx, obj->item[i].key, flags);
                    v = to_python(ctx, obj->item[i].value, flags);
                    /* PyDict_SetItem does NOT steal, so drop our refs. */
                    PyDict_SetItem(ob, k, v);
                    Py_DECREF(k);
                    Py_DECREF(v);
                }
            }
            break;
        case Cache:
            // The cache is a collection
        case Collection:
        case MultiCollection:
            if (flags & TP_PROXY) {
                ob = PongoCollection_Proxy(ctx, db);
                pidcache_put(ctx, ob, db);
            } else {
                if (flags & TP_PROXYCHLD)
                    flags = (flags & ~TP_PROXYCHLD) | TP_PROXY;
                /* Materialise the tree into a dict via the foreach helper. */
                h.flags = flags | (TP_NODEKEY|TP_NODEVAL);
                h.type = Collection;
                h.ob = ob = PyDict_New();
                bonsai_foreach(ctx, dv->obj, to_python_helper, &h);
            }
            break;
        case _BonsaiNode:
        case _BonsaiMultiNode:
            /* Convert key and/or value(s) per the TP_NODE* flags; when
               both are requested the result is a (key, value) tuple,
               and when neither is, a pointer proxy is returned. */
            k = v = NULL;
            if (flags & TP_NODEKEY) {
                k = to_python(ctx, dv->key, flags & ~(TP_NODEKEY|TP_NODEVAL));
                ob = k;
            }
            if (flags & TP_NODEVAL) {
                if (type == _BonsaiMultiNode) {
                    v = PyTuple_New(dv->nvalue);
                    for(i=0; i<dv->nvalue; i++) {
                        ob = to_python(ctx, dv->values[i], flags & ~(TP_NODEKEY|TP_NODEVAL));
                        PyTuple_SET_ITEM(v, i, ob);
                        // Don't need to decref ob since SET_ITEM steals the reference
                    }
                } else {
                    v = to_python(ctx, dv->value, flags & ~(TP_NODEKEY|TP_NODEVAL));
                }
                ob = v;
            }
            if (k && v) {
                ob = PyTuple_Pack(2, k, v);
                Py_DECREF(k);
                Py_DECREF(v);
            }
            if (!k && !v) {
                ob = PongoPointer_Proxy(ctx, db);
            }
            break;
        case _InternalList:
        case _InternalObj:
            /* Internal storage nodes are only ever proxied. */
            ob = PongoPointer_Proxy(ctx, db);
            break;
        default:
            PyErr_Format(PyExc_Exception, "Cannot handle dbtype %d", type);
    }
    return ob;
}
// Opens `fName` for object `io`, with all file access funnelled through
// the master rank.  The master reads its own file directly (returning
// that stream) and additionally reads every slave's file, shipping the
// raw bytes to the owning rank via PstreamBuffers.  Slaves reconstruct
// an IStringStream from the received bytes and re-read the header.
// Each rank returns a valid Istream positioned after the header.
Foam::autoPtr<Foam::Istream>
Foam::fileOperations::masterFileOperation::readStream
(
    regIOobject& io,
    const fileName& fName
) const
{
    if (!fName.size())
    {
        FatalErrorInFunction
            << "empty file name" << exit(FatalError);
    }

    // Collect every rank's file path on the master.
    fileNameList filePaths(Pstream::nProcs());
    filePaths[Pstream::myProcNo()] = fName;
    Pstream::gatherList(filePaths);

    PstreamBuffers pBufs(Pstream::nonBlocking);

    autoPtr<Istream> isPtr;

    if (Pstream::master())
    {
        //const bool uniform = uniformFile(filePaths);

        autoPtr<IFstream> ifsPtr(new IFstream(fName));
        IFstream& is = ifsPtr();

        // Read header
        if (!io.readHeader(is))
        {
            FatalIOErrorInFunction(is)
                << "problem while reading header for object "
                << io.name() << exit(FatalIOError);
        }

        // Open master (steal from ifsPtr)
        isPtr.reset(ifsPtr.ptr());

        // Read slave files and forward their raw contents.
        for (label proci = 1; proci < Pstream::nProcs(); proci++)
        {
            if (IFstream::debug)
            {
                Pout<< "For processor " << proci
                    << " opening " << filePaths[proci] << endl;
            }

            std::ifstream is(filePaths[proci]);

            // Get length of file by seeking to its end.
            is.seekg(0, ios_base::end);
            std::streamoff count = is.tellg();
            is.seekg(0, ios_base::beg);

            if (IFstream::debug)
            {
                Pout<< "From " << filePaths[proci]
                    << " reading " << label(count) << " bytes" << endl;
            }

            // Slurp the whole file and send it to rank proci.
            List<char> buf(static_cast<label>(count));
            is.read(buf.begin(), count);

            UOPstream os(proci, pBufs);
            os.write(buf.begin(), count);
        }
    }

    labelList recvSizes;
    pBufs.finishedSends(recvSizes);

    // isPtr will be valid on master. Else the information is in the
    // PstreamBuffers
    if (!isPtr.valid())
    {
        UIPstream is(Pstream::masterNo(), pBufs);
        string buf(recvSizes[Pstream::masterNo()], '\0');
        is.read(&buf[0], recvSizes[Pstream::masterNo()]);

        if (IFstream::debug)
        {
            Pout<< "Done reading " << buf.size() << " bytes" << endl;
        }

        // Wrap the received bytes and consume the header, mirroring the
        // master-side readHeader above.
        isPtr.reset(new IStringStream(buf));

        if (!io.readHeader(isPtr()))
        {
            FatalIOErrorInFunction(isPtr())
                << "problem while reading header for object "
                << io.name() << exit(FatalIOError);
        }
    }

    return isPtr;
}