void CmdInterpreter::visit(ProxyCmdRollback &cmd)
{
    TxnHandle *pTxnHandle = getTxnHandle(cmd.getTxnHandle());
    SharedDatabase pDb = pTxnHandle->pDb;

    // block checkpoints during this method
    bool txnBlocksCheckpoint = !pTxnHandle->readOnly && pDb->shouldForceTxns();
    SXMutexSharedGuard actionMutexGuard(
        pDb->getCheckpointThread()->getActionMutex());

    if (pDb->areSnapshotsEnabled()) {
        SnapshotRandomAllocationSegment *pSegment =
            SegmentFactory::dynamicCast<SnapshotRandomAllocationSegment *>(
                pTxnHandle->pSnapshotSegment);
        pSegment->rollbackChanges();
    }

    if (cmd.getSvptHandle()) {
        SavepointId svptId = getSavepointId(cmd.getSvptHandle());
        pTxnHandle->pTxn->rollback(&svptId);
    } else {
        pTxnHandle->pTxn->rollback();
        deleteAndNullify(pTxnHandle);
        if (txnBlocksCheckpoint && !pDb->areSnapshotsEnabled()) {
            // Implement rollback by simulating crash recovery,
            // reverting all pages modified by transaction.  No need
            // to do this when snapshots are in use because no permanent
            // pages were modified.
            pDb->recoverOnline();
        }
    }
}
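// NOTE:  in visit(ProxyCmdRollback) above, the action mutex is presumably
// held for the entire method so that no checkpoint can intervene between the
// logical rollback and the recoverOnline() call; a checkpoint at that point
// could flush pages that the simulated crash recovery is about to revert.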
void CmdInterpreter::beginTxn(ProxyBeginTxnCmd &cmd, bool readOnly, TxnId csn)
{
    assert(readOnly || csn == NULL_TXN_ID);

    // block checkpoints during this method
    DbHandle *pDbHandle = getDbHandle(cmd.getDbHandle());
    SharedDatabase pDb = pDbHandle->pDb;
    SXMutexSharedGuard actionMutexGuard(
        pDb->getCheckpointThread()->getActionMutex());

    std::auto_ptr<TxnHandle> pTxnHandle(newTxnHandle());
    JniUtil::incrementHandleCount(TXNHANDLE_TRACE_TYPE_STR, pTxnHandle.get());
    pTxnHandle->pDb = pDb;
    pTxnHandle->readOnly = readOnly;
    // TODO:  CacheAccessor factory
    pTxnHandle->pTxn = pDb->getTxnLog()->newLogicalTxn(pDb->getCache());
    pTxnHandle->pResourceGovernor = pDbHandle->pResourceGovernor;

    // NOTE:  use a null scratchAccessor; individual ExecStreamGraphs
    // will have their own
    SegmentAccessor scratchAccessor;

    pTxnHandle->pFtrsTableWriterFactory = SharedFtrsTableWriterFactory(
        new FtrsTableWriterFactory(
            pDb,
            pDb->getCache(),
            pDb->getTypeFactory(),
            scratchAccessor));

    // If snapshots are enabled, set up 2 snapshot segments -- one of which
    // only reads committed data.  This will be used for streams that need to
    // read a snapshot of the data before other portions of the stream graph
    // have modified the segment.
    if (pDb->areSnapshotsEnabled()) {
        if (csn == NULL_TXN_ID) {
            csn = pTxnHandle->pTxn->getTxnId();
        }
        pTxnHandle->pSnapshotSegment =
            pDb->getSegmentFactory()->newSnapshotRandomAllocationSegment(
                pDb->getDataSegment(),
                pDb->getDataSegment(),
                csn,
                false);
        pTxnHandle->pReadCommittedSnapshotSegment =
            pDb->getSegmentFactory()->newSnapshotRandomAllocationSegment(
                pDb->getDataSegment(),
                pDb->getDataSegment(),
                csn,
                true);
    } else {
        assert(csn == NULL_TXN_ID);
    }

    setTxnHandle(cmd.getResultHandle(), pTxnHandle.release());
}
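// NOTE:  the assert at the top of beginTxn() above implies three legal entry
// patterns:  a read-write txn (readOnly = false, csn = NULL_TXN_ID), an
// ordinary read-only txn (readOnly = true, csn = NULL_TXN_ID), and a
// read-only txn pinned to an explicit commit sequence number (readOnly = true
// with csn supplied by the caller), the latter presumably used when reading
// the snapshot associated with a label.  When no csn is supplied and
// snapshots are enabled, the new txn's own txnId doubles as its snapshot csn.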
void CmdInterpreter::visit(ProxyCmdGetTxnCsn &cmd)
{
    TxnHandle *pTxnHandle = getTxnHandle(cmd.getTxnHandle());
    SharedDatabase pDb = pTxnHandle->pDb;
    assert(pDb->areSnapshotsEnabled());
    SnapshotRandomAllocationSegment *pSegment =
        SegmentFactory::dynamicCast<SnapshotRandomAllocationSegment *>(
            pTxnHandle->pSnapshotSegment);
    setCsnHandle(cmd.getResultHandle(), pSegment->getSnapshotCsn());
}
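// NOTE:  getSnapshotCsn() above returns the commit sequence number the txn's
// snapshot segment was created with in beginTxn(); the Java caller presumably
// records it (e.g., when creating a label) to identify which committed data
// is visible to this txn.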
void CmdInterpreter::visit(ProxyCmdVersionIndexRoot &cmd)
{
    TxnHandle *pTxnHandle = getTxnHandle(cmd.getTxnHandle());
    SharedDatabase pDb = pTxnHandle->pDb;
    assert(pDb->areSnapshotsEnabled());
    SnapshotRandomAllocationSegment *pSnapshotSegment =
        SegmentFactory::dynamicCast<SnapshotRandomAllocationSegment *>(
            pTxnHandle->pSnapshotSegment);

    pSnapshotSegment->versionPage(
        PageId(cmd.getOldRootPageId()),
        PageId(cmd.getNewRootPageId()));
}
void CmdInterpreter::visit(ProxyCmdAlterSystemDeallocate &cmd)
{
    DbHandle *pDbHandle = getDbHandle(cmd.getDbHandle());
    SharedDatabase pDb = pDbHandle->pDb;
    if (!pDb->areSnapshotsEnabled()) {
        // Nothing to do if snapshots aren't enabled
        return;
    } else {
        uint64_t paramVal = cmd.getOldestLabelCsn();
        TxnId labelCsn = isMAXU(paramVal) ? NULL_TXN_ID : TxnId(paramVal);
        pDb->deallocateOldPages(labelCsn);
    }
}
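// NOTE:  in visit(ProxyCmdAlterSystemDeallocate) above, the oldestLabelCsn
// parameter arrives as an unsigned 64-bit value; MAXU appears to be the
// sentinel for "no label exists", in which case NULL_TXN_ID is passed so
// deallocateOldPages() is not constrained by any label's snapshot when
// reclaiming old page versions.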
void CmdInterpreter::visit(ProxyCmdCommit &cmd)
{
    TxnHandle *pTxnHandle = getTxnHandle(cmd.getTxnHandle());
    SharedDatabase pDb = pTxnHandle->pDb;

    // block checkpoints during this method
    bool txnBlocksCheckpoint = !pTxnHandle->readOnly && pDb->shouldForceTxns();
    SXMutexSharedGuard actionMutexGuard(
        pDb->getCheckpointThread()->getActionMutex());

    if (pDb->areSnapshotsEnabled()) {
        // Commit the current txn, and start a new one so the versioned
        // pages that we're now going to commit will be marked with a txnId
        // corresponding to the time of the commit.  At present, those pages
        // are marked with a txnId corresponding to the start of the txn.
        pTxnHandle->pTxn->commit();
        pTxnHandle->pTxn = pDb->getTxnLog()->newLogicalTxn(pDb->getCache());
        SnapshotRandomAllocationSegment *pSnapshotSegment =
            SegmentFactory::dynamicCast<SnapshotRandomAllocationSegment *>(
                pTxnHandle->pSnapshotSegment);
        TxnId commitTxnId = pTxnHandle->pTxn->getTxnId();
        pSnapshotSegment->commitChanges(commitTxnId);

        // Flush pages associated with the snapshot segment.  Note that we
        // don't need to flush the underlying versioned segment first since
        // the snapshot pages are all new and therefore, are never logged.
        // Pages in the underlying versioned segment will be flushed in the
        // requestCheckpoint call further below.  Also note that the
        // checkpoint is not initiated through the dynamically cast segment
        // to ensure that the command is traced if tracing is turned on.
        if (txnBlocksCheckpoint) {
            pTxnHandle->pSnapshotSegment->checkpoint(CHECKPOINT_FLUSH_ALL);
        }
    }

    if (cmd.getSvptHandle()) {
        SavepointId svptId = getSavepointId(cmd.getSvptHandle());
        pTxnHandle->pTxn->commitSavepoint(svptId);
    } else {
        pTxnHandle->pTxn->commit();
        deleteAndNullify(pTxnHandle);
        if (txnBlocksCheckpoint) {
            // release the checkpoint lock acquired above
            actionMutexGuard.unlock();

            // force a checkpoint now to flush all data modified by transaction
            // to disk; wait for it to complete before reporting the
            // transaction as committed
            pDb->requestCheckpoint(CHECKPOINT_FLUSH_ALL, false);
        }
    }
}
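// NOTE:  the commit-then-begin sequence in visit(ProxyCmdCommit) above
// matters because versioned pages are initially stamped with the txnId
// assigned when the txn started.  Re-stamping them via
// commitChanges(commitTxnId), using a txnId allocated at commit time,
// presumably ensures that snapshots started while this txn was still in
// flight (i.e., with csn's between the start and the commit) do not see its
// changes.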
extern "C" JNIEXPORT void JNICALL Java_net_sf_farrago_fennel_FennelStorage_tupleStreamGraphOpen( JNIEnv *pEnvInit, jclass, jlong hStreamGraph, jlong hTxn, jobject hJavaStreamMap, jobject hJavaErrorTarget) { JniEnvRef pEnv(pEnvInit); try { CmdInterpreter::StreamGraphHandle &streamGraphHandle = CmdInterpreter::getStreamGraphHandleFromLong(hStreamGraph); CmdInterpreter::TxnHandle &txnHandle = CmdInterpreter::getTxnHandleFromLong(hTxn); // Provide runtime context for stream open(), which a scheduler may // defer until after our java caller returns: hence the global ref. if (streamGraphHandle.javaRuntimeContext) { // TODO jvs 13-May-2010: Use a shared pointer for this // like we do with ErrorTarget, and track its JNI handle. pEnv->DeleteGlobalRef(streamGraphHandle.javaRuntimeContext); streamGraphHandle.javaRuntimeContext = NULL; } streamGraphHandle.javaRuntimeContext = pEnv->NewGlobalRef(hJavaStreamMap); streamGraphHandle.pExecStreamGraph->setTxn(txnHandle.pTxn); // When snapshots are enabled, switch the delegating segment so // the stream graph accesses the snapshot segment associated with // the current txn SharedDatabase pDb = txnHandle.pDb; if (pDb->areSnapshotsEnabled()) { DynamicDelegatingSegment *pSegment = SegmentFactory::dynamicCast<DynamicDelegatingSegment *>( streamGraphHandle.pSegment); pSegment->setDelegatingSegment( WeakSegment(txnHandle.pSnapshotSegment)); pSegment = SegmentFactory::dynamicCast<DynamicDelegatingSegment *>( streamGraphHandle.pReadCommittedSegment); pSegment->setDelegatingSegment( WeakSegment(txnHandle.pReadCommittedSnapshotSegment)); } streamGraphHandle.pExecStreamGraph->setErrorTarget( CmdInterpreter::newErrorTarget(hJavaErrorTarget)); txnHandle.pResourceGovernor->requestResources( *(streamGraphHandle.pExecStreamGraph)); streamGraphHandle.pExecStreamGraph->open(); if (streamGraphHandle.pScheduler.unique()) { streamGraphHandle.pScheduler->start(); } } catch (std::exception &ex) { pEnv.handleExcn(ex); } }
void CmdInterpreter::visit(ProxyCmdCreateExecutionStreamGraph &cmd)
{
#if 0
    struct mallinfo minfo = mallinfo();
    std::cout << "Number of allocated bytes before stream graph construction = "
        << minfo.uordblks << " bytes" << std::endl;
#endif
    TxnHandle *pTxnHandle = getTxnHandle(cmd.getTxnHandle());
    SharedDatabase pDb = pTxnHandle->pDb;
    SharedExecStreamGraph pGraph =
        ExecStreamGraph::newExecStreamGraph();
    pGraph->setTxn(pTxnHandle->pTxn);
    pGraph->setResourceGovernor(pTxnHandle->pResourceGovernor);
    std::auto_ptr<StreamGraphHandle> pStreamGraphHandle(
        new StreamGraphHandle());
    JniUtil::incrementHandleCount(
        STREAMGRAPHHANDLE_TRACE_TYPE_STR, pStreamGraphHandle.get());
    pStreamGraphHandle->javaRuntimeContext = NULL;
    pStreamGraphHandle->pTxnHandle = pTxnHandle;
    pStreamGraphHandle->pExecStreamGraph = pGraph;
    pStreamGraphHandle->pExecStreamFactory.reset(
        new ExecStreamFactory(
            pDb,
            pTxnHandle->pFtrsTableWriterFactory,
            pStreamGraphHandle.get()));

    // When snapshots are enabled, allocate DynamicDelegatingSegments for the
    // stream graph so if the stream graph is executed in different txns,
    // we can reset the delegate to whatever is the snapshot segment associated
    // with the current txn.
    if (pDb->areSnapshotsEnabled()) {
        pStreamGraphHandle->pSegment =
            pDb->getSegmentFactory()->newDynamicDelegatingSegment(
                pTxnHandle->pSnapshotSegment);
        pStreamGraphHandle->pReadCommittedSegment =
            pDb->getSegmentFactory()->newDynamicDelegatingSegment(
                pTxnHandle->pReadCommittedSnapshotSegment);
    }

    setStreamGraphHandle(
        cmd.getResultHandle(),
        pStreamGraphHandle.release());
}
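// The disabled sketch below is illustration only:  it shows why the stream
// graph is given DynamicDelegatingSegments rather than the txn's snapshot
// segments directly -- when the same graph is reopened under a later txn,
// only the delegate needs to be swapped.  It mirrors the snapshot handling in
// Java_net_sf_farrago_fennel_FennelStorage_tupleStreamGraphOpen() above; the
// helper name rebindStreamGraphToTxn is hypothetical.
#if 0
static void rebindStreamGraphToTxn(
    CmdInterpreter::StreamGraphHandle &streamGraphHandle,
    CmdInterpreter::TxnHandle &txnHandle)
{
    // Point the graph's delegating segments at the new txn's snapshot
    // segments without rebuilding the graph itself.
    DynamicDelegatingSegment *pSegment =
        SegmentFactory::dynamicCast<DynamicDelegatingSegment *>(
            streamGraphHandle.pSegment);
    pSegment->setDelegatingSegment(
        WeakSegment(txnHandle.pSnapshotSegment));
    pSegment =
        SegmentFactory::dynamicCast<DynamicDelegatingSegment *>(
            streamGraphHandle.pReadCommittedSegment);
    pSegment->setDelegatingSegment(
        WeakSegment(txnHandle.pReadCommittedSnapshotSegment));

    // The graph also needs the new logical txn before it is reopened.
    streamGraphHandle.pExecStreamGraph->setTxn(txnHandle.pTxn);
}
#endif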