// Strict-weak ordering for _monStorageUnit: order by name first, and use
// the sequence number only to break ties between identical names.
// BUG FIX: the original stored ( ossStrncmp(...) < 0 ) into rc and then
// compared _sequence whenever rc was 0 -- i.e. whenever the name was NOT
// strictly less, which includes _name > r._name. That breaks strict weak
// ordering (both a<b and b<a could hold), which is undefined behavior for
// std::set/std::sort consumers. Compare names, then tie-break on sequence.
OSS_INLINE BOOLEAN operator<(const _monStorageUnit &r) const
{
   SINT32 rc = ossStrncmp( _name, r._name, sizeof(_name) ) ;
   if ( 0 == rc )
   {
      // names identical: order by sequence
      return _sequence < r._sequence ;
   }
   return rc < 0 ;
}
// Collect hash-join hints for this nested-loop join node.
// A HASHJOIN hint is accepted only when this node is an INNER join and the
// join condition is an equality (EG) between two database attributes;
// accepted hints are appended to _hints. Always returns SDB_OK.
INT32 _qgmOptiNLJoin::handleHints( QGM_HINS &hints )
{
   PD_TRACE_ENTRY( SDB__QGMOPTINLJOIN_HANDLEHINTS ) ;
   INT32 rc = SDB_OK ;
   // hash join only applies to inner joins
   if ( SQL_GRAMMAR::INNERJOIN == _joinType )
   {
      for ( QGM_HINS::iterator it = hints.begin() ;
            it != hints.end() ; ++it )
      {
         // skip anything that is not a hash-join hint
         if ( 0 != ossStrncmp( it->value.begin(), QGM_HINT_HASHJOIN,
                               it->value.size() ) )
         {
            continue ;
         }
         // the hint is usable only with an equality join condition
         if ( NULL == _condition || SQL_GRAMMAR::EG != _condition->type )
         {
            continue ;
         }
         SDB_ASSERT( NULL != _condition->left && NULL != _condition->right,
                     "impossible") ;
         // both operands must be plain database attributes
         if ( SQL_GRAMMAR::DBATTR == _condition->left->type &&
              SQL_GRAMMAR::DBATTR == _condition->right->type )
         {
            _hints.push_back( *it ) ;
         }
      }
   }
   PD_TRACE_EXITRC( SDB__QGMOPTINLJOIN_HANDLEHINTS, rc ) ;
   return rc ;
}
int main ( int argc, char **argv ) { INT32 rc = SDB_OK ; OSSNPIPE handle ; if ( argc != 2 ) { printf ( "Syntax: %s <pipe name>"OSS_NEWLINE, argv[0] ) ; return 0 ; } CHAR *pPipeName = argv[1] ; ossPrintf ( "Open named pipe: %s"OSS_NEWLINE, pPipeName ) ; rc = ossOpenNamedPipe ( pPipeName, OSS_NPIPE_DUPLEX | OSS_NPIPE_BLOCK, OSS_NPIPE_INFINITE_TIMEOUT, handle ) ; if ( rc && SDB_FE != rc ) { PD_LOG ( PDERROR, "Failed to create named pipe: %s, rc %d"OSS_NEWLINE, pPipeName, rc ) ; goto open_error ; } ossPrintf ( "Write to named pipe: %s"OSS_NEWLINE, pPipeName ) ; while ( TRUE ) { rc = readInput ( "Input", 1 ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to read from command line" ) ; goto error ; } rc = ossWriteNamedPipe ( handle, receiveBuffer, ossStrlen ( receiveBuffer ) + 1, NULL ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to read packet size"OSS_NEWLINE ) ; goto error ; } if ( ossStrncmp ( receiveBuffer, EXIT_CODE, sizeof(EXIT_CODE) ) == 0 ) break ; } error : rc = ossCloseNamedPipe ( handle ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to close named pipe"OSS_NEWLINE ) ; goto open_error ; } open_error : return rc ; }
// Convert a use-index hint into a BSON object carrying the index name.
// Hint forms: useindex(<index>) or useindex(<table>, <index>); any other
// arity yields an empty object. The literal index names "null"/"NULL"
// map to a BSON null value.
BSONObj qgmUseIndexHintToBson( const qgmHint &h )
{
   BSONObjBuilder builder ;
   qgmField f ;
   if ( 1 == h.param.size() )
   {
      f = h.param.at( 0 ).value.attr() ;
   }
   else if ( 2 == h.param.size() )
   {
      f = h.param.at( 1 ).value.attr() ;
   }
   else
   {
      goto done ;
   }
   // BUG FIX: the original compared only f.size() characters, so any
   // prefix of "null"/"NULL" (e.g. an index literally named "nul") was
   // also treated as null. Require the length to match exactly.
   if ( ( 4 == f.size() ) &&
        ( 0 == ossStrncmp( f.begin(), "null", f.size() ) ||
          0 == ossStrncmp( f.begin(), "NULL", f.size() ) ) )
   {
      builder.appendNull("") ;
   }
   else
   {
      // f is not NULL-terminated: append with an explicit length
      builder.appendStrWithNoTerminating( "", f.begin(), f.size() ) ;
   }
done:
   return builder.obj() ;
}
// Remove the task whose name is exactly pTaskName from the task manager.
// Scans the task map and delegates the actual removal to the
// removeTask(_omaTask*) overload for the first exact match.
// Returns SDB_OK when no matching task is found (best-effort semantics),
// otherwise the result of the delegated removal.
INT32 _omaTaskMgr::removeTask ( const CHAR *pTaskName )
{
   INT32 rc = SDB_OK ;
   std::map<INT64, _omaTask*>::iterator it = _taskMap.begin() ;
   PD_LOG( PDDEBUG, "There are [%d] task kept in task manager, "
           "the removing task is[%s]", _taskMap.size(), pTaskName ) ;
   for ( ; it != _taskMap.end(); ++it )
   {
      _omaTask *pTask = it->second ;
      const CHAR *name = pTask->getTaskName() ;
      PD_LOG ( PDDEBUG, "The task is [%s]", name ) ;
      // BUG FIX: the original compared only ossStrlen(pTaskName) chars,
      // so removing "backup" would also match a task named "backup-2".
      // Use a full string comparison to remove exactly the named task.
      if ( 0 == ossStrcmp( name, pTaskName ) )
      {
         rc = removeTask( pTask ) ;
         break ;
      }
   }
   return rc ;
}
// Forward the first matching use-index hint to a sub (scan) node.
// Only a USEINDEX hint with two parameters whose first parameter equals
// the sub node's alias is forwarded; at most one hint is passed on.
// Returns the sub node's handleHints() result, or SDB_OK when no hint
// addresses this sub node.
INT32 _qgmOptiNLJoin::_handleHints( _qgmOptiTreeNode *sub,
                                    const QGM_HINS &hint )
{
   INT32 rc = SDB_OK ;
   QGM_HINS picked ;
   const qgmField &alias = sub->getAlias() ;
   QGM_HINS::const_iterator it = hint.begin() ;
   while ( it != hint.end() )
   {
      BOOLEAN isUseIndex = ( 0 == ossStrncmp( it->value.begin(),
                                              QGM_HINT_USEINDEX,
                                              it->value.size() ) ) ;
      if ( isUseIndex && 2 == it->param.size() )
      {
         // first parameter names the table/alias the hint applies to
         const qgmField &tableName = it->param.begin()->value.attr() ;
         if ( alias == tableName )
         {
            picked.push_back( *it ) ;
            break ;
         }
      }
      ++it ;
   }
   if ( !picked.empty() )
   {
      rc = sub->handleHints( picked ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to handle hint in sub node:%d", rc ) ;
      }
   }
   return rc ;
}
// Test two byte sequences for exact equality: lengths must match and the
// first srcLen bytes must compare equal.
// FIX: the original tested the ossStrncmp result against SDB_OK, an error
// code that merely happens to be 0; compare against 0 explicitly, which
// is what strncmp-style APIs document, and make the length guard a
// separate early return for readability.
BOOLEAN qgmUtilSame( const CHAR *src, UINT32 srcLen,
                     const CHAR *dst, UINT32 dstLen )
{
   if ( srcLen != dstLen )
   {
      return FALSE ;
   }
   return ( 0 == ossStrncmp( src, dst, srcLen ) ) ? TRUE : FALSE ;
}
// Handle query hints for a nested-loop join node.
// A HASHJOIN hint is accepted only when this node is an INNER join and
// the join condition is a single equality (EG) between two database
// attributes (DBATTR on both sides); accepted hints are stored in _hints.
// The hint list is then forwarded to each child that is a SCAN node so
// table scans can pick up use-index hints addressed to them.
INT32 _qgmOptiNLJoin::handleHints( QGM_HINS &hints )
{
   PD_TRACE_ENTRY( SDB__QGMOPTINLJOIN_HANDLEHINTS ) ;
   INT32 rc = SDB_OK ;
   // hash join is only supported for inner joins
   if ( SQL_GRAMMAR::INNERJOIN != _joinType)
   {
      goto done ;
   }
   {
      QGM_HINS::iterator itr = hints.begin() ;
      for ( ; itr != hints.end(); itr++ )
      {
         // NOTE(review): only value.size() chars are compared, so a hint
         // name that is a prefix of QGM_HINT_HASHJOIN would also match --
         // confirm hint names cannot collide this way.
         if ( 0 == ossStrncmp( itr->value.begin(), QGM_HINT_HASHJOIN,
                               itr->value.size() ))
         {
            // a hash join needs an equality condition
            if ( NULL != _condition && SQL_GRAMMAR::EG == _condition->type )
            {
               SDB_ASSERT( NULL != _condition->left &&
                           NULL != _condition->right, "impossible") ;
               // both operands must be plain database attributes
               if ( SQL_GRAMMAR::DBATTR == _condition->left->type &&
                    SQL_GRAMMAR::DBATTR == _condition->right->type )
               {
                  qgmHint hint = *itr ;
                  _hints.push_back( hint ) ;
               }
            }
         }
      }
   }
   // forward hints to scan children; a NL join is expected to have two
   if ( 2 != _children.size() )
   {
      goto done ;
   }
   if ( QGM_OPTI_TYPE_SCAN == _children[0]->getType() )
   {
      rc = _handleHints( _children[0], hints ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "sub node failed to handle hint:%d", rc ) ;
         goto error ;
      }
   }
   if ( QGM_OPTI_TYPE_SCAN == _children[1]->getType() )
   {
      rc = _handleHints( _children[1], hints ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "sub node failed to handle hint:%d", rc ) ;
         goto error ;
      }
   }
done:
   PD_TRACE_EXITRC( SDB__QGMOPTINLJOIN_HANDLEHINTS, rc ) ;
   return rc ;
error:
   goto done ;
}
// Append (or overwrite) an HTTP response header on this REST session.
// pKey/pValue are copied into session-owned buffers, so the caller's
// storage may be released after the call. Setting the transfer header to
// the chunked value additionally flags the connection as chunked.
// Returns SDB_OK, or the buffer-allocation error code on failure.
INT32 restAdaptor::appendHttpHeader( pmdRestSession *pSession,
                                     const CHAR *pKey,
                                     const CHAR *pValue )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY( SDB__RESTADP_APPENDHEADER ) ;
   SDB_ASSERT ( pSession, "pSession is NULL" ) ;
   INT32 keySize = ossStrlen( pKey ) ;
   INT32 valueSize = ossStrlen( pValue ) ;
   // key + value plus one NULL terminator for each
   INT32 newHeaderSize = keySize + valueSize + 2 ;
   INT32 tempSize = 0 ;
   CHAR *pNewHeaderBuf = NULL ;
   CHAR *pNewKey = NULL ;
   CHAR *pNewValue = NULL ;
   httpConnection *pHttpCon = pSession->getRestConn() ;
   COLNAME_MAP_IT it ;
   // "Transfer-Encoding: chunked" switches the connection to chunked mode
   if( REST_STRING_TRANSFER_SIZE == ossStrlen( pKey ) &&
       0 == ossStrncmp( pKey, REST_STRING_TRANSFER,
                        REST_STRING_TRANSFER_SIZE ) &&
       REST_STRING_CHUNKED_SIZE == ossStrlen( pValue ) &&
       0 == ossStrncmp( pValue, REST_STRING_CHUNKED,
                        REST_STRING_CHUNKED_SIZE ) )
   {
      pHttpCon->_isChunk = TRUE ;
   }
   it = pHttpCon->_responseHeaders.find( pKey ) ;
   if ( it == pHttpCon->_responseHeaders.end() )
   {
      // new header: one buffer laid out as "key\0value\0"
      rc = pSession->allocBuff( newHeaderSize, &pNewHeaderBuf, tempSize ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Unable to allocate %d bytes memory, rc=%d",
                  newHeaderSize, rc ) ;
         goto error ;
      }
      pNewKey = pNewHeaderBuf ;
      pNewValue = pNewHeaderBuf + keySize + 1 ;
      ossStrncpy( pNewKey, pKey, keySize ) ;
      ossStrncpy( pNewValue, pValue, valueSize ) ;
      pNewKey[ keySize ] = 0 ;
      pNewValue[ valueSize ] = 0 ;
      // the map stores pointers into the session-owned buffer
      pHttpCon->_responseHeaders.insert( std::make_pair( pNewKey,
                                                         pNewValue ) ) ;
   }
   else
   {
      // existing header: allocate a new value buffer and repoint the entry
      // (the old value buffer presumably stays owned by the session until
      // session teardown -- confirm allocBuff's lifetime contract)
      rc = pSession->allocBuff( valueSize + 1, &pNewValue, tempSize ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Unable to allocate %d bytes memory, rc=%d",
                  valueSize, rc ) ;
         goto error ;
      }
      ossStrncpy( pNewValue, pValue, valueSize ) ;
      pNewValue[ valueSize ] = 0 ;
      it->second = pNewValue ;
   }
done:
   PD_TRACE_EXITRC( SDB__RESTADP_APPENDHEADER, rc ) ;
   return rc ;
error:
   goto done ;
}
// Enumerate files under dirPath (recursing up to 'deep' levels) whose
// names match 'filter' according to 'type'; each match is recorded in
// mapFiles as fileName -> full path.
// Returns SDB_INVALIDARG when dirPath is not an existing directory and
// SDB_SYS on filesystem exceptions; per-entry errors are logged and
// skipped.
static INT32 _ossEnumFiles( const string &dirPath,
                            map<string, string> &mapFiles,
                            const CHAR *filter, UINT32 filterLen,
                            OSS_MATCH_TYPE type, UINT32 deep )
{
   INT32 rc = SDB_OK ;
   try
   {
      fs::path dbDir ( dirPath ) ;
      fs::directory_iterator end_iter ;
      if ( 0 == deep )
      {
         goto done ;
      }
      if ( fs::exists ( dbDir ) && fs::is_directory ( dbDir ) )
      {
         for ( fs::directory_iterator dir_iter ( dbDir );
               dir_iter != end_iter; ++dir_iter )
         {
            try
            {
               if ( fs::is_regular_file ( dir_iter->status() ) )
               {
                  const std::string fileName =
                        dir_iter->path().filename().string() ;
                  // BUG FIX: OSS_MATCH_RIGHT used ossStrstr, which finds
                  // the FIRST occurrence of filter, then required the end
                  // of string right after it -- so "abcabc" with filter
                  // "abc" failed the suffix test even though it ends with
                  // the filter. Compare the trailing filterLen bytes
                  // directly instead.
                  if ( ( OSS_MATCH_NULL == type ) ||
                       ( OSS_MATCH_LEFT == type &&
                         0 == ossStrncmp( fileName.c_str(), filter,
                                          filterLen ) ) ||
                       ( OSS_MATCH_MID == type &&
                         ossStrstr( fileName.c_str(), filter ) ) ||
                       ( OSS_MATCH_RIGHT == type &&
                         fileName.size() >= filterLen &&
                         0 == ossStrcmp( fileName.c_str() +
                                         fileName.size() - filterLen,
                                         filter ) ) ||
                       ( OSS_MATCH_ALL == type &&
                         0 == ossStrcmp( fileName.c_str(), filter ) ) )
                  {
                     mapFiles[ fileName ] = dir_iter->path().string() ;
                  }
               }
               else if ( fs::is_directory( dir_iter->path() ) && deep > 1 )
               {
                  // best-effort recursion: failures inside a sub-directory
                  // are logged there and do not abort the overall scan
                  _ossEnumFiles( dir_iter->path().string(), mapFiles,
                                 filter, filterLen, type, deep - 1 ) ;
               }
            }
            catch( std::exception &e )
            {
               PD_LOG( PDWARNING, "File or dir[%s] occur exception: %s",
                       dir_iter->path().string().c_str(), e.what() ) ;
            }
         }
      }
      else
      {
         rc = SDB_INVALIDARG ;
         goto error ;
      }
   }
   catch( std::exception &e )
   {
      PD_LOG( PDERROR, "Occur exception: %s", e.what() ) ;
      rc = SDB_SYS ;
      goto error ;
   }
done:
   return rc ;
error:
   goto done ;
}
// PD_TRACE_DECLARE_FUNCTION ( SDB__DPSLOGFILE__RESTRORE, "_dpsLogFile::_restore" )
// Restore the state of a DPS (replica log) file after it is (re)opened:
//   1. verify the physical size against the configured size,
//   2. read and validate the header (eye catcher, meta size, file number),
//   3. if the file is over-sized and the node is starting up from a
//      crash, truncate it back to the configured size,
//   4. upgrade the header to the current on-disk version if needed,
//   5. scan records from the first LSN to locate the end of valid data,
//   6. re-load the last record; if it is corrupted, step back over it.
// On exit _idleSize holds the number of unused bytes at the file's tail.
INT32 _dpsLogFile::_restore ()
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB__DPSLOGFILE__RESTRORE );
   INT64 fileSize = 0 ;
   UINT64 offSet = 0 ;       // scan position relative to the data area
   UINT64 baseOffset = 0 ;   // LSN offset where this file's data begins
   dpsLogRecordHeader lsnHeader ;
   CHAR *lastRecord = NULL ; // buffer for re-reading the last record
   UINT64 lastOffset = 0 ;
   UINT32 lastLen = 0 ;
   _inRestore = TRUE ;
   // Judge the length is right: physical size must cover header + data
   rc = ossGetFileSize( _file, &fileSize ) ;
   if ( SDB_OK != rc )
   {
      goto error ;
   }
   if ( fileSize < (INT64)( _fileSize + sizeof(dpsLogHeader) ) )
   {
      PD_LOG ( PDERROR, "DPS file size[%d] is smaller than config[%d]",
               fileSize - sizeof(dpsLogHeader), _fileSize ) ;
      rc = SDB_DPS_FILE_SIZE_NOT_SAME ;
      goto error ;
   }
   // Init header: read the on-disk header into _logHeader
   rc = _readHeader() ;
   if ( SDB_OK != rc )
   {
      PD_LOG ( PDERROR, "Fail to read dps file header[rc:%d]", rc ) ;
      goto error ;
   }
   // check header info
   if ( ossStrncmp( _logHeader._eyeCatcher, DPS_LOG_HEADER_EYECATCHER,
                    sizeof( _logHeader._eyeCatcher ) ) != 0 )
   {
      PD_LOG( PDERROR, "DPS file eye catcher error" ) ;
      rc = SDB_DPS_FILE_NOT_RECOGNISE ;
      goto error ;
   }
   else if ( _logHeader._fileSize != 0 && _logHeader._fileSize != _fileSize )
   {
      PD_LOG( PDERROR, "DPS file's meta size[%d] is not the same with "
              "config[%d]", _logHeader._fileSize, _fileSize ) ;
      rc = SDB_DPS_FILE_SIZE_NOT_SAME ;
      goto error ;
   }
   else if ( _logHeader._fileNum != 0 && _logHeader._fileNum != _fileNum )
   {
      PD_LOG( PDERROR, "DPS file's meta file num[%d] is not the same with "
              "config[%d]", _logHeader._fileNum, _fileNum ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   // check the real size: an over-sized file is only repaired when the
   // node is starting up from a crash; otherwise it is a hard error
   if ( fileSize > (INT64)( _fileSize + sizeof(dpsLogHeader) ) )
   {
      PD_LOG( PDERROR, "DPS file real size[%d] is not the same with "
              "config[%d]", fileSize - sizeof(dpsLogHeader), _fileSize ) ;
      // start up from crash
      if ( !pmdGetStartup().isOK() )
      {
         rc = ossTruncateFile( _file, _fileSize + sizeof(dpsLogHeader) ) ;
         if ( rc )
         {
            PD_LOG( PDWARNING, "Tuncate dps file to config size failed, "
                    "rc: %d", rc ) ;
            goto error ;
         }
         PD_LOG( PDEVENT,
                 "Tuncate dps file to config size[%d]", _fileSize ) ;
      }
      else
      {
         goto error ;
      }
   }
   PD_LOG ( PDEVENT, "Header info[first lsn:%d.%lld, logID:%d]",
            _logHeader._firstLSN.version, _logHeader._firstLSN.offset,
            _logHeader._logID ) ;
   // upgrade the header to the current version and flush it back to disk
   if ( _logHeader._version != DPS_LOG_FILE_VERSION1 )
   {
      _logHeader._version = DPS_LOG_FILE_VERSION1 ;
      _logHeader._fileSize = _fileSize ;
      _logHeader._fileNum = _fileNum ;
      rc = _flushHeader() ;
      PD_RC_CHECK( rc, PDERROR, "Failed to flush header, rc: %d", rc ) ;
   }
   // an unused file (no log id / invalid first LSN) has nothing to scan
   if ( _logHeader._logID == DPS_INVALID_LOG_FILE_ID ||
        _logHeader._firstLSN.invalid() )
   {
      _logHeader._firstLSN.version = DPS_INVALID_LSN_VERSION ;
      _logHeader._firstLSN.offset = DPS_INVALID_LSN_OFFSET ;
      goto done ;
   }
   offSet = _logHeader._firstLSN.offset % _fileSize ;
   baseOffset = _logHeader._firstLSN.offset - offSet ;
   // analysis the file: walk record headers until the chain breaks
   while ( offSet < _fileSize )
   {
      rc = read ( offSet + baseOffset , sizeof (dpsLogRecordHeader),
                  (CHAR*)&lsnHeader ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG ( PDERROR, "Failed to read lsn header[offset:%lld,rc:%d]",
                  offSet, rc ) ;
         goto error ;
      }
      if ( lsnHeader._lsn != offSet + baseOffset )
      {
         // stored LSN no longer matches the expected position: end of data
         PD_LOG ( PDEVENT, "LSN is not the same[%lld!=%lld]",
                  lsnHeader._lsn, offSet + baseOffset ) ;
         break ;
      }
      else if ( offSet + lsnHeader._length > _fileSize )
      {
         PD_LOG ( PDEVENT, "LSN length[%d] is over the file "
                  "size[offSet:%lld]", lsnHeader._length, offSet ) ;
         break ;
      }
      else if ( lsnHeader._length < sizeof (dpsLogRecordHeader) )
      {
         PD_LOG ( PDEVENT, "LSN length[%d] less than min[%d], invalid LSN",
                  lsnHeader._length, sizeof (dpsLogRecordHeader) ) ;
         break ;
      }
      offSet += lsnHeader._length ;
      lastOffset = offSet ;
      lastLen = lsnHeader._length ;
   }
   /// ensure that the last record is valid.
   if ( 0 < lastLen && 0 < lastOffset )
   {
      _dpsLogRecord lr ;
      lastRecord = ( CHAR * )SDB_OSS_MALLOC( lastLen ) ;
      if ( NULL == lastRecord )
      {
         PD_LOG( PDERROR, "failed to allocate mem.") ;
         rc = SDB_OOM ;
         goto error ;
      }
      rc = read( lastOffset + baseOffset - lastLen, lastLen, lastRecord ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to read dps record[%lld, rc:%d]",
                 offSet, rc ) ;
         goto error ;
      }
      rc = lr.load( lastRecord ) ;
      if ( SDB_DPS_CORRUPTED_LOG == rc )
      {
         /// the last record is corrupted. move to pre one.
         offSet -= lastLen ;
         rc = SDB_OK ;
         const dpsLogRecordHeader *corruptedHeader =
               ( const dpsLogRecordHeader * )lastRecord ;
         PD_LOG( PDEVENT, "last log record(lsn:%lld) is corrupted.",
                 corruptedHeader->_lsn ) ;
         /// only one corrupted log in this file.
         if ( 0 == offSet )
         {
            _logHeader._firstLSN.offset = DPS_INVALID_LSN_OFFSET ;
            _logHeader._firstLSN.version = DPS_INVALID_LSN_VERSION ;
         }
      }
      else if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to load record log:%d", rc ) ;
         goto error ;
      }
   }
   _idleSize = _fileSize - offSet ;
done:
   _inRestore = FALSE ;
   SAFE_OSS_FREE( lastRecord ) ;
   PD_TRACE_EXITRC ( SDB__DPSLOGFILE__RESTRORE, rc );
   return rc ;
error:
   goto done ;
}
// PD_TRACE_DECLARE_FUNCTION ( SDB_RTNREBUILDDB, "rtnRebuildDB" )
// Rebuild every collection of every collection space in the database.
// For each collection: if a reorg was in progress (offline or online
// flag set in the metadata block) run reorg-recover, otherwise run a
// full offline reorg. Failures on a single collection or space are
// logged and skipped so the rebuild continues. The DMS rebuild
// registration guards the whole operation and is always released.
INT32 rtnRebuildDB ( pmdEDUCB *cb )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB_RTNREBUILDDB ) ;
   pmdKRCB *krcb = pmdGetKRCB () ;
   SDB_DMSCB *dmsCB = krcb->getDMSCB () ;
   SDB_RTNCB *rtnCB = krcb->getRTNCB () ;
   BSONObj dummyObj ;
   std::set<monCollectionSpace> csList ;
   std::set<monCollectionSpace>::iterator it ;
   BOOLEAN registeredRebuild = FALSE ;
   PD_LOG ( PDEVENT, "Start rebuilding database" ) ;
   // block conflicting DMS activity while the rebuild runs
   rc = dmsCB->registerRebuild () ;
   if ( rc )
   {
      PD_LOG ( PDERROR, "Failed to register rebuild" ) ;
      goto error ;
   }
   registeredRebuild = TRUE ;
   dmsCB->dumpInfo ( csList, TRUE ) ;
   for ( it = csList.begin(); it != csList.end(); ++it )
   {
      const CHAR *pCSName = (*it)._name ;
      std::set<monCollection> clList ;
      std::set<monCollection>::iterator itCollection ;
      dmsStorageUnitID suID ;
      if ( ossStrlen ( pCSName ) > DMS_COLLECTION_SPACE_NAME_SZ )
      {
         PD_LOG ( PDERROR, "collection space name is not valid: %s",
                  pCSName ) ;
         continue ;
      }
      // skip the internal temp collection space
      if ( ossStrncmp ( pCSName, SDB_DMSTEMP_NAME,
                        DMS_COLLECTION_SPACE_NAME_SZ ) == 0 )
      {
         continue ;
      }
      PD_LOG ( PDEVENT, "Start rebuilding collection space %s", pCSName ) ;
      dmsStorageUnit *su = NULL;
      rc = dmsCB->nameToSUAndLock ( pCSName, suID, &su ) ;
      if ( rc != SDB_OK )
      {
         PD_LOG ( PDERROR, "Failed to lock collection space %s",
                  pCSName ) ;
         continue ;
      }
      // do/while(0) so per-collection failures can fall through to the
      // unconditional suUnlock below
      do
      {
         su->dumpInfo ( clList ) ;
         for ( itCollection = clList.begin();
               itCollection != clList.end(); ++itCollection )
         {
            dmsMBContext *mbContext = NULL ;
            UINT16 collectionFlag ;
            const CHAR *pCLNameTemp = NULL ;
            const CHAR *pCLName = (*itCollection)._name ;
            // full name must contain a '.'; pCLNameTemp points at the
            // last dot, so pCLNameTemp+1 is the short collection name
            if ( ( ossStrlen ( pCLName ) > DMS_COLLECTION_FULL_NAME_SZ ) ||
                 ( NULL == ( pCLNameTemp = ossStrrchr ( pCLName, '.' ))) )
            {
               PD_LOG ( PDERROR, "collection name is not valid: %s",
                        pCLName ) ;
               continue ;
            }
            // lock only long enough to sample the collection flag
            rc = su->data()->getMBContext( &mbContext, pCLNameTemp+1,
                                           EXCLUSIVE ) ;
            if ( rc )
            {
               PD_LOG ( PDWARNING, "Failed to lock collection %s, rc = %d",
                        pCLName, rc ) ;
               continue ;
            }
            collectionFlag = mbContext->mb()->_flag ;
            su->data()->releaseMBContext( mbContext ) ;
            PD_LOG ( PDEVENT, "Start rebuilding collection %s", pCLName ) ;
            if ( DMS_IS_MB_OFFLINE_REORG( collectionFlag ) ||
                 DMS_IS_MB_ONLINE_REORG ( collectionFlag ) )
            {
               // a reorg was interrupted: recover it
               rc = rtnReorgRecover ( pCLName, cb, dmsCB, rtnCB ) ;
               if ( rc )
               {
                  PD_LOG ( PDERROR, "Failed to perform reorg recover: %s, "
                           "rc = %d", pCLName, rc ) ;
                  continue ;
               }
            }
            else
            {
               rc = rtnReorgOffline ( pCLName, dummyObj, cb, dmsCB,
                                      rtnCB, TRUE ) ;
               if ( rc )
               {
                  PD_LOG ( PDERROR, "Failed to perform offline reorg: %s, "
                           "rc = %d", pCLName, rc ) ;
                  continue ;
               }
            }
            PD_LOG ( PDEVENT, "Complete rebuilding collection %s",
                     pCLName ) ;
         } // for
      } while ( 0 ) ;
      dmsCB->suUnlock ( suID ) ;
      PD_LOG ( PDEVENT, "Complete rebuilding collection space %s",
               pCSName ) ;
   } // end for
   PD_LOG ( PDEVENT, "Database rebuild is completed" ) ;
done :
   if ( registeredRebuild )
   {
      dmsCB->rebuildDown () ;
   }
   PD_TRACE_EXITRC ( SDB_RTNREBUILDDB, rc ) ;
   return rc ;
error :
   goto done ;
}
// Order _monCollection items lexicographically by collection name.
OSS_INLINE BOOLEAN operator<(const _monCollection &r) const
{
   const SINT32 cmp = ossStrncmp( _name, r._name, sizeof( _name ) ) ;
   return ( cmp < 0 ) ? TRUE : FALSE ;
}