// The caller of this function must NOT hold the EDU manager latch
int pmdEDUMgr::_createNewEDU ( EDU_TYPES type, void* arg, EDUID *eduid )
{
   int          rc      = EDB_OK ;
   unsigned int probe   = 0 ;
   pmdEDUCB     *cb     = NULL ;
   EDUID        myEDUID = 0 ;

   if ( isQuiesced () )
   {
      rc = EDB_QUIESCED ;
      goto done ;
   }

   if ( !getEntryFuncByType ( type ) )
   {
      PD_LOG ( PDERROR, "The edu[type:%d] does not exist or its entry function is null",
               type ) ;
      rc = EDB_INVALIDARG ;
      probe = 30 ;
      goto error ;
   }

   cb = new(std::nothrow) pmdEDUCB ( this, type ) ;
   EDB_VALIDATE_GOTOERROR ( cb, EDB_OOM,
                            "Out of memory to create agent control block" )
   // set to creating status
   cb->setStatus ( PMD_EDU_CREATING ) ;

   /*********** CRITICAL SECTION *********************/
   _mutex.get () ;
   // the new EDU id must not already exist in the run queue
   if ( _runQueue.end() != _runQueue.find ( _EDUID ) )
   {
      _mutex.release () ;
      rc = EDB_SYS ;
      probe = 10 ;
      goto error ;
   }
   // nor in the idle queue
   if ( _idleQueue.end() != _idleQueue.find ( _EDUID ) )
   {
      _mutex.release () ;
      rc = EDB_SYS ;
      probe = 15 ;
      goto error ;
   }
   // assign the EDU id and increment the global EDUID
   cb->setID ( _EDUID ) ;
   if ( eduid )
      *eduid = _EDUID ;
   // place cb into the run queue
   _runQueue [ _EDUID ] = ( pmdEDUCB* ) cb ;
   myEDUID = _EDUID ;
   ++_EDUID ;
   _mutex.release () ;
   /*********** END CRITICAL SECTION ****************/

   // create a new thread, passing the agent CB and the caller's argument
   try
   {
      boost::thread agentThread ( pmdEDUEntryPoint, type, cb, arg ) ;
      // detach the agent thread; we only track it through the CB
      agentThread.detach () ;
   }
   catch ( std::exception & )
   {
      // thread creation failed, so remove the CB from the run queue
      // (take the latch again: the queue is shared state)
      _mutex.get () ;
      _runQueue.erase ( myEDUID ) ;
      _mutex.release () ;
      rc = EDB_SYS ;
      probe = 20 ;
      goto error ;
   }

   // the EDU is created; post a resume event so it starts running
   cb->postEvent( pmdEDUEvent( PMD_EDU_EVENT_RESUME, false, arg ) ) ;

done :
   return rc ;
error :
   // clean up the control block if it was allocated
   if ( cb )
      delete cb ;
   PD_LOG ( PDERROR, "Failed to create new agent, probe = %d", probe ) ;
   goto done ;
}
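// _createNewEDU() takes and releases _mutex by hand, so every early return
// inside the critical section must remember to call release().  Below is a
// minimal, self-contained sketch of the same bookkeeping written with an RAII
// guard instead; std::mutex/std::lock_guard stand in for the engine's latch,
// and EduQueueRegistry is an illustrative name, not an engine class.
#include <map>
#include <mutex>

class EduQueueRegistry
{
public:
   // Returns false if the id is already tracked in either queue.
   bool registerEdu ( unsigned long long id, void *cb )
   {
      std::lock_guard<std::mutex> guard ( _mutex ) ;   // released on every return path
      if ( _runQueue.count ( id ) || _idleQueue.count ( id ) )
      {
         return false ;
      }
      _runQueue[ id ] = cb ;
      return true ;
   }

private:
   std::mutex _mutex ;
   std::map<unsigned long long, void*> _runQueue ;
   std::map<unsigned long long, void*> _idleQueue ;
} ;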
//PD_TRACE_DECLARE_FUNCTION ( COORD_OPERATORDEL_EXE, "_coordDeleteOperator::execute" ) INT32 _coordDeleteOperator::execute( MsgHeader *pMsg, pmdEDUCB *cb, INT64 &contextID, rtnContextBuf *buf ) { INT32 rc = SDB_OK ; INT32 rcTmp = SDB_OK ; PD_TRACE_ENTRY ( COORD_OPERATORDEL_EXE ) ; coordSendOptions sendOpt( TRUE ) ; coordSendMsgIn inMsg( pMsg ) ; coordProcessResult result ; ROUTE_RC_MAP nokRC ; result._pNokRC = &nokRC ; coordCataSel cataSel ; MsgRouteID errNodeID ; BSONObj boDeletor ; MsgOpDelete *pDelMsg = (MsgOpDelete *)pMsg ; INT32 oldFlag = pDelMsg->flags ; pDelMsg->flags |= FLG_DELETE_RETURNNUM ; contextID = -1 ; INT32 flag = 0; CHAR *pCollectionName = NULL ; CHAR *pDeletor = NULL ; CHAR *pHint = NULL ; rc = msgExtractDelete( (CHAR*)pMsg, &flag, &pCollectionName, &pDeletor, &pHint ) ; if( rc ) { PD_LOG( PDERROR,"Failed to parse delete request, rc: %d", rc ) ; pCollectionName = NULL ; goto error ; } try { boDeletor = BSONObj( pDeletor ) ; } catch ( std::exception &e ) { PD_RC_CHECK( SDB_INVALIDARG, PDERROR, "Delete failed, received unexpected error: %s", e.what() ) ; } MON_SAVE_OP_DETAIL( cb->getMonAppCB(), pMsg->opCode, "Collection:%s, Deletor:%s, Hint:%s, " "Flag:0x%08x(%u)", pCollectionName, boDeletor.toString().c_str(), BSONObj(pHint).toString().c_str(), oldFlag, oldFlag ) ; rc = cataSel.bind( _pResource, pCollectionName, cb, FALSE, TRUE ) ; if ( rc ) { PD_LOG( PDERROR, "Get or update collection[%s]'s catalog info " "failed, rc: %d", pCollectionName, rc ) ; goto error ; } retry: pDelMsg->version = cataSel.getCataPtr()->getVersion() ; pDelMsg->w = 0 ; rcTmp = doOpOnCL( cataSel, boDeletor, inMsg, sendOpt, cb, result ) ; if ( SDB_OK == rcTmp && nokRC.empty() ) { goto done ; } else if ( checkRetryForCLOpr( rcTmp, &nokRC, cataSel, inMsg.msg(), cb, rc, &errNodeID, TRUE ) ) { nokRC.clear() ; _groupSession.getGroupCtrl()->incRetry() ; goto retry ; } else { PD_LOG( PDERROR, "Delete failed on node[%s], rc: %d", routeID2String( errNodeID ).c_str(), rc ) ; goto error ; } done: if ( oldFlag & FLG_DELETE_RETURNNUM ) { contextID = _recvNum ; } if ( pCollectionName ) { PD_AUDIT_OP( AUDIT_DML, MSG_BS_DELETE_REQ, AUDIT_OBJ_CL, pCollectionName, rc, "DeletedNum:%u, Deletor:%s, Hint:%s, Flag:0x%08x(%u)", _recvNum, boDeletor.toString().c_str(), BSONObj(pHint).toString().c_str(), oldFlag, oldFlag ) ; } PD_TRACE_EXITRC ( COORD_OPERATORDEL_EXE, rc ) ; return rc ; error: if ( buf && nokRC.size() > 0 ) { *buf = rtnContextBuf( coordBuildErrorObj( _pResource, rc, cb, &nokRC ) ) ; } goto done ; }
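// The retry: label above re-sends the delete after refreshing the catalog info
// whenever a data group reports a stale version.  The snippet below is only a
// control-flow sketch of that idea written as a bounded loop; the helper
// functions and the -105 error code are made up for illustration and are not
// coordinator APIs.
#include <iostream>

static int g_version = 1 ;
static int  refreshCatalog()          { return ++g_version ; }            // pretend: reload catalog
static bool isStaleVersion( int rc )  { return -105 == rc ; }              // pretend: stale-version error
static int  sendDelete( int version ) { return version < 3 ? -105 : 0 ; }  // pretend: send to data groups

int main()
{
   int rc = 0 ;
   for ( int attempt = 0 ; attempt < 3 ; ++attempt )
   {
      int ver = refreshCatalog() ;
      rc = sendDelete( ver ) ;
      if ( 0 == rc || !isStaleVersion( rc ) )
      {
         break ;        // success, or an error that retrying cannot fix
      }
   }
   std::cout << "rc=" << rc << std::endl ;
   return rc ;
}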
// PD_TRACE_DECLARE_FUNCTION ( SDB_RTNQUERY, "rtnQuery" ) INT32 rtnQuery ( const CHAR *pCollectionName, const BSONObj &selector, const BSONObj &matcher, const BSONObj &orderBy, const BSONObj &hint, SINT32 flags, pmdEDUCB *cb, SINT64 numToSkip, SINT64 numToReturn, SDB_DMSCB *dmsCB, SDB_RTNCB *rtnCB, SINT64 &contextID, rtnContextBase **ppContext, BOOLEAN enablePrefetch ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB_RTNQUERY ) ; dmsStorageUnitID suID = DMS_INVALID_CS ; contextID = -1 ; SDB_ASSERT ( pCollectionName, "collection name can't be NULL" ) ; SDB_ASSERT ( cb, "educb can't be NULL" ) ; SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ; SDB_ASSERT ( rtnCB, "rtnCB can't be NULL" ) ; dmsStorageUnit *su = NULL ; dmsMBContext *mbContext = NULL ; rtnContextData *dataContext = NULL ; const CHAR *pCollectionShortName = NULL ; rtnAccessPlanManager *apm = NULL ; optAccessPlan *plan = NULL ; BSONObj hintTmp = hint ; BSONObj blockObj ; BSONObj *pBlockObj = NULL ; const CHAR *indexName = NULL ; const CHAR *scanType = NULL ; INT32 indexLID = DMS_INVALID_EXTENT ; INT32 direction = 0 ; if ( FLG_QUERY_EXPLAIN & flags ) { rc = rtnExplain( pCollectionName, selector, matcher, orderBy, hint, flags, numToSkip, numToReturn, cb, dmsCB, rtnCB, contextID, ppContext ) ; if ( SDB_OK != rc ) { PD_LOG( PDERROR, "failed to explain query:%d", rc ) ; goto error ; } else { goto done ; } } rc = rtnResolveCollectionNameAndLock ( pCollectionName, dmsCB, &su, &pCollectionShortName, suID ) ; PD_RC_CHECK( rc, PDERROR, "Failed to resolve collection name %s", pCollectionName ) ; rc = su->data()->getMBContext( &mbContext, pCollectionShortName, -1 ) ; PD_RC_CHECK( rc, PDERROR, "Failed to get dms mb context, rc: %d", rc ) ; rc = rtnCB->contextNew ( ( flags & FLG_QUERY_PARALLED ) ? RTN_CONTEXT_PARADATA : RTN_CONTEXT_DATA, (rtnContext**)&dataContext, contextID, cb ) ; PD_RC_CHECK( rc, PDERROR, "Failed to create new data context" ) ; if ( Object == hint.getField( FIELD_NAME_META ).type() ) { BSONObjBuilder build ; rc = _rtnParseQueryMeta( hint.getField( FIELD_NAME_META ).embeddedObject(), scanType, indexName, indexLID, direction, blockObj ) ; PD_RC_CHECK( rc, PDERROR, "Failed to parase query meta[%s], rc: %d", hint.toString().c_str(), rc ) ; pBlockObj = &blockObj ; if ( indexName ) { build.append( "", indexName ) ; } else { build.appendNull( "" ) ; } hintTmp = build.obj () ; } apm = su->getAPM() ; SDB_ASSERT ( apm, "apm shouldn't be NULL" ) ; rc = apm->getPlan ( matcher, orderBy, // orderBy hintTmp, // hint pCollectionShortName, &plan ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to get access plan for %s, context %lld, " "rc: %d", pCollectionName, contextID, rc ) ; goto error ; } else if ( ( flags & FLG_QUERY_FORCE_HINT ) && !hintTmp.isEmpty() && plan->isHintFailed() ) { PD_LOG( PDERROR, "Query used force hint[%s] failed", hintTmp.toString().c_str() ) ; rc = SDB_RTN_INVALID_HINT ; goto error ; } if ( pBlockObj ) { if ( !indexName && TBSCAN != plan->getScanType() ) { PD_LOG( PDERROR, "Scan type[%d] must be TBSCAN", plan->getScanType() ) ; rc = SDB_SYS ; goto error ; } else if ( indexName && ( IXSCAN != plan->getScanType() || indexLID != plan->getIndexLID() ) ) { PD_LOG( PDERROR, "Scan type[%d] error or indexLID[%d] is the " "same with [%d]", plan->getScanType(), plan->getIndexLID(), indexLID ) ; rc = SDB_IXM_NOTEXIST ; goto error ; } } if ( flags & FLG_QUERY_STRINGOUT ) { dataContext->getSelector().setStringOutput( TRUE ) ; } rc = dataContext->open( su, mbContext, plan, cb, selector, plan->sortRequired() ? 
-1 : numToReturn, plan->sortRequired() ? 0 : numToSkip, pBlockObj, direction ) ; PD_RC_CHECK( rc, PDERROR, "Open data context failed, rc: %d", rc ) ; suID = DMS_INVALID_CS ; plan = NULL ; mbContext = NULL ; if ( cb->getMonConfigCB()->timestampON ) { dataContext->getMonCB()->recordStartTimestamp() ; } if ( dataContext->getPlan()->sortRequired() ) { rc = rtnSort ( (rtnContext**)&dataContext, orderBy, cb, numToSkip, numToReturn, rtnCB, contextID ) ; PD_RC_CHECK( rc, PDERROR, "Failed to sort, rc: %d", rc ) ; } if ( ppContext ) { *ppContext = dataContext ; } if ( enablePrefetch ) { dataContext->enablePrefetch ( cb ) ; } done : PD_TRACE_EXITRC ( SDB_RTNQUERY, rc ) ; return rc ; error : if ( su && mbContext ) { su->data()->releaseMBContext( mbContext ) ; } if ( plan ) { plan->release() ; } if ( DMS_INVALID_CS != suID ) { dmsCB->suUnlock( suID ) ; } if ( -1 != contextID ) { rtnCB->contextDelete ( contextID, cb ) ; contextID = -1 ; } goto done ; }
INT32 migWorker::_getBsonFromQueue( pmdEDUCB *eduCB, BSONObj &obj ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__MIGWORKER__GETBSON ); INT32 tempRc = SDB_OK ; UINT32 offset = 0 ; UINT32 size = 0 ; UINT32 line = 0 ; UINT32 column = 0 ; UINT32 startBlock = 0 ; UINT32 endBlock = 0 ; //CHAR *pJsonBuffer = NULL ; _master->popFromQueue ( eduCB, offset, size, line, column ) ; if ( 0 == offset && 0 == size && 0 == line && 0 == column ) { rc = SDB_MIG_END_OF_QUEUE ; goto done ; } if ( MIG_PARSER_JSON == _master->_fileType ) { tempRc = fromjson ( _master->getBuffer() + offset, obj ) ; } else if ( MIG_PARSER_CSV == _master->_fileType ) { rc = _csvParser.csv2bson( _master->getBuffer() + offset, size, &obj ) ; if ( rc ) { rc = SDB_UTIL_PARSE_JSON_INVALID ; PD_LOG ( PDERROR, "Failed to convert Bson, rc=%d", rc ) ; goto error ; } } else { rc = SDB_MIG_UNKNOW_FILE_TYPE ; PD_LOG ( PDERROR, "unknow file type" ) ; goto error ; } if ( tempRc ) { //PD_LOG ( PDERROR, "Failed to json convert bson, json: %s , rc=%d", // _pJsonBuffer, tempRc ) ; _master->sendMsgToClient ( "Error: error " "in json format, line %u, column %u", line, column ) ; } rc = _master->getBlockFromPointer ( offset, size, startBlock, endBlock ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to get block from pointer, rc=%d", rc ) ; goto error ; } for ( UINT32 i = startBlock; i <= endBlock; ++i ) { _master->bucketDec( i ) ; } done: if ( tempRc ) { rc = tempRc ; } PD_TRACE_EXITRC ( SDB__MIGWORKER__GETBSON, rc ); return rc ; error: goto done ; }
INT32 _omTaskStrategyInfo::fromBSON( const BSONObj &obj )
{
   INT32 rc = SDB_OK ;
   BSONElement beField ;
   BSONObj ipsObj ;

   beField = obj.getField( OM_REST_FIELD_RULE_ID ) ;
   if ( !beField.isNumber() )
   {
      PD_LOG( PDERROR, "Field[%s] must be number",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   setID( beField.numberLong() ) ;

   beField = obj.getField( OM_REST_FIELD_TASK_ID ) ;
   if ( !beField.isNumber() )
   {
      PD_LOG( PDERROR, "Field[%s] must be number",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   setTaskID( beField.numberLong() ) ;

   beField = obj.getField( OM_REST_FIELD_TASK_NAME ) ;
   if ( String != beField.type() )
   {
      PD_LOG( PDERROR, "Field[%s] must be string",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   setTaskName( beField.str() ) ;

   beField = obj.getField( OM_REST_FIELD_NICE ) ;
   if ( !beField.isNumber() )
   {
      PD_LOG( PDERROR, "Field[%s] must be number",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   setNice( beField.numberInt() ) ;

   beField = obj.getField( OM_REST_FIELD_USER_NAME ) ;
   if ( String != beField.type() )
   {
      PD_LOG( PDERROR, "Field[%s] must be string",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   setUserName( beField.str() ) ;

   beField = obj.getField( OM_REST_FIELD_IPS ) ;
   if ( Array != beField.type() )
   {
      PD_LOG( PDERROR, "Field[%s] must be string array",
              beField.toString( TRUE, TRUE ).c_str() ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   else
   {
      clearIPSet() ;
      string tmpStr ;
      BSONElement e ;
      BSONObjIterator itr( beField.embeddedObject() ) ;
      while( itr.more() )
      {
         e = itr.next() ;
         // check the type of each array element, not the already validated array field
         if ( String != e.type() )
         {
            PD_LOG( PDERROR, "Field[%s] must be string array",
                    beField.toString( TRUE, TRUE ).c_str() ) ;
            rc = SDB_INVALIDARG ;
            goto error ;
         }
         tmpStr = e.str() ;
         if ( !tmpStr.empty() )
         {
            addIP( tmpStr ) ;
         }
      }
   }

done:
   return rc ;
error:
   goto done ;
}
INT32 utilWriteConfigFile( const CHAR * pFile, const CHAR * pData, BOOLEAN createOnly ) { INT32 rc = SDB_OK ; std::string tmpFile = pFile ; tmpFile += ".tmp" ; OSSFILE file ; BOOLEAN isOpen = FALSE ; BOOLEAN isBak = FALSE ; if ( SDB_OK == ossAccess( tmpFile.c_str() ) ) { ossDelete( tmpFile.c_str() ) ; } if ( SDB_OK == ossAccess( pFile ) ) { if ( createOnly ) { rc = SDB_FE ; goto error ; } if ( SDB_OK == ossRenamePath( pFile, tmpFile.c_str() ) ) { isBak = TRUE ; } } rc = ossOpen ( pFile, OSS_READWRITE|OSS_SHAREWRITE|OSS_REPLACE, OSS_RWXU, file ) ; if ( rc ) { goto error ; } isOpen = TRUE ; { SINT64 written = 0 ; SINT64 len = ossStrlen( pData ) ; while ( 0 < len ) { SINT64 tmpWritten = 0 ; rc = ossWrite( &file, pData + written , len, &tmpWritten ) ; if ( rc && SDB_INTERRUPT != rc ) { PD_LOG( PDERROR, "Failed to write file[%s]:%d", pFile, rc ) ; goto error ; } written += tmpWritten ; len -= tmpWritten ; rc = SDB_OK ; } } if ( SDB_OK == ossAccess( tmpFile.c_str() ) ) { ossDelete( tmpFile.c_str() ) ; } done: if ( isOpen ) { ossClose( file ) ; } return rc ; error: if ( isBak ) { if ( isOpen ) { ossClose( file ) ; isOpen = FALSE ; ossDelete( pFile ) ; } ossRenamePath( tmpFile.c_str(), pFile ) ; } goto done ; }
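// utilWriteConfigFile() keeps the old configuration as a ".tmp" backup, writes
// the new content in place, and restores the backup only when writing fails.
// Below is a minimal sketch of the same pattern using the C++17 standard
// library; it is not the engine's implementation and collapses all error
// handling to a bool.
#include <filesystem>
#include <fstream>
#include <string>

namespace fs = std::filesystem ;

bool writeConfigFile( const std::string &path, const std::string &data )
{
   const std::string bak = path + ".tmp" ;
   std::error_code ec ;
   bool hasBak = false ;

   if ( fs::exists( path, ec ) )
   {
      fs::rename( path, bak, ec ) ;             // keep the old file as a backup
      hasBak = !ec ;
   }
   {
      std::ofstream out( path, std::ios::trunc ) ;
      out << data ;
      out.flush() ;
      if ( out.good() )
      {
         if ( hasBak )
         {
            fs::remove( bak, ec ) ;             // success: the backup is no longer needed
         }
         return true ;
      }
   }
   if ( hasBak )
   {
      fs::remove( path, ec ) ;                  // failure: drop the partial file
      fs::rename( bak, path, ec ) ;             // and put the original back
   }
   return false ;
}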
INT32 migMaster::_checkErrAndRollback ( pmdEDUCB *eduCB, dmsStorageLoadOp* loadOp, dmsMBContext *mbContext, UINT32 &success, UINT32 &failure ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__MIGLOADJSONPS__CHECKWORKER ); pmdEDUEvent event ; // if we receive any post, that means worker got something wrong and we // should handle it respectively /*isGetEven = eduCB->waitEvent ( event, 0 ) ; if ( isGetEven ) { // if we receive anything, let's count the success and failure and // goon, note it means something wrong happened at worker workerRe = (_workerReturn *)event._Data ; success += workerRe->success ; failure += workerRe->failure ; --_workerNum ; }*/ // if something wrong happened at worker, or the connection is gone, // let's rollback //if ( isGetEven || !_sock->isConnected() ) if ( !_exitSignal ) { _exitSignal = !_sock->isConnected() ; if ( !_exitSignal ) { _exitSignal = eduCB->isForced() ; } } if ( _exitSignal ) { // print the error in log PD_LOG ( PDERROR, "rollback all data" ) ; // send error to user side, note we don't need to check rc since we // can't do anything if it's not success, anyway sendMsgToClient ( "Error: rollback all data" ) ; rc = _stopAndWaitWorker ( eduCB, success, failure ) ; PD_RC_CHECK ( rc, PDERROR, "Failed to call _stopAndWaitWorker, rc=%d", rc ) ; //roll back rc = loadOp->loadRollbackPhase ( mbContext ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to rollback, rc=%d", rc ) ; sendMsgToClient ( "Error: Failed to rollback, rc = %d", rc ) ; goto error ; } rc = SDB_LOAD_ROLLBACK ; goto error ; } done: PD_TRACE_EXITRC ( SDB__MIGLOADJSONPS__CHECKWORKER, rc ); return rc ; error: goto done ; }
static INT32 _ossEnumFiles( const string &dirPath, map<string, string> &mapFiles, const CHAR *filter, UINT32 filterLen, OSS_MATCH_TYPE type, UINT32 deep ) { INT32 rc = SDB_OK ; const CHAR *pFind = NULL ; try { fs::path dbDir ( dirPath ) ; fs::directory_iterator end_iter ; if ( 0 == deep ) { goto done ; } if ( fs::exists ( dbDir ) && fs::is_directory ( dbDir ) ) { for ( fs::directory_iterator dir_iter ( dbDir ); dir_iter != end_iter; ++dir_iter ) { try { if ( fs::is_regular_file ( dir_iter->status() ) ) { const std::string fileName = dir_iter->path().filename().string() ; if ( ( OSS_MATCH_NULL == type ) || ( OSS_MATCH_LEFT == type && 0 == ossStrncmp( fileName.c_str(), filter, filterLen ) ) || ( OSS_MATCH_MID == type && ossStrstr( fileName.c_str(), filter ) ) || ( OSS_MATCH_RIGHT == type && ( pFind = ossStrstr( fileName.c_str(), filter ) ) && pFind[filterLen] == 0 ) || ( OSS_MATCH_ALL == type && 0 == ossStrcmp( fileName.c_str(), filter ) ) ) { mapFiles[ fileName ] = dir_iter->path().string() ; } } else if ( fs::is_directory( dir_iter->path() ) && deep > 1 ) { _ossEnumFiles( dir_iter->path().string(), mapFiles, filter, filterLen, type, deep - 1 ) ; } } catch( std::exception &e ) { PD_LOG( PDWARNING, "File or dir[%s] occur exception: %s", dir_iter->path().string().c_str(), e.what() ) ; } } } else { rc = SDB_INVALIDARG ; goto error ; } } catch( std::exception &e ) { PD_LOG( PDERROR, "Occur exception: %s", e.what() ) ; rc = SDB_SYS ; goto error ; } done: return rc ; error: goto done ; }
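// _ossEnumFiles() matches file names by prefix, substring, suffix or whole name
// and recurses while `deep` allows.  Note that the OSS_MATCH_RIGHT branch only
// inspects the first strstr() hit, so a name whose filter text also appears
// earlier may not be recognised as a suffix match.  Below is a small sketch of
// the same walk with std::filesystem and an end-anchored suffix test; it is an
// illustration, not a drop-in replacement.
#include <filesystem>
#include <map>
#include <string>

namespace fs = std::filesystem ;

static bool hasPrefix( const std::string &s, const std::string &p )
{
   return 0 == s.compare( 0, p.size(), p ) ;
}
static bool hasSuffix( const std::string &s, const std::string &p )
{
   return s.size() >= p.size() &&
          0 == s.compare( s.size() - p.size(), p.size(), p ) ;
}

// Collect name -> full path for regular files matching the filter as a prefix
// or suffix, descending at most `deep` directory levels.
static void enumFiles( const fs::path &dir, const std::string &filter,
                       std::map<std::string, std::string> &out, unsigned deep )
{
   if ( 0 == deep || !fs::is_directory( dir ) )
   {
      return ;
   }
   for ( const auto &entry : fs::directory_iterator( dir ) )
   {
      const std::string name = entry.path().filename().string() ;
      if ( entry.is_regular_file() &&
           ( hasPrefix( name, filter ) || hasSuffix( name, filter ) ) )
      {
         out[ name ] = entry.path().string() ;
      }
      else if ( entry.is_directory() && deep > 1 )
      {
         enumFiles( entry.path(), filter, out, deep - 1 ) ;
      }
   }
}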
static INT32 _ossEnumSubDirs( const string &dirPath, const string &parentSubDir,
                              vector< string > &subDirs, UINT32 deep )
{
   INT32 rc = SDB_OK ;

   try
   {
      fs::path dbDir ( dirPath ) ;
      fs::directory_iterator end_iter ;
      string subDir ;

      if ( 0 == deep )
      {
         goto done ;
      }

      if ( fs::exists ( dbDir ) && fs::is_directory ( dbDir ) )
      {
         for ( fs::directory_iterator dir_iter ( dbDir );
               dir_iter != end_iter; ++dir_iter )
         {
            try
            {
               if ( fs::is_directory( dir_iter->path() ) )
               {
                  if ( parentSubDir.empty() )
                  {
                     subDir = dir_iter->path().leaf().string() ;
                  }
                  else
                  {
                     // assign the outer subDir; declaring a new local here would
                     // shadow it and push a stale value below
                     subDir = parentSubDir ;
                     subDir += OSS_FILE_SEP ;
                     subDir += dir_iter->path().leaf().string() ;
                  }
                  subDirs.push_back( subDir ) ;

                  if ( deep > 1 )
                  {
                     _ossEnumSubDirs( dir_iter->path().string(), subDir,
                                      subDirs, deep - 1 ) ;
                  }
               }
            }
            catch( std::exception &e )
            {
               PD_LOG( PDWARNING, "File or dir[%s] occur exception: %s",
                       dir_iter->path().string().c_str(), e.what() ) ;
            }
         }
      }
      else
      {
         rc = SDB_INVALIDARG ;
         goto error ;
      }
   }
   catch( std::exception &e )
   {
      PD_LOG( PDERROR, "Occur exception: %s", e.what() ) ;
      rc = SDB_SYS ;
      goto error ;
   }

done:
   return rc ;
error:
   goto done ;
}
INT32 _sptInvoker::_callbackDone( JSContext *cx, JSObject *obj, _sptReturnVal &rval, const bson::BSONObj &detail, jsval *rvp ) { INT32 rc = SDB_OK ; const sptProperty &rpro = rval.getVal() ; jsval val = JSVAL_VOID ; if ( EOO == rpro.getType() ) { *rvp = JSVAL_VOID ; goto done ; } else if ( Object == rpro.getType() ) { JSObject *jsObj = JS_NewObject ( cx, (JSClass *)(rval.getClassDef()), 0 , 0 ) ; if ( NULL == jsObj ) { PD_LOG( PDERROR, "faile to new js object" ) ; rc = SDB_OOM ; rval.releaseObj() ; goto error ; } JS_SetPrivate( cx, jsObj, rpro.getValue() ) ; if ( !rval.getValProperties().empty() ) { rc = _sptInvoker::setProperty( cx, jsObj, rval.getValProperties() ) ; if ( SDB_OK != rc ) { goto error ; } } val = OBJECT_TO_JSVAL( jsObj ) ; } else { rc = _getValFromProperty( cx, rpro, val ) ; if ( SDB_OK != rc ) { goto error ; } } if ( !rpro.getName().empty() && NULL != obj ) { if ( !JS_SetProperty( cx, obj, rpro.getName().c_str(), &val )) { PD_LOG( PDERROR, "failed to set obj to parent obj" ) ; rc = SDB_SYS ; goto error ; } } *rvp = val ; done: return rc ; error: goto done ; }
/* _sptInvoker implement */ INT32 _sptInvoker::_getValFromProperty( JSContext *cx, const sptProperty &pro, jsval &val ) { INT32 rc = SDB_OK ; if ( String == pro.getType() ) { JSString *jsstr = JS_NewStringCopyN( cx, pro.getString(), ossStrlen( pro.getString() ) ) ; if ( NULL == jsstr ) { ossPrintf( "%s\n", pro.getString() ) ; PD_LOG( PDERROR, "failed to create a js string" ) ; rc = SDB_SYS ; goto error ; } val = STRING_TO_JSVAL( jsstr ) ; } else if ( Bool == pro.getType() ) { BOOLEAN v = TRUE ; rc = pro.getNative( Bool, &v ) ; if ( SDB_OK != rc ) { goto error ; } val = BOOLEAN_TO_JSVAL( v ) ; } else if ( NumberInt == pro.getType() ) { INT32 v = 0 ; rc = pro.getNative( NumberInt, &v ) ; if ( SDB_OK != rc ) { goto error ; } val = INT_TO_JSVAL( v ) ; } else if ( NumberDouble == pro.getType() ) { FLOAT64 v = 0 ; rc = pro.getNative( NumberDouble, &v ) ; if ( SDB_OK != rc ) { goto error ; } val = DOUBLE_TO_JSVAL( v ) ; } else { PD_LOG( PDERROR, "the type %d is not surpported yet.", pro.getType() ) ; rc = SDB_SYS ; goto error ; } done: return rc ; error: goto done ; }
// initialize a log file, file size max 4GB // PD_TRACE_DECLARE_FUNCTION ( SDB__DPSLOGFILE_INIT, "_dpsLogFile::init" ) INT32 _dpsLogFile::init( const CHAR *path, UINT32 size, UINT32 fileNum ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__DPSLOGFILE_INIT ) ; BOOLEAN created = FALSE ; SDB_ASSERT ( 0 == ( _fileSize % DPS_DEFAULT_PAGE_SIZE ), "Size must be multiple of DPS_DEFAULT_PAGE_SIZE bytes" ) ; _fileSize = size ; _fileNum = fileNum ; _idleSize = _fileSize ; // allocate OSS_FILE, free in destructor _file = SDB_OSS_NEW _OSS_FILE(); if ( !_file ) { rc = SDB_OOM; PD_LOG ( PDERROR, "new _OSS_FILE failed!" ); goto error; } // if the file exist, restore if ( SDB_OK == ossAccess( path ) ) { rc = ossOpen ( path, OSS_READWRITE|OSS_SHAREWRITE, OSS_RWXU, *_file ) ; if ( rc == SDB_OK ) { rc = _restore () ; if ( rc == SDB_OK ) { UINT32 startOffset = 0 ; if ( DPS_INVALID_LSN_OFFSET != _logHeader._firstLSN.offset ) { startOffset = (UINT32)( _logHeader._firstLSN.offset % _fileSize ) ; } PD_LOG ( PDEVENT, "Restore dps log file[%s] succeed, " "firstLsn[%lld], idle space: %u, start offset: %d", path, getFirstLSN().offset, getIdleSize(), startOffset ) ; goto done ; } else { close () ; PD_LOG ( PDEVENT, "Restore dps log file[%s] failed[rc:%d]", path, rc ) ; goto error ; } } } if ( SDB_OK == ossAccess( path ) ) { rc = ossDelete ( path ); if ( SDB_IO == rc ) { PD_LOG ( PDERROR, "Failed to delete file at %s", path ) ; goto error; } } // open the file with "create only" and "read write" mode, for rx-r----- rc = ossOpen( path, OSS_CREATEONLY |OSS_READWRITE | OSS_SHAREWRITE, OSS_RWXU, *_file ); if ( rc ) { PD_LOG ( PDERROR, "Failed to open log file %s, rc = %d", path, rc ) ; goto error; } created = TRUE ; // increase the file size to the given size plus log file header rc = ossExtendFile( _file, (SINT64)_fileSize + DPS_LOG_HEAD_LEN ); if ( rc ) { close() ; PD_LOG ( PDERROR, "Failed to extend log file size to %d, rc = %d", size + DPS_LOG_HEAD_LEN, rc ) ; goto error; } _initHead ( DPS_INVALID_LOG_FILE_ID ) ; rc = _flushHeader () ; if ( rc ) { close () ; PD_LOG ( PDERROR, "Failed to flush header, rc = %d", rc ) ; goto error ; } // Currently let's just skip head rc = ossSeek ( _file, DPS_LOG_HEAD_LEN, OSS_SEEK_SET ) ; if ( rc ) { close() ; PD_LOG ( PDERROR, "Failed to seek to %d offset in log file, rc = %d", DPS_LOG_HEAD_LEN, rc ) ; goto error ; } done: PD_TRACE_EXITRC ( SDB__DPSLOGFILE_INIT, rc ); return rc; error: if ( NULL != _file ) { SDB_OSS_DEL _file; _file = NULL ; } if ( created ) { INT32 rcTmp = SDB_OK ; rcTmp = ossDelete( path ) ; if ( SDB_OK != rcTmp ) { PD_LOG( PDERROR, "failed to remove new file[%s], rc:%d", path, rc ) ; } } goto done; }
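// After a successful restore, init() derives the in-file start offset as
// firstLSN.offset % _fileSize.  The tiny sketch below just demonstrates that
// arithmetic: a global LSN offset maps to a log file slot and an offset inside
// it.  The 64MB size and 20-file count are illustrative values only.
#include <cstdint>
#include <cstdio>

int main()
{
   const uint64_t fileSize = 64ULL * 1024 * 1024 ;
   const uint32_t fileNum  = 20 ;

   uint64_t lsnOffset   = 3ULL * fileSize + 4096 ;            // some global LSN offset
   uint64_t inFileStart = lsnOffset % fileSize ;              // same math as startOffset above
   uint64_t fileSlot    = ( lsnOffset / fileSize ) % fileNum ;

   std::printf( "log file slot %llu, in-file offset %llu\n",
                (unsigned long long)fileSlot,
                (unsigned long long)inFileStart ) ;
   return 0 ;
}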
// PD_TRACE_DECLARE_FUNCTION ( SDB__DPSLOGFILE__RESTRORE, "_dpsLogFile::_restore" ) INT32 _dpsLogFile::_restore () { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__DPSLOGFILE__RESTRORE ); INT64 fileSize = 0 ; UINT64 offSet = 0 ; UINT64 baseOffset = 0 ; dpsLogRecordHeader lsnHeader ; CHAR *lastRecord = NULL ; UINT64 lastOffset = 0 ; UINT32 lastLen = 0 ; _inRestore = TRUE ; //Judge the length is right rc = ossGetFileSize( _file, &fileSize ) ; if ( SDB_OK != rc ) { goto error ; } if ( fileSize < (INT64)( _fileSize + sizeof(dpsLogHeader) ) ) { PD_LOG ( PDERROR, "DPS file size[%d] is smaller than config[%d]", fileSize - sizeof(dpsLogHeader), _fileSize ) ; rc = SDB_DPS_FILE_SIZE_NOT_SAME ; goto error ; } //Init header rc = _readHeader() ; if ( SDB_OK != rc ) { PD_LOG ( PDERROR, "Fail to read dps file header[rc:%d]", rc ) ; goto error ; } // check header info if ( ossStrncmp( _logHeader._eyeCatcher, DPS_LOG_HEADER_EYECATCHER, sizeof( _logHeader._eyeCatcher ) ) != 0 ) { PD_LOG( PDERROR, "DPS file eye catcher error" ) ; rc = SDB_DPS_FILE_NOT_RECOGNISE ; goto error ; } else if ( _logHeader._fileSize != 0 && _logHeader._fileSize != _fileSize ) { PD_LOG( PDERROR, "DPS file's meta size[%d] is not the same with " "config[%d]", _logHeader._fileSize, _fileSize ) ; rc = SDB_DPS_FILE_SIZE_NOT_SAME ; goto error ; } else if ( _logHeader._fileNum != 0 && _logHeader._fileNum != _fileNum ) { PD_LOG( PDERROR, "DPS file's meta file num[%d] is not the same with " "config[%d]", _logHeader._fileNum, _fileNum ) ; rc = SDB_INVALIDARG ; goto error ; } // check the real size if ( fileSize > (INT64)( _fileSize + sizeof(dpsLogHeader) ) ) { PD_LOG( PDERROR, "DPS file real size[%d] is not the same with " "config[%d]", fileSize - sizeof(dpsLogHeader), _fileSize ) ; // start up from crash if ( !pmdGetStartup().isOK() ) { rc = ossTruncateFile( _file, _fileSize + sizeof(dpsLogHeader) ) ; if ( rc ) { PD_LOG( PDWARNING, "Tuncate dps file to config size failed, " "rc: %d", rc ) ; goto error ; } PD_LOG( PDEVENT, "Tuncate dps file to config size[%d]", _fileSize ) ; } else { goto error ; } } PD_LOG ( PDEVENT, "Header info[first lsn:%d.%lld, logID:%d]", _logHeader._firstLSN.version, _logHeader._firstLSN.offset, _logHeader._logID ) ; // upgrade the header if ( _logHeader._version != DPS_LOG_FILE_VERSION1 ) { _logHeader._version = DPS_LOG_FILE_VERSION1 ; _logHeader._fileSize = _fileSize ; _logHeader._fileNum = _fileNum ; rc = _flushHeader() ; PD_RC_CHECK( rc, PDERROR, "Failed to flush header, rc: %d", rc ) ; } if ( _logHeader._logID == DPS_INVALID_LOG_FILE_ID || _logHeader._firstLSN.invalid() ) { _logHeader._firstLSN.version = DPS_INVALID_LSN_VERSION ; _logHeader._firstLSN.offset = DPS_INVALID_LSN_OFFSET ; goto done ; } offSet = _logHeader._firstLSN.offset % _fileSize ; baseOffset = _logHeader._firstLSN.offset - offSet ; //analysis the file while ( offSet < _fileSize ) { rc = read ( offSet + baseOffset , sizeof (dpsLogRecordHeader), (CHAR*)&lsnHeader ) ; if ( SDB_OK != rc ) { PD_LOG ( PDERROR, "Failed to read lsn header[offset:%lld,rc:%d]", offSet, rc ) ; goto error ; } if ( lsnHeader._lsn != offSet + baseOffset ) { PD_LOG ( PDEVENT, "LSN is not the same[%lld!=%lld]", lsnHeader._lsn, offSet + baseOffset ) ; break ; } else if ( offSet + lsnHeader._length > _fileSize ) { PD_LOG ( PDEVENT, "LSN length[%d] is over the file " "size[offSet:%lld]", lsnHeader._length, offSet ) ; break ; } else if ( lsnHeader._length < sizeof (dpsLogRecordHeader) ) { PD_LOG ( PDEVENT, "LSN length[%d] less than min[%d], invalid LSN", lsnHeader._length, sizeof 
(dpsLogRecordHeader) ) ; break ; } offSet += lsnHeader._length ; lastOffset = offSet ; lastLen = lsnHeader._length ; } /// ensure that the last record is valid. if ( 0 < lastLen && 0 < lastOffset ) { _dpsLogRecord lr ; lastRecord = ( CHAR * )SDB_OSS_MALLOC( lastLen ) ; if ( NULL == lastRecord ) { PD_LOG( PDERROR, "failed to allocate mem.") ; rc = SDB_OOM ; goto error ; } rc = read( lastOffset + baseOffset - lastLen, lastLen, lastRecord ) ; if ( SDB_OK != rc ) { PD_LOG( PDERROR, "failed to read dps record[%lld, rc:%d]", offSet, rc ) ; goto error ; } rc = lr.load( lastRecord ) ; if ( SDB_DPS_CORRUPTED_LOG == rc ) { /// the last record is corrupted. move to pre one. offSet -= lastLen ; rc = SDB_OK ; const dpsLogRecordHeader *corruptedHeader = ( const dpsLogRecordHeader * )lastRecord ; PD_LOG( PDEVENT, "last log record(lsn:%lld) is corrupted.", corruptedHeader->_lsn ) ; /// only one corrupted log in this file. if ( 0 == offSet ) { _logHeader._firstLSN.offset = DPS_INVALID_LSN_OFFSET ; _logHeader._firstLSN.version = DPS_INVALID_LSN_VERSION ; } } else if ( SDB_OK != rc ) { PD_LOG( PDERROR, "failed to load record log:%d", rc ) ; goto error ; } } _idleSize = _fileSize - offSet ; done: _inRestore = FALSE ; SAFE_OSS_FREE( lastRecord ) ; PD_TRACE_EXITRC ( SDB__DPSLOGFILE__RESTRORE, rc ); return rc ; error: goto done ; }
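// _restore() walks the records of one log file: read the fixed header at the
// current offset, accept it only if its _lsn equals that offset and its length
// is sane, then advance by the length.  The self-contained sketch below mirrors
// that scan over an in-memory buffer; the header layout is illustrative, not
// the real dpsLogRecordHeader.
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <vector>

struct RecHeader
{
   uint64_t lsn ;      // must equal the record's own offset
   uint32_t length ;   // total record length, header included
} ;

static size_t scanUsedBytes( const std::vector<char> &file )
{
   size_t offset = 0 ;
   while ( offset + sizeof( RecHeader ) <= file.size() )
   {
      RecHeader h ;
      std::memcpy( &h, file.data() + offset, sizeof( h ) ) ;
      if ( h.lsn != offset ||                       // not a record written for this slot
           h.length < sizeof( RecHeader ) ||        // impossible length
           offset + h.length > file.size() )        // would run past the end
      {
         break ;
      }
      offset += h.length ;
   }
   return offset ;                                  // bytes in use; idle = size - offset
}

int main()
{
   std::vector<char> file( 4096, 0 ) ;
   RecHeader first = { 0, 64 } ;                    // one well-formed record at offset 0
   std::memcpy( file.data(), &first, sizeof( first ) ) ;
   std::printf( "used bytes: %zu\n", scanUsedBytes( file ) ) ;
   return 0 ;
}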
// PD_TRACE_DECLARE_FUNCTION ( SDB_RTNINSERT2, "rtnInsert" ) INT32 rtnInsert ( const CHAR *pCollectionName, BSONObj &objs, INT32 objNum, INT32 flags, pmdEDUCB *cb, SDB_DMSCB *dmsCB, SDB_DPSCB *dpsCB, INT16 w ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB_RTNINSERT2 ) ; SDB_ASSERT ( pCollectionName, "collection name can't be NULL" ) ; SDB_ASSERT ( cb, "educb can't be NULL" ) ; SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ; dmsStorageUnit *su = NULL ; dmsStorageUnitID suID = DMS_INVALID_CS ; const CHAR *pCollectionShortName = NULL ; UINT32 insertCount = 0 ; BOOLEAN writable = FALSE ; ossValuePtr pDataPos = 0 ; rc = dmsCB->writable( cb ) ; if ( rc ) { PD_LOG ( PDERROR, "Database is not writable, rc = %d", rc ) ; goto error; } writable = TRUE; rc = rtnResolveCollectionNameAndLock ( pCollectionName, dmsCB, &su, &pCollectionShortName, suID ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to resolve collection name %s", pCollectionName ) ; goto error ; } if ( objs.isEmpty () ) { PD_LOG ( PDERROR, "Insert record can't be empty" ) ; rc = SDB_INVALIDARG ; goto error ; } pDataPos = (ossValuePtr)objs.objdata() ; for ( INT32 i = 0 ; i < objNum ; ++i ) { if ( ++insertCount > RTN_INSERT_ONCE_NUM ) { insertCount = 0 ; if ( cb->isInterrupted() ) { rc = SDB_APP_INTERRUPT ; goto error ; } } try { BSONObj record ( (const CHAR*)pDataPos ) ; rc = su->insertRecord ( pCollectionShortName, record, cb, dpsCB ) ; if ( rc ) { if ( ( SDB_IXM_DUP_KEY == rc ) && ( FLG_INSERT_CONTONDUP & flags ) ) { rc = SDB_OK ; } else { PD_LOG ( PDERROR, "Failed to insert record %s into " "collection: %s, rc: %d", record.toString().c_str(), pCollectionName, rc ) ; goto error ; } } pDataPos += ossAlignX ( (ossValuePtr)record.objsize(), 4 ) ; } catch ( std::exception &e ) { PD_LOG ( PDERROR, "Failed to convert to BSON and insert to " "collection: %s", e.what() ) ; rc = SDB_INVALIDARG ; goto error ; } } done : if ( DMS_INVALID_CS != suID ) { dmsCB->suUnlock ( suID ) ; } if ( writable ) { dmsCB->writeDown( cb ); } if ( cb ) { if ( SDB_OK == rc && dpsCB ) { rc = dpsCB->completeOpr( cb, w ) ; } } PD_TRACE_EXITRC ( SDB_RTNINSERT2, rc ) ; return rc ; error : goto done ; }
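// rtnInsert() receives several records packed back to back in one buffer: each
// record is a BSON object whose first four bytes are its own little-endian
// size, and the cursor advances by that size rounded up to a 4-byte boundary,
// which is what the ossAlignX( record.objsize(), 4 ) step does.  The sketch
// below walks such a buffer using plain size prefixes instead of real BSON.
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <vector>

static size_t alignUp4( size_t v ) { return ( v + 3 ) & ~size_t( 3 ) ; }

int main()
{
   // Two fake "objects" of 5 and 6 bytes, each starting with its own size.
   std::vector<char> buf ;
   for ( int32_t sz : { 5, 6 } )
   {
      size_t pos = buf.size() ;
      buf.resize( pos + alignUp4( (size_t)sz ), 0 ) ;
      std::memcpy( buf.data() + pos, &sz, sizeof( sz ) ) ;
   }

   size_t pos = 0 ;
   for ( int i = 0 ; i < 2 ; ++i )
   {
      int32_t objSize = 0 ;
      std::memcpy( &objSize, buf.data() + pos, sizeof( objSize ) ) ;
      std::printf( "record %d: %d bytes at offset %zu\n", i, objSize, pos ) ;
      pos += alignUp4( (size_t)objSize ) ;          // same 4-byte step as the insert loop
   }
   return 0 ;
}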
// PD_TRACE_DECLARE_FUNCTION ( SDB__DMSROUNIT__INIT, "_dmsReorgUnit::_init" ) INT32 _dmsReorgUnit::_init ( BOOLEAN createNew ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__DMSROUNIT__INIT ); class _reorgUnitHead *unitHead = NULL ; INT32 bufSize = ossRoundUpToMultipleX ( sizeof ( class _reorgUnitHead ), DMS_REORG_UNIT_HEAD_SIZE_UNIT ) ; INT32 restSize = bufSize ; _headSize = bufSize ; CHAR *pBuffer = (CHAR*)SDB_OSS_MALLOC (bufSize) ; if ( !pBuffer ) { PD_LOG ( PDERROR, "Failed to allocate %d bytes of memory", bufSize ) ; rc = SDB_OOM ; goto error ; } unitHead = (class _reorgUnitHead*)pBuffer ; ossMemset ( unitHead, 0, bufSize ) ; if ( createNew ) { SINT64 writeSize = 0 ; _readOnly = FALSE ; ossMemcpy ( unitHead->_eyeCatcher, DMS_REORG_UNIT_EYECATCHER, DMS_REORG_UNIT_EYECATCHER_LEN ) ; unitHead->_headerSize = bufSize ; ossMemcpy ( unitHead->_fileName, _fileName, OSS_MAX_PATHSIZE ) ; unitHead->_pageSize = _pageSize ; while ( restSize != 0 ) { rc = ossWrite ( &_file, &pBuffer[bufSize-restSize], restSize, &writeSize ) ; if ( rc && SDB_INTERRUPT != rc ) { PD_LOG ( PDERROR, "Failed to write into file: %s, rc = %d", _fileName, rc ) ; goto error ; } restSize -= writeSize ; rc = SDB_OK ; } } else { SINT64 readSize = 0 ; _readOnly = TRUE ; while ( restSize > 0 ) { rc = ossRead ( &_file, &pBuffer[bufSize-restSize], restSize, &readSize ) ; if ( rc && SDB_INTERRUPT != rc ) { PD_LOG ( PDERROR, "Failed to read from file: %s, rc = %d", _fileName, rc ) ; goto error ; } restSize -= readSize ; rc = SDB_OK ; } if ( ossMemcmp ( unitHead->_eyeCatcher, DMS_REORG_UNIT_EYECATCHER, DMS_REORG_UNIT_EYECATCHER_LEN ) || unitHead->_headerSize != bufSize ) { PD_LOG ( PDERROR, "Invalid reorg file is detected" ) ; rc = SDB_DMS_INVALID_REORG_FILE ; goto error ; } } done : if ( pBuffer ) { SDB_OSS_FREE ( pBuffer ) ; } PD_TRACE_EXITRC ( SDB__DMSROUNIT__INIT, rc ); return rc ; error : goto done ; }
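// Both branches of _init() loop on ossWrite()/ossRead() because a single call
// may transfer fewer bytes than requested; the remainder is retried until the
// whole header has moved.  The POSIX sketch below shows the same "write it all"
// loop; it is a generic illustration, not the oss layer.
#include <unistd.h>
#include <cerrno>
#include <cstddef>

static bool writeAll( int fd, const char *buf, size_t len )
{
   size_t rest = len ;
   while ( rest > 0 )
   {
      ssize_t n = ::write( fd, buf + ( len - rest ), rest ) ;
      if ( n < 0 )
      {
         if ( EINTR == errno )
         {
            continue ;          // interrupted: retry, like the SDB_INTERRUPT case above
         }
         return false ;         // genuine I/O error
      }
      rest -= (size_t)n ;
   }
   return true ;
}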
INT32 _ossModuleHandle::init () { INT32 rc = SDB_OK ; CHAR strPath [ 2*OSS_MAX_PATHSIZE + 1 ] = {0} ; CHAR strModule [ OSS_MAX_PATHSIZE + 1 ] = {0} ; OSS_MHANDLE handle = 0 ; CHAR *p = NULL ; #if defined (_WINDOWS) UINT32 errorMode ; #endif PD_TRACE_ENTRY ( SDB_OSSMODULEHANDLE_INIT ) ; if ( _moduleName[0] == '\0' ) { PD_LOG ( PDERROR, "Module name can't be empty" ) ; rc = SDB_INVALIDARG ; goto error ; } PD_TRACE3 ( SDB_OSSMODULEHANDLE_INIT, PD_PACK_STRING(_moduleName), PD_PACK_STRING(_libPath), PD_PACK_UINT(_flags) ) ; ossStrncpy ( strModule, _moduleName, sizeof(strModule) ) ; p = ossStrchr ( strModule, '(' ) ; if ( p ) { *p = '\0' ; } rc = patchModuleName( strModule, _moduleName, sizeof(_moduleName) ); PD_RC_CHECK ( rc, PDERROR, "Failed to patch module name, rc = %d", rc ) ; if ( _libPath[0] ) { INT32 pathLen = 0 ; rc = ossAccess ( _libPath, #if defined (_LINUX) F_OK #elif defined (_WINDOWS) 0 #endif ) ; PD_RC_CHECK ( rc, PDERROR, "Failed to access path %s, rc = %d", _libPath, rc ) ; ossStrncat ( strPath, _libPath, sizeof(strPath) ) ; pathLen = ossStrlen ( strPath ) ; if ( strPath[pathLen-1] != OSS_FILE_SEP_CHAR ) { if ( pathLen >= OSS_MAX_PATHSIZE ) { PD_LOG ( PDERROR, "library path is too long: %s", _libPath ) ; rc = SDB_INVALIDARG ; goto error ; } strPath[pathLen-1] = OSS_FILE_SEP_CHAR ; strPath[pathLen] = '\0' ; } } if ( ossStrlen ( strPath ) + ossStrlen ( _moduleName ) >= sizeof(strPath) ) { PD_LOG ( PDERROR, "path + module name is too long: %s:%s", strPath, _moduleName ) ; rc = SDB_INVALIDARG ; goto error ; } ossStrncat ( strPath, _moduleName, OSS_MAX_PATHSIZE ) ; #if defined (_LINUX) handle = dlopen ( strPath, _flags | RTLD_NOW ) ; if ( !handle ) { PD_LOG ( PDERROR, "Failed to load module %s, error = %s", strPath, dlerror() ) ; rc = SDB_SYS ; goto error ; } _isInitialized = TRUE ; _moduleHandle = handle ; dlerror() ; #elif defined (_WINDOWS) errorMode = SetErrorMode ( SEM_NOOPENFILEERRORBOX | SEM_FAILCRITICALERRORS ) ; _moduleHandle = LoadLibrary ( (LPCTSTR)strPath ) ; SetErrorMode ( errorMode ) ; if ( NULL == _moduleHandle ) { rc = ossGetLastError () ; PD_LOG ( PDERROR, "Failed to load module %s, error = %d", _moduleName, rc ) ; OSSMODULEHANDLE_ERR(rc) ; goto error ; } #endif done : PD_TRACE_EXITRC ( SDB_OSSMODULEHANDLE_INIT, rc ) ; return rc ; error : _isInitialized = FALSE ; _moduleHandle = OSS_MINVALIDHANDLE ; goto done ; }
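// The Linux branch above is a thin wrapper around dlopen()/dlerror().  The
// stand-alone sketch below exercises that API directly: open a library, clear
// the error state, resolve one symbol, and report dlerror() on failure.  The
// library and symbol names are placeholders.
#include <dlfcn.h>
#include <cstdio>

int main()
{
   void *handle = ::dlopen( "libm.so.6", RTLD_NOW ) ;
   if ( !handle )
   {
      std::fprintf( stderr, "dlopen failed: %s\n", ::dlerror() ) ;
      return 1 ;
   }
   ::dlerror() ;                                   // clear any stale error, as init() does
   void *sym = ::dlsym( handle, "cos" ) ;
   const char *err = ::dlerror() ;
   if ( err )
   {
      std::fprintf( stderr, "dlsym failed: %s\n", err ) ;
   }
   else
   {
      std::printf( "resolved symbol at %p\n", sym ) ;
   }
   ::dlclose( handle ) ;
   return 0 ;
}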
INT32 omRemoveBusinessCommand::doCommand() { INT32 rc = SDB_OK ; INT64 taskID = 0 ; BSONObj buzInfo ; BSONObj taskConfig ; BSONArray resultInfo ; omArgOptions option( _restAdaptor, _restSession ) ; omRestTool restTool( _restAdaptor, _restSession ) ; _setFileLanguageSep() ; pmdGetThreadEDUCB()->resetInfo( EDU_INFO_ERROR ) ; rc = option.parseRestArg( "s", OM_REST_FIELD_BUSINESS_NAME, &_businessName ) ; if ( rc ) { _errorMsg.setError( TRUE, option.getErrorMsg() ) ; PD_LOG( PDERROR, "failed to parse rest arg: rc=%d", rc ) ; goto error ; } rc = _check( buzInfo ) ; if ( rc ) { PD_LOG( PDERROR, "failed to check: rc=%d", rc ) ; goto error ; } rc = _generateRequest( buzInfo, taskConfig, resultInfo ) ; if ( rc ) { PD_LOG( PDERROR, "failed to generate task request: rc=%d", rc ) ; goto error ; } rc = _createTask( taskConfig, resultInfo, taskID ) ; if ( rc ) { PD_LOG( PDERROR, "failed to create task: rc=%d", rc ) ; goto error ; } { BSONObj result = BSON( OM_BSON_TASKID << taskID ) ; rc = restTool.appendResponeContent( result ) ; if ( rc ) { _errorMsg.setError( TRUE, "failed to append respone content: rc=%d", rc ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } } restTool.sendOkRespone() ; done: return rc ; error: restTool.sendRespone( rc, _errorMsg.getError() ) ; goto done ; }
INT32 migMaster::initialize ( setParameters *pParameters ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__MIGLOADJSONPS__INITIALIZE ); _utilParserParamet parserPara ; UINT32 startOffset = 0 ; UINT32 fieldsSize = 0 ; _pParameters = pParameters ; _sock = _pParameters->clientSock ; _fileType = _pParameters->fileType ; _blockSize = _pParameters->bufferSize/_pParameters->bucketNum ; _exitSignal = FALSE ; if ( MIG_PARSER_JSON == _pParameters->fileType ) { _parser = SDB_OSS_NEW _utilJSONParser() ; } else if ( MIG_PARSER_CSV == _pParameters->fileType ) { _parser = SDB_OSS_NEW _utilCSVParser() ; } else { rc = SDB_MIG_UNKNOW_FILE_TYPE ; PD_LOG ( PDERROR, "Unknow file type" ) ; goto error ; } PD_CHECK ( _parser, SDB_OOM, error, PDERROR, "Failed to new _parser" ) ; _ppBucket = (_bucket**)SDB_OSS_MALLOC ( sizeof(_bucket*) * _pParameters->bucketNum ) ; PD_CHECK ( _ppBucket, SDB_OOM, error, PDERROR, "Failed to allocate bucket pointer array" ) ; ossMemset ( _ppBucket, 0, sizeof(_bucket*) * _pParameters->bucketNum ) ; for ( UINT32 i = 0; i < _pParameters->bucketNum; ++i ) { _ppBucket[i] = SDB_OSS_NEW _bucket() ; PD_CHECK ( _ppBucket[i], SDB_OOM, error, PDERROR, "Failed to allocate bucket pointer array" ) ; } _parser->setDel ( _pParameters->delCFR[0], _pParameters->delCFR[1], _pParameters->delCFR[2] ) ; parserPara.fileName = _pParameters->pFileName ; parserPara.bufferSize = _pParameters->bufferSize ; parserPara.blockNum = _pParameters->bucketNum ; rc = _parser->initialize( &parserPara ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to init utilParseJSONs, rc = %d", rc ) ; if ( SDB_IO == rc || SDB_FNE == rc ) { sendMsgToClient ( "Failed to open file %s, rc=%d", _pParameters->pFileName, rc ) ; } goto error ; } _buffer = _parser->getBuffer() ; _delChar = _pParameters->delCFR[0] ; _delField = _pParameters->delCFR[1] ; _delRecord = _pParameters->delCFR[2] ; _autoAddField = TRUE ; _autoCompletion = FALSE ; _isHeaderline = _pParameters->headerline ; if ( _isHeaderline ) { rc = _parser->getNextRecord ( startOffset, fieldsSize ) ; if ( rc ) { if ( rc == SDB_EOF ) { if ( 0 == fieldsSize ) { goto done ; } } else { PD_LOG ( PDERROR, "Failed to _parser getNextRecord, rc=%d", rc ) ; goto error ; } } } if ( _pParameters->pFieldArray ) { _pFields = _pParameters->pFieldArray ; _fieldsSize = ossStrlen( _pFields ) ; } else { _pFields = _buffer + startOffset ; _fieldsSize = fieldsSize ; } done: PD_TRACE_EXITRC ( SDB__MIGLOADJSONPS__INITIALIZE, rc ); return rc ; error: goto done ; }
INT32 omRemoveBusinessCommand::_check( BSONObj &buzInfo ) { INT32 rc = SDB_OK ; INT64 taskID = -1 ; omDatabaseTool dbTool( _cb ) ; if ( FALSE == dbTool.isBusinessExist( _businessName ) ) { rc = SDB_INVALIDARG ; _errorMsg.setError( TRUE, "business does not exist: name=%s", _businessName.c_str() ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } if ( TRUE == dbTool.isRelationshipExistByBusiness( _businessName ) ) { rc = SDB_INVALIDARG ; _errorMsg.setError( TRUE, "business has relationship: name=%s", _businessName.c_str() ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } taskID = dbTool.getTaskIdOfRunningBuz( _businessName ) ; if( 0 <= taskID ) { rc = SDB_INVALIDARG ; _errorMsg.setError( TRUE, "business[%s] is exist " "in task["OSS_LL_PRINT_FORMAT"]", _businessName.c_str(), taskID ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } rc = dbTool.getOneBusinessInfo( _businessName, buzInfo ) ; if ( rc ) { _errorMsg.setError( TRUE, "failed to get business info: rc=%d", rc ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } if ( TRUE == _isDiscoveredBusiness( buzInfo ) ) { rc = SDB_INVALIDARG ; _errorMsg.setError( TRUE, "discovered business could not be removed: " "business=%s", _businessName.c_str() ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } _clusterName = buzInfo.getStringField( OM_BUSINESS_FIELD_CLUSTERNAME ) ; _businessType = buzInfo.getStringField( OM_BUSINESS_FIELD_TYPE ) ; _deployMod = buzInfo.getStringField( OM_BUSINESS_FIELD_DEPLOYMOD ) ; if ( OM_BUSINESS_SEQUOIADB != _businessType && OM_BUSINESS_ZOOKEEPER != _businessType && OM_BUSINESS_SEQUOIASQL_OLAP != _businessType && OM_BUSINESS_SEQUOIASQL_OLTP != _businessType ) { rc = SDB_INVALIDARG ; _errorMsg.setError( TRUE, "Unsupported business type: type=%s", _businessType.c_str() ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } done: return rc ; error: goto done ; }
// PD_TRACE_DECLARE_FUNCTION ( SDB__MIGLOADJSONPS__RUN, "migMaster::run" ) INT32 migMaster::run() { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__MIGLOADJSONPS__RUN ); UINT32 startOffset = 0 ; UINT32 size = 0 ; UINT32 startBlock = 0 ; UINT32 endBlock = 0 ; pmdKRCB *krcb = pmdGetKRCB () ; pmdEDUMgr *eduMgr = krcb->getEDUMgr () ; SDB_DMSCB *dmsCB = krcb->getDMSCB () ; pmdEDUCB *eduCB = eduMgr->getEDU() ; EDUID agentEDU = PMD_INVALID_EDUID ; BOOLEAN writable = FALSE ; BOOLEAN noClearFlag = FALSE ; dmsMBContext *mbContext = NULL ; UINT32 line = 0 ; UINT32 column = 0 ; UINT32 success = 0 ; UINT32 failure = 0 ; UINT16 clFlag = 0 ; dmsStorageUnitID suID = DMS_INVALID_CS ; dmsStorageUnit *su = NULL ; initWorker dataWorker ; pmdEDUEvent event ; dmsStorageLoadOp dmsLoadExtent ; sendMsgToClient ( "Load start" ) ; rc = rtnCollectionSpaceLock ( _pParameters->pCollectionSpaceName, dmsCB, FALSE, &su, suID ) ; if ( rc ) { if ( SDB_DMS_CS_NOTEXIST == rc ) { sendMsgToClient ( "Error: collection space not exist" ) ; } PD_LOG ( PDERROR, "Failed to lock collection space, rc=%d", rc ) ; goto error ; } dmsLoadExtent.init ( su ) ; rc = su->data()->getMBContext( &mbContext, _pParameters->pCollectionName, EXCLUSIVE ) ; if ( rc ) { if ( SDB_DMS_NOTEXIST == rc ) { sendMsgToClient ( "Error: collection not exist" ) ; } PD_LOG ( PDERROR, "Failed to lock collection, rc=%d", rc ) ; goto error ; } clFlag = mbContext->mb()->_flag ; if ( DMS_IS_MB_DROPPED( clFlag ) ) { PD_LOG( PDERROR, "Collection is droped" ) ; rc = SDB_COLLECTION_LOAD ; sendMsgToClient ( "Collection is droped" ) ; goto error ; } else if ( DMS_IS_MB_LOAD ( clFlag ) ) { PD_LOG( PDERROR, "Collection is loading" ) ; rc = SDB_COLLECTION_LOAD ; sendMsgToClient ( "Collection is loading" ) ; // we set noClearFlag to true, so that we'll convert the collection // flag to NORMAL in done noClearFlag = TRUE ; goto error ; } dmsLoadExtent.setFlagLoad ( mbContext->mb() ) ; dmsLoadExtent.setFlagLoadLoad ( mbContext->mb() ) ; // unlock mbContext->mbUnlock() ; rc = dmsCB->writable( eduCB ) ; if ( rc ) { PD_LOG ( PDERROR, "Database is not writable, rc = %d", rc ) ; goto error; } writable = TRUE; dataWorker.pMaster = this ; dataWorker.masterEDUID = eduCB->getID() ; dataWorker.pSu = su ; dataWorker.clLID = mbContext->clLID() ; dataWorker.collectionID = mbContext->mbID() ; for ( UINT32 i = 0; i < _pParameters->workerNum; ++i ) { eduMgr->startEDU ( EDU_TYPE_LOADWORKER, &dataWorker, &agentEDU ) ; } while ( TRUE ) { rc = _checkErrAndRollback ( eduCB, &dmsLoadExtent, mbContext, success, failure ) ; if ( SDB_TIMEOUT != rc && rc ) { PD_LOG ( PDERROR, "Failed to call _checkErrAndRollback, rc=%d", rc ) ; goto error ; } // fetch one record rc = _parser->getNextRecord ( startOffset, size, &line, &column, _ppBucket ) ; if ( rc ) { // special handle for end of file if ( rc == SDB_EOF ) { // when we hit end of file, let's push 0 to all worker threads, // with num of workers ( so each worker will dispatch one 0, and // exit ) rc = _stopAndWaitWorker ( eduCB, success, failure ) ; PD_RC_CHECK ( rc, PDERROR, "Failed to call _stopAndWaitWorker, rc=%d", rc ) ; break ; } sendMsgToClient ( "Error: Parse Json error in line: %u," " column: %u", line, column ) ; PD_LOG ( PDERROR, "Failed to parseJSONs getNextRecord,rc=%d", rc ) ; goto error1 ; } // calculate the blocks to be locked, based on the length of our record rc = getBlockFromPointer ( startOffset, size, startBlock, endBlock ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to get block from pointer, rc=%d", rc ) ; goto error1 ; } // lock them for ( UINT32 
i = startBlock; i <= endBlock; ++i ) { _ppBucket[i]->inc() ; } // push the record to queue pushToQueue ( startOffset, size, line, column ) ; } // while ( !eduCB->isForced() ) // when all workers are finish, let's start build phase to rebuild all // indexes sendMsgToClient ( "build index" ) ; rc = dmsLoadExtent.loadBuildPhase ( mbContext, eduCB, _pParameters->isAsynchronous, this, &success, &failure ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to load data, rc=%d", rc ) ; goto error ; } done: // we only lock and clear flag if we switched to load if ( su && mbContext && !noClearFlag ) { rc = mbContext->mbLock( EXCLUSIVE ) ; // we should log failure information if ( SDB_OK == rc ) { if ( dmsLoadExtent.isFlagLoadLoad ( mbContext->mb() ) ) { dmsLoadExtent.clearFlagLoadLoad ( mbContext->mb() ) ; } if ( dmsLoadExtent.isFlagLoadBuild ( mbContext->mb() ) ) { dmsLoadExtent.clearFlagLoadBuild ( mbContext->mb() ) ; } if ( dmsLoadExtent.isFlagLoad ( mbContext->mb() ) ) { dmsLoadExtent.clearFlagLoad ( mbContext->mb() ) ; } } else { PD_LOG ( PDERROR, "Failed to lock collection, rc=%d", rc ) ; } } // send the success message to client sendMsgToClient ( "success json: %u, failure json: %u", success, failure ) ; sendMsgToClient ( "Load end" ) ; if ( su && mbContext ) { su->data()->releaseMBContext( mbContext ) ; } if ( DMS_INVALID_CS != suID ) { dmsCB->suUnlock ( suID ) ; } // count down if ( writable ) { dmsCB->writeDown( eduCB ); } PD_TRACE_EXITRC ( SDB__MIGLOADJSONPS__RUN, rc ); return rc ; error: goto done ; error1: _stopAndWaitWorker ( eduCB, success, failure ) ; sendMsgToClient ( "Error: rollback all data" ) ; failure += success ; success = 0 ; rc = dmsLoadExtent.loadRollbackPhase ( mbContext ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to rollback, rc=%d", rc ) ; sendMsgToClient ( "Error: Failed to rollback, rc = %d", rc ) ; goto error ; } goto done ; }
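// Before queueing a record, run() asks getBlockFromPointer() which buffer
// blocks the record touches and bumps the matching buckets; the worker later
// decrements the same range.  The sketch below shows the block-range arithmetic
// such a mapping implies; the 4096-byte block size is illustrative.
#include <cstdint>
#include <cstdio>

int main()
{
   const uint32_t blockSize = 4096 ;
   uint32_t offset = 5000 ;
   uint32_t size   = 9000 ;

   uint32_t startBlock = offset / blockSize ;
   uint32_t endBlock   = ( offset + size - 1 ) / blockSize ;

   std::printf( "record [%u, %u) touches blocks %u..%u\n",
                offset, offset + size, startBlock, endBlock ) ;
   return 0 ;
}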
INT32 omRemoveBusinessCommand::_generateTaskConfig( list<BSONObj> &configList, BSONObj &taskConfig ) { INT32 rc = SDB_OK ; BSONObj filter ; BSONObjBuilder taskConfigBuilder ; BSONArrayBuilder configBuilder ; list<BSONObj>::iterator iter ; omDatabaseTool dbTool( _cb ) ; filter = BSON( OM_HOST_FIELD_NAME << "" << OM_HOST_FIELD_IP << "" << OM_HOST_FIELD_CLUSTERNAME << "" << OM_HOST_FIELD_USER << "" << OM_HOST_FIELD_PASSWD << "" << OM_HOST_FIELD_SSHPORT << "" ) ; taskConfigBuilder.append( OM_BSON_CLUSTER_NAME, _clusterName ) ; taskConfigBuilder.append( OM_BSON_BUSINESS_TYPE, _businessType ) ; taskConfigBuilder.append( OM_BSON_BUSINESS_NAME, _businessName ) ; taskConfigBuilder.append( OM_BSON_DEPLOY_MOD, _deployMod ) ; if ( OM_BUSINESS_SEQUOIADB == _businessType ) { string authUser ; string authPasswd ; rc = dbTool.getAuth( _businessName, authUser, authPasswd ) ; if ( rc ) { _errorMsg.setError( TRUE, "failed to get business auth: " "name=%s, rc=%d", _businessName.c_str(), rc ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } taskConfigBuilder.append( OM_TASKINFO_FIELD_AUTH_USER, authUser ) ; taskConfigBuilder.append( OM_TASKINFO_FIELD_AUTH_PASSWD, authPasswd ) ; } else if ( OM_BUSINESS_ZOOKEEPER == _businessType || OM_BUSINESS_SEQUOIASQL_OLAP == _businessType ) { string sdbUser ; string sdbPasswd ; string sdbUserGroup ; BSONObj clusterInfo ; rc = dbTool.getClusterInfo( _clusterName, clusterInfo ) ; if ( rc ) { _errorMsg.setError( TRUE, "failed to get cluster info: " "name=%s, rc=%d", _clusterName.c_str(), rc ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } sdbUser = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBUSER ) ; sdbPasswd = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBPASSWD ) ; sdbUserGroup = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBUSERGROUP ) ; taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBUSER, sdbUser ) ; taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBPASSWD, sdbPasswd ) ; taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBUSERGROUP, sdbUserGroup ) ; } else if( OM_BUSINESS_SEQUOIASQL_OLTP == _businessType ) { } for ( iter = configList.begin(); iter != configList.end(); ++iter ) { string hostName ; string installPath ; BSONObj hostInfo ; BSONObj tmpHostInfo ; BSONObj configInfo ; BSONObj packages ; hostName = iter->getStringField( OM_CONFIGURE_FIELD_HOSTNAME ) ; configInfo = iter->getObjectField( OM_CONFIGURE_FIELD_CONFIG ) ; rc = dbTool.getHostInfoByAddress( hostName, tmpHostInfo ) ; if ( rc ) { _errorMsg.setError( TRUE, "failed to get host info: name=%s, rc=%d", hostName.c_str(), rc ) ; PD_LOG( PDERROR, _errorMsg.getError() ) ; goto error ; } hostInfo = tmpHostInfo.filterFieldsUndotted( filter, TRUE ) ; packages = tmpHostInfo.getObjectField( OM_HOST_FIELD_PACKAGES ) ; { BSONObjIterator pkgIter( packages ) ; while ( pkgIter.more() ) { BSONElement ele = pkgIter.next() ; BSONObj pkgInfo = ele.embeddedObject() ; string pkgName = pkgInfo.getStringField( OM_HOST_FIELD_PACKAGENAME ) ; if ( pkgName == _businessType ) { installPath = pkgInfo.getStringField( OM_HOST_FIELD_INSTALLPATH ) ; break ; } } } { BSONObjIterator configIter( configInfo ) ; while ( configIter.more() ) { BSONObjBuilder configInfoBuilder ; BSONElement ele = configIter.next() ; BSONObj nodeInfo = ele.embeddedObject() ; if ( OM_BUSINESS_SEQUOIADB == _businessType && 0 == ossStrlen( nodeInfo.getStringField( OM_CONF_DETAIL_CATANAME ) ) ) { CHAR catName[ OM_INT32_LENGTH + 1 ] = { 0 } ; string svcName = nodeInfo.getStringField( OM_CONF_DETAIL_SVCNAME ) ; INT32 iSvcName = ossAtoi( svcName.c_str() 
) ; INT32 iCatName = iSvcName + MSG_ROUTE_CAT_SERVICE ; ossItoa( iCatName, catName, OM_INT32_LENGTH ) ; configInfoBuilder.append( OM_CONF_DETAIL_CATANAME, catName ) ; } configInfoBuilder.appendElements( nodeInfo ) ; configInfoBuilder.appendElements( hostInfo ) ; configInfoBuilder.append( OM_BSON_INSTALL_PATH, installPath ) ; configBuilder.append( configInfoBuilder.obj() ) ; } } } taskConfigBuilder.append( OM_TASKINFO_FIELD_CONFIG, configBuilder.arr() ) ; taskConfig = taskConfigBuilder.obj() ; done: return rc ; error: goto done ; }
// PD_TRACE_DECLARE_FUNCTION ( SDB__MIGWORKER__IMPORT, "migWorker::importData" ) INT32 migWorker::importData ( EDUID masterEDUID, dmsStorageUnit *su, UINT16 collectionID, UINT32 clLID, BOOLEAN isAsynchr ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY ( SDB__MIGWORKER__IMPORT ); pmdKRCB *krcb = pmdGetKRCB () ; pmdEDUMgr *eduMgr = krcb->getEDUMgr () ; pmdEDUCB *eduCB = eduMgr->getEDU() ; BOOLEAN isLast = FALSE ; BOOLEAN isFirst = TRUE ; BSONObj record ; dmsStorageLoadOp dmsLoadExtent ; _workerReturn *workRe = NULL ; _dmsMBContext *mbContext = NULL ; SDB_ASSERT ( su, "su is NULL" ) ; dmsLoadExtent.init ( su ) ; workRe = SDB_OSS_NEW _workerReturn() ; if ( !workRe ) { rc = SDB_OOM ; PD_LOG ( PDERROR, "memory error" ) ; goto error ; } rc = su->data()->getMBContext( &mbContext, collectionID, clLID, -1 ) ; if ( rc ) { PD_LOG( PDERROR, "Failed to get dms mb context, rc: %d", rc ) ; goto error ; } workRe->success = 0 ; workRe->failure = 0 ; workRe->rc = 0 ; rc = _csvParser.init( _master->_autoAddField, _master->_autoCompletion, _master->_isHeaderline, _master->_delChar, _master->_delField, _master->_delRecord ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to csv parser initialize, rc=%d", rc ) ; goto error ; } rc = _csvParser.parseHeader( _master->_pFields, _master->_fieldsSize ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to parse csv header, rc=%d", rc ) ; goto error ; } while ( !_master->_exitSignal && !eduCB->isInterrupted() && !eduCB->isDisconnected() ) { rc = _getBsonFromQueue ( eduCB, record ) ; if ( rc ) { if ( SDB_MIG_END_OF_QUEUE == rc ) { isLast = TRUE ; rc = SDB_OK ; if ( isFirst ) { goto done ; } } else if ( SDB_INVALIDARG == rc ) { ++workRe->failure ; continue ; } else { PD_LOG ( PDERROR, "Failed to get bson from queue, rc=%d", rc ) ; goto error ; } } rc = dmsLoadExtent.pushToTempDataBlock( mbContext, record, isLast, isAsynchr ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to import to block, rc=%d", rc ) ; goto error ; } if ( isLast ) { goto done ; } ++workRe->success ; isFirst = FALSE ; } goto error ; done: if ( su && mbContext ) { su->data()->releaseMBContext( mbContext ) ; } workRe->rc = rc ; rc = eduMgr->postEDUPost ( masterEDUID, PMD_EDU_EVENT_MSG, PMD_EDU_MEM_ALLOC, workRe ) ; if ( rc ) { PD_LOG ( PDERROR, "Failed to postEDUPost, rc=%d", rc ) ; } PD_TRACE_EXITRC ( SDB__MIGWORKER__IMPORT, rc ); return rc ; error: _master->_exitSignal = TRUE ; if ( workRe ) { workRe->failure += workRe->success ; workRe->success = 0 ; } goto done ; }
// PD_TRACE_DECLARE_FUNCTION ( SDB_DMSUNCOMPRESS, "dmsUncompress" ) INT32 dmsUncompress ( _pmdEDUCB *cb, _dmsCompressorEntry *compressorEntry, const CHAR *pInputData, INT32 inputSize, const CHAR **ppData, INT32 *pDataSize ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY( SDB_DMSUNCOMPRESS ) ; CHAR *pBuff = NULL ; UINT32 uncompressedLen = 0 ; SDB_ASSERT ( pInputData && ppData && pDataSize, "Data pointer and size pointer can't be NULL" ) ; SDB_ASSERT( compressorEntry, "Compressor entry pointer can't be NULL" ) ; _utilCompressor *compressor = compressorEntry->getCompressor() ; SDB_ASSERT( compressor, "Compressor pointer can't be NULL" ) ; if ( !compressor ) { compressor = getCompressorByType( UTIL_COMPRESSOR_SNAPPY ) ; } if ( !compressor ) { PD_LOG( PDERROR, "Occur serious error: " "The compressor handle is NULL" ) ; rc = SDB_SYS ; goto error ; } rc = compressor->getUncompressedLen( pInputData, inputSize, uncompressedLen ) ; PD_RC_CHECK( rc, PDERROR, "Failed to get uncompressed length, rc: %d", rc ) ; pBuff = cb->getUncompressBuff( uncompressedLen ) ; if ( !pBuff ) { PD_LOG( PDERROR, "Failed to allocate decompression buff, size: %d", uncompressedLen ) ; rc = SDB_OOM ; goto error ; } rc = compressor->decompress( pInputData, inputSize, pBuff, uncompressedLen, compressorEntry->getDictionary() ) ; PD_RC_CHECK( rc, PDERROR, "Failed to decompress data, rc: %d", rc ) ; if ( ppData ) { *ppData = pBuff ; } if ( pDataSize ) { *pDataSize = uncompressedLen ; } done : PD_TRACE_EXITRC( SDB_DMSUNCOMPRESS, rc ) ; return rc ; error : goto done ; }
INT32 _coordDeleteOperator::_prepareMainCLOp( coordCataSel &cataSel, coordSendMsgIn &inMsg, coordSendOptions &options, pmdEDUCB *cb, coordProcessResult &result ) { INT32 rc = SDB_OK ; MsgOpDelete *pDelMsg = ( MsgOpDelete* )inMsg.msg() ; INT32 flag = 0 ; CHAR *pCollectionName = NULL; CHAR *pDeletor = NULL; CHAR *pHint = NULL; CoordGroupSubCLMap &grpSubCl = cataSel.getGroup2SubsMap() ; CoordGroupSubCLMap::iterator it ; inMsg.data()->clear() ; rc = msgExtractDelete( (CHAR*)inMsg.msg(), &flag, &pCollectionName, &pDeletor, &pHint ) ; PD_RC_CHECK( rc, PDERROR, "Failed to parse delete request, rc: %d", rc ) ; try { BSONObj boDeletor( pDeletor ) ; BSONObj boHint( pHint ) ; BSONObj boNew ; CHAR *pBuff = NULL ; UINT32 buffLen = 0 ; UINT32 buffPos = 0 ; it = grpSubCl.begin() ; while( it != grpSubCl.end() ) { CoordSubCLlist &subCLLst = it->second ; netIOVec &iovec = inMsg._datas[ it->first ] ; netIOV ioItem ; ioItem.iovBase = (const CHAR*)inMsg.msg() + sizeof( MsgHeader ) ; ioItem.iovLen = ossRoundUpToMultipleX ( offsetof(MsgOpDelete, name) + pDelMsg->nameLength + 1, 4 ) - sizeof( MsgHeader ) ; iovec.push_back( ioItem ) ; boNew = _buildNewDeletor( boDeletor, subCLLst ) ; UINT32 roundLen = ossRoundUpToMultipleX( boNew.objsize(), 4 ) ; if ( buffPos + roundLen > buffLen ) { UINT32 alignLen = ossRoundUpToMultipleX( roundLen, DMS_PAGE_SIZE4K ) ; rc = cb->allocBuff( alignLen, &pBuff, &buffLen ) ; PD_RC_CHECK( rc, PDERROR, "Alloc buff[%u] failed, rc: %d", alignLen, rc ) ; _vecBlock.push_back( pBuff ) ; buffPos = 0 ; } ossMemcpy( &pBuff[ buffPos ], boNew.objdata(), boNew.objsize() ) ; ioItem.iovBase = &pBuff[ buffPos ] ; ioItem.iovLen = roundLen ; buffPos += roundLen ; iovec.push_back( ioItem ) ; ioItem.iovBase = boHint.objdata() ; ioItem.iovLen = boHint.objsize() ; iovec.push_back( ioItem ) ; ++it ; } } catch( std::exception &e ) { PD_LOG( PDERROR, "Parse delete message occur exception: %s", e.what() ) ; rc = SDB_SYS ; goto error ; } done: return rc ; error: _clearBlock( cb ) ; goto done ; }
// PD_TRACE_DECLARE_FUNCTION ( SDB_DMSCOMPRESS2, "dmsCompress" ) INT32 dmsCompress ( _pmdEDUCB *cb, _dmsCompressorEntry *compressorEntry, const CHAR *pInputData, INT32 inputSize, const CHAR **ppData, INT32 *pDataSize, UINT8 &ratio ) { INT32 rc = SDB_OK ; PD_TRACE_ENTRY( SDB_DMSCOMPRESS2 ) ; CHAR *pBuff = NULL ; UINT32 compressedLen = 0 ; SDB_ASSERT ( pInputData && ppData && pDataSize, "Data pointer and size pointer can't be NULL" ) ; SDB_ASSERT( compressorEntry, "Compressor entry pointer can't be NULL" ) ; _utilCompressor *compressor = compressorEntry->getCompressor() ; const utilDictHandle dictionary = compressorEntry->getDictionary() ; SDB_ASSERT( compressor, "Compressor pointer can't be NULL" ) ; if ( !compressor ) { PD_LOG( PDERROR, "Occur serious error: " "The compressor handle is NULL" ) ; rc = SDB_SYS ; goto error ; } rc = compressor->compressBound( inputSize, compressedLen, dictionary ) ; PD_RC_CHECK( rc, PDERROR, "Failed to get max compressed length, rc: %d", rc ) ; pBuff = cb->getCompressBuff( compressedLen ) ; if ( !pBuff ) { PD_LOG( PDERROR, "Failed to alloc compress buff, size: %d", compressedLen ) ; rc = SDB_OOM ; goto error ; } rc = compressor->compress( pInputData, inputSize, pBuff, compressedLen, dictionary ) ; if ( rc ) { PD_LOG( ( ( SDB_UTIL_COMPRESS_ABORT == rc ) ? PDINFO : PDERROR ), "Failed to compress data, rc: %d", rc ) ; goto error ; } if ( ppData ) { *ppData = pBuff ; } if ( pDataSize ) { *pDataSize = (INT32)compressedLen ; } ratio = (UINT8)( (*pDataSize) * 100 / inputSize ) ; done : PD_TRACE_EXITRC( SDB_DMSCOMPRESS2, rc ) ; return rc ; error : goto done ; }
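// dmsCompress() reports the ratio as the compressed size expressed as a
// percentage of the original.  The sketch below reproduces that arithmetic and
// additionally guards against a zero input size, which the snippet above does
// not check.
#include <cstdint>
#include <cstdio>

int main()
{
   int32_t inputSize      = 1000 ;
   int32_t compressedSize = 370 ;

   uint8_t ratio = 100 ;                            // default: no gain
   if ( inputSize > 0 )
   {
      ratio = (uint8_t)( (int64_t)compressedSize * 100 / inputSize ) ;
   }
   std::printf( "compressed to %u%% of the original size\n", (unsigned)ratio ) ;
   return 0 ;
}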
// PD_TRACE_DECLARE_FUNCTION ( SDB__MTHSELECTOR__BUILDCSV, "_mthSelector::_buildCSV" )
INT32 _mthSelector::_buildCSV( const bson::BSONObj &obj, bson::BSONObj &csv )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY( SDB__MTHSELECTOR__BUILDCSV ) ;
   BOOLEAN result = FALSE ;
   INT32 stringLength = 0 ;

   // in the first round, let's allocate memory
   if ( 0 == _stringOutputBufferSize )
   {
      rc = mthDoubleBufferSize ( &_stringOutputBuffer,
                                 _stringOutputBufferSize ) ;
      PD_RC_CHECK ( rc, PDERROR, "Failed to append string, rc = %d", rc ) ;
   }

   // write the element type (String) and an empty field name just in front
   // of the position where the CSV text will start
   _stringOutputBuffer[FIRST_ELEMENT_STARTING_POS-6] = String ;
   _stringOutputBuffer[FIRST_ELEMENT_STARTING_POS-5] = '\0' ;

   // keep doubling the buffer until the CSV conversion fits or the
   // threshold is reached
   while ( _stringOutputBufferSize < MAX_SELECTOR_BUFFER_THRESHOLD )
   {
      result = rawbson2csv ( obj.objdata(),
                             &_stringOutputBuffer[FIRST_ELEMENT_STARTING_POS],
                             _stringOutputBufferSize -
                             FIRST_ELEMENT_STARTING_POS ) ;
      if ( result )
      {
         break ;
      }
      else
      {
         rc = mthDoubleBufferSize ( &_stringOutputBuffer,
                                    _stringOutputBufferSize ) ;
         PD_RC_CHECK ( rc, PDERROR, "Failed to double buffer, rc = %d", rc ) ;
      }
   }
   if ( _stringOutputBufferSize >= MAX_SELECTOR_BUFFER_THRESHOLD )
   {
      PD_LOG ( PDERROR,
               "string output buffer size is greater than threshold" ) ;
      rc = SDB_INVALIDARG ;
      goto error ;
   }
   stringLength =
      ossStrlen ( &_stringOutputBuffer[FIRST_ELEMENT_STARTING_POS] ) ;
   // assign object length, 1 for 0 at the end, 1 for the eoo
   *(INT32*)_stringOutputBuffer =
      FIRST_ELEMENT_STARTING_POS + 2 + stringLength ;
   _stringOutputBuffer[ *(INT32*)_stringOutputBuffer - 1 ] = EOO ;
   // assign string length, 1 for 0 at the end
   *(INT32*)(&_stringOutputBuffer[FIRST_ELEMENT_STARTING_POS-4]) =
      stringLength + 1 ;
   // it should not cause a memory leak even if there is a previously owned
   // buffer, because _stringOutputBuffer is owned by the context and we
   // don't touch the holder in BSONObj, so the smart pointer should still
   // hold the original buffer it owns
   csv.init ( _stringOutputBuffer ) ;
done:
   PD_TRACE_EXITRC( SDB__MTHSELECTOR__BUILDCSV, rc ) ;
   return rc ;
error:
   goto done ;
}
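_buildCSV hand-writes a BSON document containing a single anonymous string field, which is why it patches a type byte, an empty field name, a string length and an EOO byte around the CSV text. A standalone sketch of the same layout in plain C++, assuming FIRST_ELEMENT_STARTING_POS is 10 (4 length bytes + type byte + empty name + 4 string-length bytes, which is what the offsets above imply) and a little-endian host, since BSON integers are little-endian:

#include <cstdint>
#include <cstring>
#include <vector>

// Manually lay out a BSON document with one anonymous string field:
//   int32 totalLen | 0x02 (String) | '\0' (empty field name)
//   | int32 strLen+1 | text bytes | '\0' | 0x00 (EOO)
std::vector<char> buildSingleStringBson( const char *text )
{
   const int32_t strLen   = static_cast<int32_t>( std::strlen( text ) ) ;
   const int32_t firstPos = 10 ;                       // where the text starts
   const int32_t totalLen = firstPos + strLen + 2 ;    // +1 trailing '\0', +1 EOO

   std::vector<char> buf( totalLen, 0 ) ;
   std::memcpy( &buf[0], &totalLen, sizeof( totalLen ) ) ;
   buf[4] = 0x02 ;                                     // bson String type
   buf[5] = '\0' ;                                     // empty field name
   const int32_t lenWithNul = strLen + 1 ;
   std::memcpy( &buf[6], &lenWithNul, sizeof( lenWithNul ) ) ;
   std::memcpy( &buf[firstPos], text, strLen ) ;
   buf[firstPos + strLen] = '\0' ;
   buf[totalLen - 1] = 0x00 ;                          // EOO
   return buf ;
}

This matches the bookkeeping in _buildCSV: the total length is FIRST_ELEMENT_STARTING_POS + stringLength + 2, the stored string length includes the terminating NUL, and the last byte of the object is EOO.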
// PD_TRACE_DECLARE_FUNCTION ( SDB__DMSROUNIT_INSRCD, "_dmsReorgUnit::insertRecord" )
INT32 _dmsReorgUnit::insertRecord ( BSONObj &obj, _pmdEDUCB *cb,
                                    UINT32 attributes )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB__DMSROUNIT_INSRCD ) ;
   UINT32 dmsrecordSize = 0 ;
   ossValuePtr recordPtr = 0 ;
   ossValuePtr prevPtr = 0 ;
   dmsOffset offset = DMS_INVALID_OFFSET ;
   dmsOffset recordOffset = DMS_INVALID_OFFSET ;
   dmsExtent *currentExtent = (dmsExtent*)_pCurrentExtent ;
   BOOLEAN isCompressed = FALSE ;
   const CHAR *compressedData = NULL ;
   INT32 compressedDataSize = 0 ;

   if ( obj.objsize() + DMS_RECORD_METADATA_SZ > DMS_RECORD_MAX_SZ )
   {
      rc = SDB_CORRUPTED_RECORD ;
      goto error ;
   }

   if ( OSS_BIT_TEST ( attributes, DMS_MB_ATTR_COMPRESSED ) )
   {
      rc = dmsCompress ( cb, obj, NULL, 0,
                         &compressedData, &compressedDataSize ) ;
      PD_RC_CHECK ( rc, PDERROR, "Failed to compress record, rc = %d: %s",
                    rc, obj.toString().c_str() ) ;
      dmsrecordSize = compressedDataSize + sizeof(INT32) ;
      if ( dmsrecordSize > (UINT32)(obj.objsize()) )
      {
         dmsrecordSize = obj.objsize() ;
      }
      else
      {
         isCompressed = TRUE ;
      }
   }
   else
   {
      dmsrecordSize = obj.objsize() ;
   }

   dmsrecordSize += DMS_RECORD_METADATA_SZ ;
   dmsrecordSize *= DMS_RECORD_OVERFLOW_RATIO ;
   dmsrecordSize = OSS_MIN ( DMS_RECORD_MAX_SZ,
                             ossAlignX ( dmsrecordSize, 4 ) ) ;

alloc:
   if ( !_pCurrentExtent )
   {
      rc = _allocateExtent ( dmsrecordSize << DMS_RECORDS_PER_EXTENT_SQUARE ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Failed to allocate new extent in reorg file, "
                  "rc = %d", rc ) ;
         goto error ;
      }
      currentExtent = (dmsExtent*)_pCurrentExtent ;
   }
   if ( dmsrecordSize > (UINT32)currentExtent->_freeSpace )
   {
      rc = _flushExtent () ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Failed to flush extent, rc = %d", rc ) ;
         goto error ;
      }
      goto alloc ;
   }

   recordOffset = _currentExtentSize - currentExtent->_freeSpace ;
   recordPtr = ((ossValuePtr)currentExtent) + recordOffset ;
   if ( currentExtent->_freeSpace - (INT32)dmsrecordSize <
        (INT32)DMS_MIN_RECORD_SZ &&
        currentExtent->_freeSpace <= (INT32)DMS_RECORD_MAX_SZ )
   {
      dmsrecordSize = (UINT32)currentExtent->_freeSpace ;
   }
   DMS_RECORD_SETSTATE ( recordPtr, DMS_RECORD_FLAG_NORMAL ) ;
   DMS_RECORD_RESETATTR ( recordPtr ) ;
   DMS_RECORD_SETMYOFFSET ( recordPtr, recordOffset ) ;
   DMS_RECORD_SETSIZE ( recordPtr, dmsrecordSize ) ;
   if ( isCompressed )
   {
      DMS_RECORD_SETATTR ( recordPtr, DMS_RECORD_FLAG_COMPRESSED ) ;
      DMS_RECORD_SETDATA ( recordPtr, compressedData, compressedDataSize ) ;
   }
   else
   {
      DMS_RECORD_SETDATA ( recordPtr, obj.objdata(), obj.objsize() ) ;
   }
   DMS_RECORD_SETNEXTOFFSET ( recordPtr, DMS_INVALID_OFFSET ) ;
   DMS_RECORD_SETPREVOFFSET ( recordPtr, DMS_INVALID_OFFSET ) ;
   currentExtent->_recCount ++ ;
   currentExtent->_freeSpace -= dmsrecordSize ;
   offset = currentExtent->_lastRecordOffset ;
   if ( DMS_INVALID_OFFSET != offset )
   {
      prevPtr = ((ossValuePtr)currentExtent) + offset ;
      DMS_RECORD_SETNEXTOFFSET ( prevPtr, recordOffset ) ;
      DMS_RECORD_SETPREVOFFSET ( recordPtr, offset ) ;
   }
   currentExtent->_lastRecordOffset = recordOffset ;
   offset = currentExtent->_firstRecordOffset ;
   if ( DMS_INVALID_OFFSET == offset )
   {
      currentExtent->_firstRecordOffset = recordOffset ;
   }

done :
   PD_TRACE_EXITRC ( SDB__DMSROUNIT_INSRCD, rc ) ;
   return rc ;
error :
   goto done ;
}
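The slot size computed by insertRecord follows a simple recipe: take the smaller of the raw and compressed images, add the record metadata, pad by the overflow ratio so later in-place growth has room, then 4-byte align and cap at the maximum record size. A sketch of that arithmetic in plain C++; the constant values here are placeholders, the real ones come from the dms headers (DMS_RECORD_METADATA_SZ, DMS_RECORD_OVERFLOW_RATIO, DMS_RECORD_MAX_SZ):

#include <algorithm>
#include <cstdint>

static const uint32_t kMetadataSize  = 12 ;               // assumption
static const double   kOverflowRatio = 1.2 ;              // assumption
static const uint32_t kMaxRecordSize = 16 * 1024 * 1024 ; // assumption

static uint32_t alignUp4( uint32_t n ) { return ( n + 3 ) & ~3u ; }

// Mirror of the sizing logic above: compression is only used when the
// compressed image plus its 4-byte length prefix is no larger than the raw
// object; the slot is then padded, aligned and capped.
uint32_t computeRecordSlotSize( uint32_t rawSize, uint32_t compressedSize,
                                bool tryCompressed )
{
   uint32_t dataSize = rawSize ;
   if ( tryCompressed && compressedSize + sizeof( int32_t ) <= rawSize )
   {
      dataSize = compressedSize + sizeof( int32_t ) ;
   }
   uint32_t slot = dataSize + kMetadataSize ;
   slot = static_cast<uint32_t>( slot * kOverflowRatio ) ;
   return std::min( kMaxRecordSize, alignUp4( slot ) ) ;
}

The overflow padding is a space-for-time trade: a record that later grows a little can be updated in place instead of being relocated.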
// PD_TRACE_DECLARE_FUNCTION ( SDB_RTNTRAVERSALQUERY, "rtnTraversalQuery" )
INT32 rtnTraversalQuery ( const CHAR *pCollectionName, const BSONObj &key,
                          const CHAR *pIndexName, INT32 dir, pmdEDUCB *cb,
                          SDB_DMSCB *dmsCB, SDB_RTNCB *rtnCB,
                          SINT64 &contextID, rtnContextData **ppContext,
                          BOOLEAN enablePrefetch )
{
   INT32 rc = SDB_OK ;
   PD_TRACE_ENTRY ( SDB_RTNTRAVERSALQUERY ) ;
   SDB_ASSERT ( pCollectionName, "collection name can't be NULL" ) ;
   SDB_ASSERT ( pIndexName, "index name can't be NULL" ) ;
   SDB_ASSERT ( cb, "cb can't be NULL" ) ;
   SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ;
   SDB_ASSERT ( rtnCB, "rtnCB can't be NULL" ) ;
   SDB_ASSERT ( dir == 1 || dir == -1, "dir must be 1 or -1" ) ;

   dmsStorageUnitID suID = DMS_INVALID_CS ;
   dmsStorageUnit *su = NULL ;
   rtnContextData *context = NULL ;
   const CHAR *pCollectionShortName = NULL ;
   optAccessPlan *plan = NULL ;
   dmsMBContext *mbContext = NULL ;
   rtnPredicateList *predList = NULL ;
   rtnIXScanner *scanner = NULL ;
   BSONObj hint ;
   BSONObj dummy ;

   rc = rtnResolveCollectionNameAndLock ( pCollectionName, dmsCB, &su,
                                          &pCollectionShortName, suID ) ;
   PD_RC_CHECK ( rc, PDERROR, "Failed to resolve collection name %s",
                 pCollectionName ) ;

   rc = su->data()->getMBContext( &mbContext, pCollectionShortName, -1 ) ;
   PD_RC_CHECK( rc, PDERROR, "Failed to get dms mb context, rc: %d", rc ) ;

   rc = rtnCB->contextNew ( RTN_CONTEXT_DATA, (rtnContext**)&context,
                            contextID, cb ) ;
   PD_RC_CHECK ( rc, PDERROR, "Failed to create new context, %d", rc ) ;
   SDB_ASSERT ( context, "context can't be NULL" ) ;

   try
   {
      hint = BSON( "" << pIndexName ) ;
   }
   catch ( std::exception &e )
   {
      PD_RC_CHECK ( SDB_SYS, PDERROR, "Failed to construct hint object: %s",
                    e.what() ) ;
   }

   plan = SDB_OSS_NEW optAccessPlan( su, pCollectionShortName, dummy, dummy,
                                     hint ) ;
   if ( !plan )
   {
      rc = SDB_OOM ;
      goto error ;
   }
   rc = plan->optimize() ;
   PD_RC_CHECK( rc, PDERROR, "Plan optimize failed, rc: %d", rc ) ;
   PD_CHECK ( plan->getScanType() == IXSCAN && !plan->isAutoGen(),
              SDB_INVALIDARG, error, PDERROR,
              "Unable to generate access plan by index %s", pIndexName ) ;

   rc = mbContext->mbLock( SHARED ) ;
   PD_RC_CHECK( rc, PDERROR, "dms mb context lock failed, rc: %d", rc ) ;

   {
      dmsRecordID rid ;
      if ( -1 == dir )
      {
         rid.resetMax() ;
      }
      else
      {
         rid.resetMin () ;
      }
      ixmIndexCB indexCB ( plan->getIndexCBExtent(), su->index(), NULL ) ;
      PD_CHECK ( indexCB.isInitialized(), SDB_SYS, error, PDERROR,
                 "unable to get proper index control block" ) ;
      if ( indexCB.getLogicalID() != plan->getIndexLID() )
      {
         PD_LOG( PDERROR, "Index[extent id: %d] logical id[%d] is not "
                 "expected[%d]", plan->getIndexCBExtent(),
                 indexCB.getLogicalID(), plan->getIndexLID() ) ;
         rc = SDB_IXM_NOTEXIST ;
         goto error ;
      }
      predList = plan->getPredList() ;
      SDB_ASSERT ( predList, "predList can't be NULL" ) ;
      predList->setDirection ( dir ) ;
      scanner = SDB_OSS_NEW rtnIXScanner ( &indexCB, predList, su, cb ) ;
      PD_CHECK ( scanner, SDB_OOM, error, PDERROR,
                 "Unable to allocate memory for scanner" ) ;
      rc = scanner->relocateRID ( key, rid ) ;
      PD_CHECK ( SDB_OK == rc, rc, error, PDERROR,
                 "Failed to relocate key to the specified location: %s, "
                 "rc = %d", key.toString().c_str(), rc ) ;
   }
   mbContext->mbUnlock() ;

   rc = context->openTraversal( su, mbContext, plan, scanner, cb,
                                dummy, -1, 0 ) ;
   PD_RC_CHECK( rc, PDERROR, "Open context traversal failed, rc: %d", rc ) ;

   // ownership of these resources has been transferred to the context, so
   // clear the local handles to keep the error path from releasing them
   mbContext = NULL ;
   plan = NULL ;
   suID = DMS_INVALID_CS ;
   scanner = NULL ;
   su = NULL ;

   if ( cb->getMonConfigCB()->timestampON )
   {
      context->getMonCB()->recordStartTimestamp() ;
   }
   if ( ppContext )
   {
      *ppContext = context ;
   }
   if ( enablePrefetch )
   {
      context->enablePrefetch ( cb ) ;
   }

done :
   PD_TRACE_EXITRC ( SDB_RTNTRAVERSALQUERY, rc ) ;
   return rc ;
error :
   if ( su && mbContext )
   {
      su->data()->releaseMBContext( mbContext ) ;
   }
   if ( plan )
   {
      plan->release() ;
   }
   if ( scanner )
   {
      SDB_OSS_DEL scanner ;
   }
   if ( DMS_INVALID_CS != suID )
   {
      dmsCB->suUnlock( suID ) ;
   }
   if ( -1 != contextID )
   {
      rtnCB->contextDelete ( contextID, cb ) ;
      contextID = -1 ;
   }
   goto done ;
}
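rtnTraversalQuery leans on a pattern used throughout this code base: every resource is released on the error path only while the function still owns it, and once openTraversal succeeds the local pointers are nulled so the shared cleanup cannot release them twice. A compact sketch of the pattern in plain C++, with hypothetical Context and Scanner types standing in for the real classes:

#include <new>

struct Scanner { } ;
struct Context
{
   Scanner *owned = nullptr ;
   // on success the context takes ownership of the scanner
   int open( Scanner *s ) { owned = s ; return 0 ; }
} ;

// Release-on-error with ownership hand-off: the error path frees only what
// the function still owns at the moment the jump happens.
int openWithCleanup( Context &ctx )
{
   int rc = 0 ;
   Scanner *scanner = new( std::nothrow ) Scanner() ;
   if ( !scanner ) { rc = -1 ; goto error ; }

   rc = ctx.open( scanner ) ;
   if ( rc ) { goto error ; }
   scanner = nullptr ;      // ownership transferred: stop tracking locally

done:
   return rc ;
error:
   delete scanner ;         // frees only if we still own it
   goto done ;
}

The same reasoning explains why suID, su, mbContext, plan and scanner are all cleared together right after openTraversal returns SDB_OK.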
INT32 _omaAddHostSubTask::doit()
{
   INT32 rc = SDB_OK ;
   INT32 tmpRc = SDB_OK ;
   _pTask->setSubTaskStatus( _taskName, OMA_TASK_STATUS_RUNNING ) ;

   while( TRUE )
   {
      AddHostInfo *pInfo = NULL ;
      AddHostResultInfo resultInfo = { "", "", OMA_TASK_STATUS_INIT,
                                       OMA_TASK_STATUS_DESC_INIT,
                                       SDB_OK, "" } ;
      CHAR flow[OMA_BUFF_SIZE + 1] = { 0 } ;
      const CHAR *pDetail = NULL ;
      const CHAR *pIP = NULL ;
      const CHAR *pHostName = NULL ;
      INT32 errNum = 0 ;
      BSONObj retObj ;

      // pull the next host from the shared task; when no item is left,
      // this sub task is done
      pInfo = _pTask->getAddHostItem() ;
      if ( NULL == pInfo )
      {
         PD_LOG( PDEVENT, "No more hosts need to be added, sub task[%s] exits",
                 _taskName.c_str() ) ;
         goto done ;
      }
      pIP = pInfo->_item._ip.c_str() ;
      pHostName = pInfo->_item._hostName.c_str() ;
      resultInfo._ip = pIP ;
      resultInfo._hostName = pHostName ;

      // mark this host as running and report the progress
      ossSnprintf( flow, OMA_BUFF_SIZE, "Adding host[%s]", pIP ) ;
      resultInfo._status = OMA_TASK_STATUS_RUNNING ;
      resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_RUNNING ) ;
      resultInfo._errno = SDB_OK ;
      resultInfo._detail = "" ;
      resultInfo._flow.push_back( flow ) ;
      tmpRc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
      if ( tmpRc )
      {
         PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                 "rc = %d", pIP, tmpRc ) ;
      }

      _omaAddHost runCmd( *pInfo ) ;
      rc = runCmd.init( NULL ) ;
      if ( rc )
      {
         PD_LOG( PDERROR, "Failed to init for adding host[%s], rc = %d",
                 pIP, rc ) ;
         pDetail = pmdGetThreadEDUCB()->getInfo( EDU_INFO_ERROR ) ;
         if ( NULL == pDetail || 0 == *pDetail )
            pDetail = "Failed to init for adding host" ;
         ossSnprintf( flow, OMA_BUFF_SIZE, "Failed to add host[%s]", pIP ) ;
         resultInfo._status = OMA_TASK_STATUS_FINISH ;
         resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_FINISH ) ;
         resultInfo._errno = rc ;
         resultInfo._detail = pDetail ;
         resultInfo._flow.push_back( flow ) ;
         rc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
         if ( rc )
         {
            PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                    "rc = %d", pIP, rc ) ;
         }
         continue ;
      }

      rc = runCmd.doit( retObj ) ;
      if ( rc )
      {
         PD_LOG( PDERROR, "Failed to add host[%s], rc = %d", pIP, rc ) ;
         tmpRc = omaGetStringElement ( retObj, OMA_FIELD_DETAIL, &pDetail ) ;
         if ( SDB_OK != tmpRc )
         {
            pDetail = pmdGetThreadEDUCB()->getInfo( EDU_INFO_ERROR ) ;
            if ( NULL == pDetail || 0 == *pDetail )
               pDetail = "Js file has not been executed yet" ;
         }
         ossSnprintf( flow, OMA_BUFF_SIZE, "Failed to add host[%s]", pIP ) ;
         resultInfo._status = OMA_TASK_STATUS_FINISH ;
         resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_FINISH ) ;
         resultInfo._errno = rc ;
         resultInfo._detail = pDetail ;
         resultInfo._flow.push_back( flow ) ;
         tmpRc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
         if ( tmpRc )
         {
            PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                    "rc = %d", pIP, tmpRc ) ;
         }
         continue ;
      }

      rc = omaGetIntElement ( retObj, OMA_FIELD_ERRNO, errNum ) ;
      if ( rc )
      {
         PD_LOG( PDERROR, "Failed to get errno from js after "
                 "adding host[%s], rc = %d", pIP, rc ) ;
         pDetail = pmdGetThreadEDUCB()->getInfo( EDU_INFO_ERROR ) ;
         if ( NULL == pDetail || 0 == *pDetail )
            pDetail = "Failed to get errno from js after adding host" ;
         ossSnprintf( flow, OMA_BUFF_SIZE, "Failed to add host[%s]", pIP ) ;
         resultInfo._status = OMA_TASK_STATUS_FINISH ;
         resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_FINISH ) ;
         resultInfo._errno = rc ;
         resultInfo._detail = pDetail ;
         resultInfo._flow.push_back( flow ) ;
         tmpRc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
         if ( tmpRc )
         {
            PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                    "rc = %d", pIP, tmpRc ) ;
         }
         continue ;
      }

      if ( SDB_OK != errNum )
      {
         rc = omaGetStringElement ( retObj, OMA_FIELD_DETAIL, &pDetail ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "Failed to get error detail from js after "
                    "adding host[%s], rc = %d", pIP, rc ) ;
            pDetail = pmdGetThreadEDUCB()->getInfo( EDU_INFO_ERROR ) ;
            if ( NULL == pDetail || 0 == *pDetail )
               pDetail = "Failed to get error detail from js after adding host" ;
         }
         ossSnprintf( flow, OMA_BUFF_SIZE, "Failed to add host[%s]", pIP ) ;
         resultInfo._status = OMA_TASK_STATUS_FINISH ;
         resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_FINISH ) ;
         resultInfo._errno = errNum ;
         resultInfo._detail = pDetail ;
         resultInfo._flow.push_back( flow ) ;
         tmpRc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
         if ( tmpRc )
         {
            PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                    "rc = %d", pIP, tmpRc ) ;
         }
         continue ;
      }
      else
      {
         ossSnprintf( flow, OMA_BUFF_SIZE, "Finished adding host[%s]", pIP ) ;
         PD_LOG ( PDEVENT, "Successfully added host[%s]", pIP ) ;
         resultInfo._status = OMA_TASK_STATUS_FINISH ;
         resultInfo._statusDesc = getTaskStatusDesc( OMA_TASK_STATUS_FINISH ) ;
         resultInfo._flow.push_back( flow ) ;
         tmpRc = _pTask->updateProgressToTask( pInfo->_serialNum, resultInfo ) ;
         if ( tmpRc )
         {
            PD_LOG( PDWARNING, "Failed to update add host[%s]'s progress, "
                    "rc = %d", pIP, tmpRc ) ;
         }
      }
   }

done:
   _pTask->setSubTaskStatus( _taskName, OMA_TASK_STATUS_FINISH ) ;
   _pTask->notifyUpdateProgress() ;
   return SDB_OK ;
}
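The sub task above is one instance of a generic worker loop: pull items from the shared task until none are left, report progress for every item, and continue on per-item failure so one bad host never aborts the whole batch. A simplified sketch of that control flow in plain C++; the callbacks and types here are illustrative, not the real omagent interfaces:

#include <functional>
#include <optional>
#include <string>
#include <vector>

struct HostItem   { std::string ip ; } ;
struct HostResult { std::string ip ; int rc = 0 ; std::string detail ; } ;

// Keep pulling work until nextItem() returns nothing; record a result for
// every host and 'continue' on failure instead of stopping the loop.
void runAddHostLoop( std::function<std::optional<HostItem>()> nextItem,
                     std::function<int( const HostItem & )> addHost,
                     std::vector<HostResult> &results )
{
   for ( ;; )
   {
      std::optional<HostItem> item = nextItem() ;
      if ( !item )
      {
         break ;                    // nothing left: the sub task exits
      }
      HostResult r ;
      r.ip = item->ip ;
      r.rc = addHost( *item ) ;
      if ( r.rc != 0 )
      {
         r.detail = "failed to add host" ;
         results.push_back( r ) ;
         continue ;                 // move on to the next host
      }
      results.push_back( r ) ;
   }
}

Keeping the per-item failure handling inside the loop is what lets several sub tasks drain the same host list in parallel without any of them giving up early.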