int TaskManager::loadWorker(std::string workerFileName, OSFileSys* fsys) {
    // Parses a worker/task script file (plus its #include'd files), runs
    // semantic checks, and registers the resulting tasks, behaviors and
    // connectors into m_taskMem. Returns 0 on success, a non-zero error
    // code otherwise.
    // NOTE(review): fsys is only referenced by the commented-out
    // scenario-file handling near the end — confirm it is still needed.
    int result = 0;
    int errorCount = 0;
    std::auto_ptr<EParser> aTest(new EParser(m_ostr));
    //EParser* aTest = new EParser(m_ostr);
    aTest->setFilename(workerFileName);
    // Pre-scan the top-level file for include directives.
    int res = aTest->extractInclude(workerFileName, TaskConfig::getDeployHome());
    if(res != 0){ result = res; errorCount = res; }
    if(aTest->getInclude() != 0){
        // Preprocessing: parse every included file in order.
        EList<std::string> includedFiles = aTest->getInclude()->getIncludeFiles();
        if(includedFiles.moveHead()){
            do{
                std::string fName = includedFiles.getCurObject();
                // parsing
                LOG_TRACE(m_log, "Reading the file, '%s'...", fName.c_str());
                aTest->setFilename(fName);
                errorCount = aTest->parseScript(TaskConfig::getDeployHome() + fName);
                if(errorCount>0){
                    result = errorCount; // TODO: clean up error codes
                    LOG_ERROR(m_log, "Parser can not open the file '%s'.", fName.c_str());
                    break;
                }
            } while(includedFiles.moveNext());
        }
    }
    if(errorCount == 0){
        GparsedResult* pRslt = aTest->getParsingResult();
        // NOTE(review): enumlist/modellist/actionlist are fetched but not
        // used directly below; makeSymTable() reads pRslt itself.
        EList<Genum*>* enumlist = pRslt->getEnums();
        EList<Gmodel*>* modellist = pRslt->getModels();
        EList<Gaction*>* actionlist = pRslt->getActions();
        // EList<Gworker*>* workerlist = pRslt->getWorkers(); // old-style task definition
        EList<Gtask*>* tasklist = pRslt->getTasks(); // new-style task definition
        EList<Gbehavior*>* bhvlist = pRslt->getbehaviors();
        EList<Gconnector*>* connlist = pRslt->getconnectors();
        SymTAB4Task* newSymT = new SymTAB4Task();
        std::auto_ptr<RTObjBuilder> rtmBuilder(new RTObjBuilder(m_ostr, newSymT));
        //RTObjBuilder *rtmBuilder = new RTObjBuilder(m_ostr, newSymT);
        EList<RFSMtask*>* rtasks = NULL;
        EList<RFSMbehavior*>* rbhvs = NULL;
        EList<RFSMconnector*>* rconns = NULL;
        try{
            // Check the model: symbols, functions, actions and enums.
            LOG_TRACE(m_log, "Checking Model composed of Symbol, Function, Action and Enum ...");
            rtmBuilder->makeSymTable(pRslt);
            // Check the task specifications.
            LOG_TRACE(m_log, "Checking Tasks ...");
            rtasks = rtmBuilder->checkSemantics(tasklist);
            rbhvs = rtmBuilder->checkSemantics(bhvlist);
            rconns = rtmBuilder->checkSemantics(connlist);
            // Even when the model is split over several files a single symbol
            // table is built: the split files are treated as one file.
            //SymTAB4Task* newSymT = rtmBuilder->getSymT();
            /*
            if(workerlist !=NULL && workerlist->getSize()>0 && workerlist->moveHead()){
                do{
                    Gworker* worker = workerlist->getCurObject();
                    std::string workerName = worker->getName()->getData();
                    // register the tasks
                    result = m_taskMem->addTask(workerName, worker->getStartBhv()->getData());
                    // register the symbol table
                    if(result ==0){
                        newSymT->pluRefCount();
                        result = m_taskMem->addSymbolT(workerName, newSymT);
                    }
                    else result = 100; // TODO: clean up error codes
                } while(workerlist->moveNext());
            }
            */
            if(rtasks !=NULL && rtasks->getSize()>0 && rtasks->moveHead()){
                do{
                    RFSMtask* task = rtasks->getCurObject();
                    std::string taskName = task->getName();
                    // Register the task.
                    result = m_taskMem->addTask2(taskName, task);
                    // Register the (shared, reference-counted) symbol table.
                    if(result ==0){
                        newSymT->pluRefCount();
                        result = m_taskMem->addSymbolT(taskName, newSymT);
                    }
                    else result = 100; // TODO: clean up error codes
                } while(rtasks->moveNext());
            }
            // else
            // delete newSymT; // if there is no task it could never be freed later...
            // Register behaviors and connectors.
            if(result ==0){
                if(rbhvs != NULL && rbhvs->getSize()>0 && rbhvs->moveHead()){
                    do{
                        RFSMbehavior* bhv = rbhvs->getCurObject();
                        // Order matters: monitoring checks for duplicates
                        // against the behavior table.
                        // 1. register monitoring info
                        m_taskMem->addMonitorInfo(bhv);
                        // 2. register the behavior
                        result = m_taskMem->addBehavior(bhv);
                    } while(rbhvs->moveNext());
                }
                if(rconns != NULL && rconns->getSize()>0 && rconns->moveHead()){
                    do{
                        RFSMconnector* rconn= rconns->getCurObject();
                        // Order matters: monitoring checks for duplicates
                        // against the behavior table.
                        // 1. register monitoring info
                        //m_taskMem->addMonitorInfo(rconn);
                        // 2. register the connector
                        result = m_taskMem->addConnector(rconn);
                    } while(rconns->moveNext());
                }
            }
            else result = 100; // TODO: clean up error codes
            /*
            // store the worker name associated with each behavior
            if(result ==0){
                if(workerlist !=NULL && workerlist->getSize()>0 && workerlist->moveHead()){
                    do{
                        Gworker* worker = workerlist->getCurObject();
                        std::string workerName = worker->getName()->getData();
                        std::string startBhvName = worker->getStartBhv()->getData();
                        RFSMbehavior* t = m_taskMem->findBehavior(startBhvName);
                        if(t == NULL){ // the task's start behavior does not exist
                            LOG_ERROR(m_log, "The root behavior, <%s()>, cannot be found.", startBhvName.c_str());
                            return 100; // TODO: clean up error codes
                        }
                        else{
                            t->addRefTask(workerName);
                            EList<std::string> tlist;
                            setWorkerName(t, workerName, tlist);
                            // generate a dot file for each worker
                            std::string dot = getDot(workerName, t, tlist);
                            std::ofstream dotfile;
                            std::string dotfilename = workerName;
                            dotfilename.append(".dot");
                            dotfile.open(dotfilename.c_str());
                            dotfile << dot.c_str() << "\n";
                            dotfile.flush();
                            dotfile.close();
                            LOG_TRACE(m_log, "%s.dot file, which represents graphical view of a worker, is created.", workerName.c_str());
                        }
                    } while(workerlist->moveNext());
                }
            }
            */
            // Store the owning task name on every start node (behavior or
            // conexer) referenced by each task's run block.
            if(result ==0){
                if(rtasks !=NULL && rtasks->getSize()>0 && rtasks->moveHead()){
                    do{
                        RFSMtask* task = rtasks->getCurObject();
                        std::string taskName = task->getName();
                        // Names of all behaviors and conexers contained in the task.
                        std::vector<RFSMstmt*>* runBlock = task->getRunBlock();
                        std::vector<std::string> startList;
                        task->extractStartNodes(runBlock, startList);
                        // For every behavior/conexer referenced in the run block...
                        std::vector<std::string>::iterator it;
                        for(it=startList.begin(); it<startList.end(); it++){
                            std::string startNodeName = *it;
                            RFSMtreenode* startNode = m_taskMem->findBehavior(startNodeName);
                            if(startNode == NULL){ // not a behavior; try connectors
                                startNode = m_taskMem->findConnector(startNodeName);
                                if(startNode == NULL){
                                    LOG_ERROR(m_log, "The root node(behavior or conexer), <%s()>, cannot be found.", startNodeName.c_str());
                                    result = 100; // TODO: clean up error codes
                                    break;
                                }
                            }
                            startNode->addRefTask(taskName);
                            EList<std::string> tlist; // for dot
                            setWorkerName(startNode, taskName, tlist);
                            // Required so the elements tlist created via
                            // new EListElement<T>(obj) are actually deleted.
                            tlist.clearAllchild();
                        }
                        // Generate a dot file for each worker.
                        // The part below must be reworked into a structure
                        // rooted at the task node.
                        /*
                        std::string dot = getDot(taskName, t, tlist);
                        std::ofstream dotfile;
                        std::string dotfilename = taskName;
                        dotfilename.append(".dot");
                        dotfile.open(dotfilename.c_str());
                        dotfile << dot.c_str() << "\n";
                        dotfile.flush();
                        dotfile.close();
                        LOG_TRACE(m_log, "%s.dot file, which represents graphical view of a worker, is created.", taskName.c_str());
                        */
                    } while(result == 0 && rtasks->moveNext());
                }
            }
            // Resolve sub-node links for every task, behavior and connector.
            if(rtasks != 0 && rtasks->getSize()>0 && rtasks->moveHead()){
                do{
                    RFSMtreenode* rtask = rtasks->getCurObject();
                    setSubNodes(rtask);
                }while(rtasks->moveNext());
            }
            if(rbhvs != 0 && rbhvs->getSize()>0 && rbhvs->moveHead()){
                do{
                    RFSMtreenode* rbhv = rbhvs->getCurObject();
                    setSubNodes(rbhv);
                }while(rbhvs->moveNext());
            }
            if(rconns != 0 && rconns->getSize()>0 && rconns->moveHead()){
                do{
                    RFSMtreenode* rconn = rconns->getCurObject();
                    setSubNodes(rconn);
                }while(rconns->moveNext());
            }
            // Validate and record synchronization IDs on every connector...
            if(rconns != 0 && rconns->getSize()>0 && rconns->moveHead()){
                do{
                    RFSMconnector* rconn = (RFSMconnector*)rconns->getCurObject();
                    std::map<int, int> synchInfo = checkValidSynchIDs(rconn);
                    rconn->setSynchInfo(synchInfo);
                    //rconn->checkValidSynchIDs();
                }while(rconns->moveNext());
            }
            // ...and propagate synch IDs into each task.
            if(rtasks != 0 && rtasks->getSize()>0 && rtasks->moveHead()){
                do{
                    std::set<int> ids; // NOTE(review): unused local
                    RFSMtask* rtask = (RFSMtask*)(rtasks->getCurObject());
                    rtask->setSynchidInTask();
                }while(rtasks->moveNext());
            }
            /* Delete the list shells only; their contents are now owned by
             * TaskMem. */
            if(rtasks != NULL){
                while(rtasks->getSize()>0 && rtasks->moveHead()){
                    //RFSMtask* obj = rtasks->getCurObject();
                    //delete obj;
                    rtasks->delHead();
                }
                delete rtasks;
                rtasks = NULL;
            }
            if(rbhvs != NULL){
                while(rbhvs->getSize()>0 && rbhvs->moveHead()){
                    //RFSMbehavior* obj = rbhvs->getCurObject();
                    //delete obj;
                    rbhvs->delHead();
                }
                delete rbhvs;
                rbhvs = NULL;
            }
            if(rconns != NULL){
                while(rconns->getSize()>0 && rconns->moveHead()){
                    //RFSMconnector* obj = rconns->getCurObject();
                    //delete obj;
                    rconns->delHead();
                }
                delete rconns;
                rconns = NULL;
            }
            /*
            // scenario file handling
            if(pRslt->getSCs().size()>0){
                std::string inclstr;
                if(aTest->getInclude() != 0){
                    Ginclude* inclList = aTest->getInclude();
                    if(inclList->moveHead()){
                        do{
                            Gtoken* tok = inclList->getCurObject();
                            std::string fname = tok->getFileName();
                            std::string incl = tok->getData();
                            if(fname==workerFileName)
                                inclstr.append("#include \"").append(incl).append("\"\n");
                        } while(inclList->moveNext());
                    }
                }
                TPLGenerator tplGen(fsys, workerFileName, inclstr, pRslt->getSTasks(), pRslt->getSCs(), pRslt->getSBehaviors(), pRslt->getSEvents());
                tplGen.generateTPL();
            }
            */
        }
        catch(SemanticsEH& semEH){
            // Semantic check failed: release everything built so far.
            LOG_ERROR(m_log, "The parser returned %d errors, Task loading aborted.", semEH.hasError_());
            delete newSymT;
            newSymT = NULL;
            delete rtasks;
            rtasks = NULL;
            delete rbhvs;
            rbhvs = NULL;
            delete rconns;
            rconns = NULL;
            result = 100; // TODO: clean up error codes
        }
    }
    // pRslt is owned (and deleted) by aTest.
    //delete aTest;
    return result;
}
template < typename IN_PORT_TYPE, typename OUT_PORT_TYPE >
int add_const_vii_base::_transformerServiceFunction(
    typename std::vector< gr_istream< IN_PORT_TYPE > > &istreams ,
    typename std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams )
{
    // Service-thread body: pulls data from the input streams, hands it to
    // the wrapped GNU Radio block via _forecastAndProcess, and manages the
    // stream lifecycle (SRI changes, overruns, EOS). Returns NOOP when there
    // was nothing to do, NORMAL otherwise.
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;
    typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > > _OStreamList;

    boost::mutex::scoped_lock lock(serviceThreadLock);

    if ( validGRBlock() == false ) {
        // create our processing block, and setup property notifiers
        createBlock();
        LOG_DEBUG( add_const_vii_base, " FINISHED BUILDING GNU RADIO BLOCK");
    }

    //process any Stream ID changes this could affect number of io streams
    processStreamIdChanges();

    if ( !validGRBlock() || istreams.size() == 0 || ostreams.size() == 0 ) {
        LOG_WARN( add_const_vii_base, "NO STREAMS ATTACHED TO BLOCK..." );
        return NOOP;
    }

    // Size per-stream bookkeeping vectors to the current stream counts.
    _input_ready.resize( istreams.size() );
    _ninput_items_required.resize( istreams.size() );
    _ninput_items.resize( istreams.size() );
    _input_items.resize( istreams.size() );
    _output_items.resize( ostreams.size() );

    //
    // RESOLVE: need to look at forecast strategy,
    // 1) see how many read items are necessary for N number of outputs
    // 2) read input data and see how much output we can produce
    //

    //
    // Grab available data from input streams
    //
    typename _OStreamList::iterator ostream;
    typename _IStreamList::iterator istream = istreams.begin();
    int nitems=0;
    for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
        // note this a blocking read that can cause deadlocks
        nitems = istream->read();
        if ( istream->overrun() ) {
            LOG_WARN( add_const_vii_base, " NOT KEEPING UP WITH STREAM ID:" << istream->streamID );
        }
        if ( istream->sriChanged() ) {
            // RESOLVE - need to look at how SRI changes can affect Gnu Radio BLOCK state
            LOG_DEBUG( add_const_vii_base, "SRI CHANGED, STREAMD IDX/ID: " << idx << "/" << istream->pkt->streamID );
            setOutputStreamSRI( idx, istream->pkt->SRI );
        }
    }

    LOG_TRACE( add_const_vii_base, "READ NITEMS: " << nitems );
    // NOTE(review): the EOS probe uses the member _istreams[0] rather than
    // the istreams parameter — presumably they alias the same container;
    // confirm against the caller.
    if ( nitems <= 0 && !_istreams[0].eos() ) {
        return NOOP;
    }

    bool eos = false;
    int nout = 0;
    bool workDone = false;
    // Keep invoking the block until it reports it cannot produce more output
    // (nout == -1) or the service thread is asked to stop.
    while ( nout > -1 && serviceThread->threadRunning() ) {
        eos = false;
        nout = _forecastAndProcess( eos, istreams, ostreams );
        if ( nout > -1 ) {
            workDone = true;
            // we chunked on data so move read pointer..
            istream = istreams.begin();
            for ( ; istream != istreams.end(); istream++ ) {
                int idx=std::distance( istreams.begin(), istream );
                // if we processed data for this stream
                if ( _input_ready[idx] ) {
                    // NOTE(review): this size_t nitems shadows the outer int
                    // nitems; signed/unsigned compare below — confirm intent.
                    size_t nitems = 0;
                    try {
                        nitems = gr_sptr->nitems_read( idx );
                    } catch(...){}
                    if ( nitems > istream->nitems() ) {
                        LOG_WARN( add_const_vii_base, "WORK CONSUMED MORE DATA THAN AVAILABLE, READ/AVAILABLE " << nitems << "/" << istream->nitems() );
                        nitems = istream->nitems();
                    }
                    istream->consume( nitems );
                    LOG_TRACE( add_const_vii_base, " CONSUME READ DATA ITEMS/REMAIN " << nitems << "/" << istream->nitems());
                }
            }
            gr_sptr->reset_read_index();
        }

        // check for not enough data return
        if ( nout == -1 ) {
            // check for end of stream
            istream = istreams.begin();
            for ( ; istream != istreams.end() ; istream++) {
                if ( istream->eos() ) {
                    eos=true;
                }
            }
            if ( eos ) {
                // Flush whatever remains downstream before shutting down.
                LOG_TRACE( add_const_vii_base, "EOS SEEN, SENDING DOWNSTREAM " );
                _forecastAndProcess( eos, istreams, ostreams);
            }
        }
    }

    if ( eos ) {
        // Close all input streams, then all output streams.
        istream = istreams.begin();
        for ( ; istream != istreams.end() ; istream++ ) {
            int idx=std::distance( istreams.begin(), istream );
            LOG_DEBUG( add_const_vii_base, " CLOSING INPUT STREAM IDX:" << idx );
            istream->close();
        }
        // close remaining output streams
        ostream = ostreams.begin();
        for ( ; eos && ostream != ostreams.end(); ostream++ ) {
            int idx=std::distance( ostreams.begin(), ostream );
            LOG_DEBUG( add_const_vii_base, " CLOSING OUTPUT STREAM IDX:" << idx );
            ostream->close();
        }
    }

    //
    // set the read pointers of the GNU Radio Block to start at the beginning of the
    // supplied buffers
    //
    gr_sptr->reset_read_index();

    LOG_TRACE( add_const_vii_base, " END OF TRANSFORM SERVICE FUNCTION....." << noutput_items );

    if ( nout == -1 && eos == false && !workDone ) {
        return NOOP;
    } else {
        return NORMAL;
    }
}
/**************************************************************************** ** ** ** Name: esm_send_activate_default_eps_bearer_context_request() ** ** ** ** Description: Builds Activate Default EPS Bearer Context Request ** ** message ** ** ** ** The activate default EPS bearer context request message ** ** is sent by the network to the UE to request activation of ** ** a default EPS bearer context. ** ** ** ** Inputs: pti: Procedure transaction identity ** ** ebi: EPS bearer identity ** ** qos: Subscribed EPS quality of service ** ** apn: Access Point Name in used ** ** pdn_addr: PDN IPv4 address and/or IPv6 suffix ** ** esm_cause: ESM cause code ** ** Others: None ** ** ** ** Outputs: msg: The ESM message to be sent ** ** Return: RETURNok, RETURNerror ** ** Others: None ** ** ** ***************************************************************************/ int esm_send_activate_default_eps_bearer_context_request ( int pti, int ebi, activate_default_eps_bearer_context_request_msg * msg, const OctetString * apn, const ProtocolConfigurationOptions * pco, int pdn_type, const OctetString * pdn_addr, const EpsQualityOfService * qos, int esm_cause) { LOG_FUNC_IN; /* * Mandatory - ESM message header */ msg->protocoldiscriminator = EPS_SESSION_MANAGEMENT_MESSAGE; msg->epsbeareridentity = ebi; msg->messagetype = ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST; msg->proceduretransactionidentity = pti; /* * Mandatory - EPS QoS */ msg->epsqos = *qos; LOG_TRACE (INFO, "ESM-SAP - epsqos qci: %u", qos->qci); if (qos->bitRatesPresent) { LOG_TRACE (INFO, "ESM-SAP - epsqos maxBitRateForUL: %u", qos->bitRates.maxBitRateForUL); LOG_TRACE (INFO, "ESM-SAP - epsqos maxBitRateForDL: %u", qos->bitRates.maxBitRateForDL); LOG_TRACE (INFO, "ESM-SAP - epsqos guarBitRateForUL: %u", qos->bitRates.guarBitRateForUL); LOG_TRACE (INFO, "ESM-SAP - epsqos guarBitRateForDL: %u", qos->bitRates.guarBitRateForDL); } else { LOG_TRACE (INFO, "ESM-SAP - epsqos no bit rates defined"); } if 
(qos->bitRatesExtPresent) { LOG_TRACE (INFO, "ESM-SAP - epsqos maxBitRateForUL Ext: %u", qos->bitRatesExt.maxBitRateForUL); LOG_TRACE (INFO, "ESM-SAP - epsqos maxBitRateForDL Ext: %u", qos->bitRatesExt.maxBitRateForDL); LOG_TRACE (INFO, "ESM-SAP - epsqos guarBitRateForUL Ext: %u", qos->bitRatesExt.guarBitRateForUL); LOG_TRACE (INFO, "ESM-SAP - epsqos guarBitRateForDL Ext: %u", qos->bitRatesExt.guarBitRateForDL); } else { LOG_TRACE (INFO, "ESM-SAP - epsqos no bit rates ext defined"); } if ((apn == NULL) || ((apn != NULL) && (apn->value == NULL))) { LOG_TRACE (WARNING, "ESM-SAP - apn is NULL!"); } LOG_TRACE (INFO, "ESM-SAP - apn is %s", apn->value); /* * Mandatory - Access Point Name */ msg->accesspointname.accesspointnamevalue = *apn; /* * Mandatory - PDN address */ LOG_TRACE (INFO, "ESM-SAP - pdn_type is %u", pdn_type); msg->pdnaddress.pdntypevalue = pdn_type; LOG_TRACE (INFO, "ESM-SAP - pdn_addr is %u", dump_octet_string (pdn_addr)); msg->pdnaddress.pdnaddressinformation = *pdn_addr; /* * Optional - ESM cause code */ msg->presencemask = 0; if (esm_cause != ESM_CAUSE_SUCCESS) { msg->presencemask |= ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST_ESM_CAUSE_PRESENT; msg->esmcause = esm_cause; } if (pco != NULL) { msg->presencemask |= ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST_PROTOCOL_CONFIGURATION_OPTIONS_PRESENT; msg->protocolconfigurationoptions = *pco; } #warning "TEST LG FORCE APN-AMBR" LOG_TRACE (INFO, "ESM-SAP - FORCE APN-AMBR"); msg->presencemask |= ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST_APNAMBR_PRESENT; msg->apnambr.apnambrfordownlink = 0xfe; // (8640kbps) msg->apnambr.apnambrforuplink = 0xfe; // (8640kbps) msg->apnambr.apnambrfordownlink_extended = 0xde; // (200Mbps) msg->apnambr.apnambrforuplink_extended = 0x9e; // (100Mbps) msg->apnambr.apnambrfordownlink_extended2 = 0; msg->apnambr.apnambrforuplink_extended2 = 0; msg->apnambr.extensions = 0 | APN_AGGREGATE_MAXIMUM_BIT_RATE_MAXIMUM_EXTENSION_PRESENT; LOG_TRACE (INFO, "ESM-SAP - Send Activate Default EPS 
Bearer Context " "Request message (pti=%d, ebi=%d)", msg->proceduretransactionidentity, msg->epsbeareridentity); LOG_FUNC_RETURN (RETURNok); }
bool HybridScanExecutor::ExecPrimaryIndexLookup() {
  // Index-lookup leg of the hybrid scan: probes the primary-key index, walks
  // each tuple's version chain to the version visible to the current
  // transaction, and materializes one logical tile per tile group into
  // result_. Returns false when the lookup produced no tuples or a
  // transactional read failed.
  PL_ASSERT(index_done_ == false);

  const planner::HybridScanPlan &node = GetPlanNode<planner::HybridScanPlan>();
  bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate();
  auto key_column_ids_ = node.GetKeyColumnIds();
  auto expr_type_ = node.GetExprTypes();

  std::vector<ItemPointer *> tuple_location_ptrs;

  PL_ASSERT(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  // No key columns means a full index scan.
  if (0 == key_column_ids_.size()) {
    LOG_TRACE("Scan all keys");
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    LOG_TRACE("Scan");
    index_->Scan(values_, key_column_ids_, expr_type_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_location_ptrs,
                 &node.GetIndexPredicate().GetConjunctionList()[0]);
  }

  LOG_TRACE("Result tuple count: %lu", tuple_location_ptrs.size());

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();
  auto current_txn = executor_context_->GetTransaction();

  if (tuple_location_ptrs.size() == 0) {
    index_done_ = true;
    return false;
  }

  // Visible tuple offsets, grouped by tile-group (block) id.
  std::map<oid_t, std::vector<oid_t>> visible_tuples;

  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    ItemPointer tuple_location = *tuple_location_ptr;
    // In hybrid mode, remember locations at/past the block threshold so the
    // sequential-scan leg can avoid re-reading them.
    if (type_ == HYBRID_SCAN_TYPE_HYBRID &&
        tuple_location.block >= (block_threshold)) {
      item_pointers_.insert(tuple_location);
    }
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    // perform transaction read
    size_t chain_length = 0;
    // Walk the version chain until a version visible to this txn is found.
    while (true) {
      ++chain_length;
      auto visibility = transaction_manager.IsVisible(
          current_txn, tile_group_header, tuple_location.offset);
      if (visibility == VISIBILITY_OK) {
        visible_tuples[tuple_location.block].push_back(tuple_location.offset);
        auto res = transaction_manager.PerformRead(current_txn, tuple_location,
                                                   acquire_owner);
        if (!res) {
          transaction_manager.SetTransactionResult(current_txn, RESULT_FAILURE);
          return res;
        }
        break;
      } else {
        // Not visible: hop to the next (older) version in the chain.
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);
        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.
        assert(tuple_location.IsNull() == false);
        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();
        // check whether older version is garbage.
        if (old_end_cid < max_committed_cid) {
          assert(tile_group_header->GetTransactionId(old_item.offset) ==
                     INITIAL_TXN_ID ||
                 tile_group_header->GetTransactionId(old_item.offset) ==
                     INVALID_TXN_ID);
          if (tile_group_header->SetAtomicTransactionId(
                  old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);
          }
        }
        tile_group = manager.GetTileGroup(tuple_location.block);
        tile_group_header = tile_group.get()->GetHeader();
      }
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);
    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    // Project down to the requested columns, if any were specified.
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }
    result_.push_back(logical_tile.release());
  }
  index_done_ = true;
  LOG_TRACE("Result tiles : %lu", result_.size());
  return true;
}
useragentObj::response useragentObj::do_request(const fd *terminate_fd,
                                                requestimpl &req,
                                                request_sans_body &impl)
{
    // Issues the request and transparently follows HTTP redirects
    // (RFC 2616) up to maxredirects times, returning the final response.
    bool initial=true;
    method_t meth=req.getMethod();
    uriimpl redirected_uri;

    for (size_t i=0; ; ++i)
    {
        // First iteration sends the caller's request; subsequent iterations
        // re-issue a body-less GET to the redirect target (the second arm is
        // a GCC statement expression).
        auto response=initial
            ? do_request_redirected(terminate_fd, req, impl)
            : ({
                    requestimpl next(GET, redirected_uri);
                    request_sans_body no_body;

                    meth=GET;
                    do_request_redirected(terminate_fd, next, no_body);
                });

        initial=false;

        if (i >= maxredirects.get())
            return response; // Too many redirections

        LOG_DEBUG("Processing redirect " + to_string(i+1));

        // Check for redirects that we should handle
        // See RFC 2616.
        switch (response->message.getStatusCode()) {
        case 301:
        case 302:
        case 307:
            // These codes are followed automatically only for HEAD/GET.
            if (meth != HEAD && meth != GET)
                return response;
            break;
        case 303:
            // 303 See Other: always re-fetch with GET.
            break;
        default:
            return response;
        }

        // If there's no location header, we cannot redirect
        auto location=response->message.equal_range("Location");

        if (location.first == location.second)
            return response;

        try {
            // Resolve the Location header value against the response URI.
            redirected_uri=response->uri +
                uriimpl(std::string(location.first->
                                    second.begin(),
                                    location.first->
                                    second.end()));
        } catch (...) {
            // Probably a bad URI
            return response;
        }
        LOG_TRACE("Processing redirect: "
                  + std::string(response->begin(), response->end()));
        // Drain the redirect response's body so the connection can be reused.
        response->discardbody();
    }
}
void CommRemote::runThread() { fd_set sockets; int maxSocket = serverSocket + 1; struct timeval timeout; timeout.tv_sec = (int)floor(socketTimeout); timeout.tv_usec = (int)floor((socketTimeout - timeout.tv_sec) * 1000); LOG_DEBUG("Timeouts = %d %d", timeout.tv_sec, timeout.tv_usec); struct sockaddr_in from; int fromLength = sizeof(from); unsigned char clientBuffer[maxPacketSize]; unsigned int offset = 0; unsigned int size = 0; FD_ZERO(&sockets); FD_SET(serverSocket, &sockets); LOG_TRACE("Starting thread"); threadRunning = true; while (threadRunning) { FD_ZERO(&sockets); FD_SET(serverSocket, &sockets); if (clientSocket >= 0) { FD_SET(clientSocket, &sockets); } maxSocket = serverSocket > clientSocket ? serverSocket + 1 : clientSocket + 1; int result = select(maxSocket, &sockets, NULL, NULL, &timeout); if (result < 0) { if (errno == EINTR) { // printf("EINTR caught in select()\n."); usleep((int)(socketTimeout * 1000000)); continue; } LOG_ERROR("Error reading from sockets using select().") threadRunning = false; break; } // Read data from connected client (if any) if ((clientSocket >= 0) && FD_ISSET(clientSocket, &sockets)) { bool closeConnection = false; // Read as much as we can int numRead = recv(clientSocket, clientBuffer + offset, maxPacketSize - offset, 0); if (numRead > 0) { LOG_INFO("Read %d bytes.", numRead); } // Client ended the connection if (numRead == 0) { closeConnection = true; LOG_INFO("Client ended the connection."); } // Error reading from the socket else if ((numRead == -1) && (errno != EAGAIN) && (errno != EINTR)) { closeConnection = true; LOG_INFO("Error reading from client socket: %d.", errno); } if (closeConnection) { LOG_INFO("Disconnecting client"); disconnectClient(sockets, maxSocket); } // We actually read some data while (numRead > 0) { // Do we have a size for the packet? 
if ((numRead + offset >= robotMessageHeaderSize) && (size == 0)) { // We want to read the last 4 bytes (32 bits) uint32_t *sizePointer = (uint32_t *)(clientBuffer + robotMessageHeaderSize - 4); size = ntohl(*sizePointer); LOG_INFO("Read a size of %d", size); // Update the buffer's offset and numRead counter offset += robotMessageHeaderSize; numRead -= robotMessageHeaderSize; } // Has the whole packet been read? if ((size >= 0) && (offset + numRead >= size + robotMessageHeaderSize)) { // Update the buffer's offset and numRead counter numRead -= (size + robotMessageHeaderSize - offset); offset = size + robotMessageHeaderSize; // same as: offset += (size + robotMessageHeaderSize - offset) LOG_INFO("Received a packet of type %d, size %d.", clientBuffer[0], size); // Convert it into a packet // Assumption: the type fits in 1 byte RemoteMessageToRobot const *message = RemoteMessageToRobot::create(clientBuffer[0], size, clientBuffer + robotMessageHeaderSize); if (message != NULL) { pthread_mutex_lock(&dataMutex); messagesToRobot.push_back(message); pthread_mutex_unlock(&dataMutex); } // Move the buffer's data as necessary if (numRead > 0) { memcpy(clientBuffer, clientBuffer + offset, numRead); offset = 0; size = 0; } } } } // Accept connection from server socket if (FD_ISSET(serverSocket, &sockets)) { int newClientSocket = accept(serverSocket, (struct sockaddr *)&from, (socklen_t *)&fromLength); if (newClientSocket == -1) { if (errno == EINTR) { LOG_INFO("Caught EINTR while accepting client connection."); } else { LOG_ERROR("Error accepting client.") } } // New client connected else { // Kill any existing client connection if (clientSocket >= 0) { disconnectClient(sockets, maxSocket); } // Save the new client socket clientSocket = newClientSocket; LOG_INFO("New client connected from %s", inet_ntoa(from.sin_addr)); // Make the client socket non-blocking int flags = 1; if (ioctl(clientSocket, FIONBIO, &flags) == -1) { LOG_WARN("Could not make client socket non-blocking."); 
} offset = 0; size = 0; } }
/****************************************************************************
 ** Name: usim_api_authenticate()                                          **
 **                                                                        **
 ** Description: Performs mutual authentication of the USIM to the network,**
 **      checking whether authentication token AUTN can be accepted. If    **
 **      so, returns an authentication response RES and the ciphering and  **
 **      integrity keys. In case of synch failure, returns a               **
 **      re-synchronization token AUTS.                                    **
 **                                                                        **
 **      3GPP TS 31.102, section 7.1.1.1. Authentication and key           **
 **      generating function algorithms are specified in 3GPP TS 35.206.   **
 **                                                                        **
 ** Inputs:  rand_pP: Random challenge number                              **
 **          autn_pP: Authentication token                                 **
 **                   AUTN = (SQN xor AK) || AMF || MAC                    **
 **                             48         16     64 bits                  **
 **                                                                        **
 ** Outputs: auts_pP: Re-synchronization token                             **
 **          res_pP:  Authentication response                              **
 **          ck_pP:   Ciphering key                                        **
 **          ik_pP:   Integrity key                                        **
 **          Return:  RETURNerror, RETURNok                                **
 ***************************************************************************/
int usim_api_authenticate(const OctetString* rand_pP,
                          const OctetString* autn_pP,
                          OctetString* auts_pP,
                          OctetString* res_pP,
                          OctetString* ck_pP,
                          OctetString* ik_pP)
{
  LOG_FUNC_IN;

  int rc;
  int i;

  LOG_TRACE(DEBUG, "USIM-API - rand :%s",dump_octet_string(rand_pP));
  LOG_TRACE(DEBUG, "USIM-API - autn :%s",dump_octet_string(autn_pP));

  /* Compute, in a single pass:
   *   RES = f2K(RAND), CK = f3K(RAND), IK = f4K(RAND), AK = f5K(RAND) */
#define USIM_API_AK_SIZE 6
  u8 ak[USIM_API_AK_SIZE];
  f2345(_usim_api_k, rand_pP->value,
        res_pP->value, ck_pP->value, ik_pP->value, ak);
  LOG_TRACE(DEBUG, "USIM-API - res(f2) :%s",dump_octet_string(res_pP));
  LOG_TRACE(DEBUG, "USIM-API - ck(f3) :%s",dump_octet_string(ck_pP));
  LOG_TRACE(DEBUG, "USIM-API - ik(f4) :%s",dump_octet_string(ik_pP));
  LOG_TRACE(DEBUG, "USIM-API - ak(f5) : %02X%02X%02X%02X%02X%02X",
            ak[0],ak[1],ak[2],ak[3],ak[4],ak[5]);

  /* Retrieve the sequence number SQN = (SQN xor AK) xor AK */
#define USIM_API_SQN_SIZE USIM_API_AK_SIZE
  u8 sqn[USIM_API_SQN_SIZE];

  for (i = 0; i < USIM_API_SQN_SIZE; i++) {
    sqn[i] = autn_pP->value[i] ^ ak[i];
  }

  LOG_TRACE(DEBUG, "USIM-API - Retrieved SQN %02X%02X%02X%02X%02X%02X",
            sqn[0],sqn[1],sqn[2],sqn[3],sqn[4],sqn[5]);

  /* Compute XMAC = f1K (SQN || RAND || AMF) */
#define USIM_API_XMAC_SIZE 8
  u8 xmac[USIM_API_XMAC_SIZE];
  f1(_usim_api_k, rand_pP->value, sqn,
     &autn_pP->value[USIM_API_SQN_SIZE], xmac);
  LOG_TRACE(DEBUG,
            "USIM-API - Computed XMAC %02X%02X%02X%02X%02X%02X%02X%02X",
            xmac[0],xmac[1],xmac[2],xmac[3],
            xmac[4],xmac[5],xmac[6],xmac[7]);

  /* Compare the XMAC with the MAC included in AUTN */
#define USIM_API_AMF_SIZE 2

  if ( memcmp(xmac, &autn_pP->value[USIM_API_SQN_SIZE + USIM_API_AMF_SIZE],
              USIM_API_XMAC_SIZE) != 0 ) {
    /* NOTE(review): a MAC mismatch is only logged; the error return below is
     * deliberately commented out in the original — confirm before enabling. */
    LOG_TRACE(INFO,
              "USIM-API - Comparing the XMAC with the MAC included in AUTN Failed");
    //LOG_FUNC_RETURN (RETURNerror);
  } else {
    LOG_TRACE(INFO,
              "USIM-API - Comparing the XMAC with the MAC included in AUTN Succeeded");
  }

  /* Verify that the received sequence number SQN is in the correct range */
  rc = _usim_api_check_sqn(*(uint32_t*)(sqn), sqn[USIM_API_SQN_SIZE - 1]);

  if (rc != RETURNok) {
    /* Synchronisation failure; compute the AUTS parameter */
    /* Concealed value of the counter SQNms in the USIM:
     * Conc(SQNMS) = SQNMS xor f5*K(RAND) */
    f5star(_usim_api_k, rand_pP->value, ak);

#define USIM_API_SQNMS_SIZE USIM_API_SQN_SIZE
    u8 sqn_ms[USIM_API_SQNMS_SIZE];
    memset(sqn_ms, 0, USIM_API_SQNMS_SIZE);

#define USIM_API_SQN_MS_SIZE 3
    /* BUG FIX (was flagged with '#warning "LG:BUG HERE TODO"'): the original
     * loop indexed sqn_ms[USIM_API_SQNMS_SIZE - i], which at i == 0 wrote
     * sqn_ms[6] — one past the end of the 6-byte buffer — and read source
     * index 3 of the 3-byte stored SQNms. Copy the 3 stored SQNms bytes
     * into the low-order (trailing) end of the 6-byte buffer instead. */
    for (i = 0; i < USIM_API_SQN_MS_SIZE; i++) {
      sqn_ms[USIM_API_SQNMS_SIZE - 1 - i] =
        ((uint8_t*)(_usim_api_data.sqn_ms))[USIM_API_SQN_MS_SIZE - 1 - i];
    }

    u8 sqnms[USIM_API_SQNMS_SIZE];

    for (i = 0; i < USIM_API_SQNMS_SIZE; i++) {
      sqnms[i] = sqn_ms[i] ^ ak[i];
    }

    LOG_TRACE(DEBUG, "USIM-API - SQNms %02X%02X%02X%02X%02X%02X",
              sqnms[0],sqnms[1],sqnms[2],sqnms[3],sqnms[4],sqnms[5]);

    /* Synchronisation message authentication code:
     * MACS = f1*K(SQNMS || RAND || AMF) */
#define USIM_API_MACS_SIZE USIM_API_XMAC_SIZE
    u8 macs[USIM_API_MACS_SIZE];
    /* NOTE(review): the AMF bytes are taken from rand_pP here, not from
     * AUTN — presumably a dummy AMF for re-synchronisation; confirm against
     * 3GPP TS 33.102. */
    f1star(_usim_api_k, rand_pP->value, sqn_ms,
           &rand_pP->value[USIM_API_SQN_SIZE], macs);
    LOG_TRACE(DEBUG, "USIM-API - MACS %02X%02X%02X%02X%02X%02X%02X%02X",
              macs[0],macs[1],macs[2],macs[3],
              macs[4],macs[5],macs[6],macs[7]);

    /* Synchronisation authentication token:
     * AUTS = Conc(SQNMS) || MACS */
    memcpy(&auts_pP->value[0], sqnms, USIM_API_SQNMS_SIZE);
    memcpy(&auts_pP->value[USIM_API_SQNMS_SIZE], macs, USIM_API_MACS_SIZE);
  }

  LOG_FUNC_RETURN (RETURNok);
}
/* Check password strength and history.
 *
 * Resolves the password policy that applies to this change type, reads the
 * target entry's expiration/history attributes, runs the policy check and
 * computes the new expiration time into data->expireTime. Returns the
 * ipapwd_check_policy() result (0 on success). */
int ipapwd_CheckPolicy(struct ipapwd_data *data)
{
    struct ipapwd_policy pol = {0};
    struct ipapwd_policy tmppol = {0};
    time_t acct_expiration;
    time_t pwd_expiration;
    time_t last_pwd_change;
    char **pwd_history;
    char *tmpstr;
    int ret;

    pol.max_pwd_life = IPAPWD_DEFAULT_PWDLIFE;
    pol.min_pwd_length = IPAPWD_DEFAULT_MINLEN;

    switch(data->changetype) {
    case IPA_CHANGETYPE_NORMAL:
        /* Find the entry with the password policy */
        ret = ipapwd_getPolicy(data->dn, data->target, &pol);
        if (ret) {
            LOG_TRACE("No password policy, use defaults");
        }
        break;
    case IPA_CHANGETYPE_ADMIN:
        /* The expiration date needs to be older than the current time
         * otherwise the KDC may not immediately register the password
         * as expired. The last password change needs to match the
         * password expiration otherwise minlife issues will arise.
         */
        data->timeNow -= 1;
        data->expireTime = data->timeNow;

        /* let set the entry password property according to its
         * entry password policy (done with ipapwd_getPolicy)
         * For this intentional fallthrough here */
    case IPA_CHANGETYPE_DSMGR:
        /* PassSync agents and Directory Manager can administratively
         * change the password without expiring it.
         *
         * Find password policy for the entry to properly set expiration.
         * Do not store it in resulting policy to avoid applying password
         * quality checks on administratively set passwords */
        ret = ipapwd_getPolicy(data->dn, data->target, &tmppol);
        if (ret) {
            LOG_TRACE("No password policy, use defaults");
        } else {
            pol.max_pwd_life = tmppol.max_pwd_life;
            pol.history_length = tmppol.history_length;
        }
        break;
    default:
        LOG_TRACE("Unknown password change type, use defaults");
        break;
    }

    /* Read current expiration/history state from the target entry. */
    tmpstr = slapi_entry_attr_get_charptr(data->target,
                                          "krbPrincipalExpiration");
    acct_expiration = ipapwd_gentime_to_time_t(tmpstr);
    slapi_ch_free_string(&tmpstr);

    tmpstr = slapi_entry_attr_get_charptr(data->target,
                                          "krbPasswordExpiration");
    pwd_expiration = ipapwd_gentime_to_time_t(tmpstr);
    slapi_ch_free_string(&tmpstr);

    tmpstr = slapi_entry_attr_get_charptr(data->target,
                                          "krbLastPwdChange");
    last_pwd_change = ipapwd_gentime_to_time_t(tmpstr);
    slapi_ch_free_string(&tmpstr);

    pwd_history = slapi_entry_attr_get_charray(data->target,
                                               "passwordHistory");

    /* check policy */
    ret = ipapwd_check_policy(&pol, data->password, data->timeNow,
                              acct_expiration,
                              pwd_expiration,
                              last_pwd_change,
                              pwd_history);

    slapi_ch_array_free(pwd_history);

    if (data->expireTime == 0) {
        if (pol.max_pwd_life > 0) {
            /* max_pwd_life = 0 => never expire
             * set expire time only when max_pwd_life > 0 */
            data->expireTime = data->timeNow + pol.max_pwd_life;
        }
    }

    data->policy = pol;

    return ret;
}
/* Modify the Password attributes of the entry.
 *
 * Builds a Slapi_Mods set containing every password representation the
 * target entry needs (krbPrincipalKey, sambaNTPassword, ipaNTHash,
 * userPassword, history) plus the bookkeeping attributes
 * (krbLastPwdChange, krbPasswordExpiration, sambaPwdLastSet,
 * krbLoginFailedCount), then applies them in one internal modify.
 *
 * @param krbcfg  Kerberos configuration (keys/enctypes) used for hashing.
 * @param data    Password-change context (target entry, password, times).
 * @param is_krb  Non-zero when Kerberos keys must be generated.
 * @return LDAP result code (0/LDAP_SUCCESS on success).
 *
 * All allocations are released at free_and_return regardless of the
 * failure point, so every early exit must `goto` there. */
int ipapwd_SetPassword(struct ipapwd_krbcfg *krbcfg,
                       struct ipapwd_data *data, int is_krb)
{
    int ret = 0;
    Slapi_Mods *smods = NULL;
    Slapi_Value **svals = NULL;
    Slapi_Value **ntvals = NULL;
    Slapi_Value **pwvals = NULL;
    char *nt = NULL;
    int is_smb = 0;
    int is_ipant = 0;
    int is_host = 0;
    Slapi_Value *sambaSamAccount;
    Slapi_Value *ipaNTUserAttrs;
    Slapi_Value *ipaHost;
    char *errMesg = NULL;
    char *modtime = NULL;

    LOG_TRACE("=>\n");

    /* Probe objectClass to learn which password representations the
     * entry carries; each probe value is freed immediately. */
    sambaSamAccount = slapi_value_new_string("sambaSamAccount");
    if (slapi_entry_attr_has_syntax_value(data->target,
                                          "objectClass", sambaSamAccount)) {
        is_smb = 1;
    }
    slapi_value_free(&sambaSamAccount);

    ipaNTUserAttrs = slapi_value_new_string("ipaNTUserAttrs");
    if (slapi_entry_attr_has_syntax_value(data->target,
                                          "objectClass", ipaNTUserAttrs)) {
        is_ipant = 1;
    }
    slapi_value_free(&ipaNTUserAttrs);

    ipaHost = slapi_value_new_string("ipaHost");
    if (slapi_entry_attr_has_syntax_value(data->target,
                                          "objectClass", ipaHost)) {
        is_host = 1;
    }
    slapi_value_free(&ipaHost);

    ret = ipapwd_gen_hashes(krbcfg, data, data->password,
                            is_krb, is_smb, is_ipant,
                            &svals, &nt, &ntvals, &errMesg);
    if (ret) {
        goto free_and_return;
    }

    smods = slapi_mods_new();

    if (svals) {
        slapi_mods_add_mod_values(smods, LDAP_MOD_REPLACE,
                                  "krbPrincipalKey", svals);

        /* krbLastPwdChange is used to tell whether a host entry has a
         * keytab so don't set it on hosts. */
        if (!is_host) {
            /* change Last Password Change field with the current date */
            ret = ipapwd_setdate(data->target, smods, "krbLastPwdChange",
                                 data->timeNow, false);
            if (ret != LDAP_SUCCESS) goto free_and_return;

            /* set Password Expiration date; expireTime == 0 means the
             * attribute is removed (never expires). */
            ret = ipapwd_setdate(data->target, smods, "krbPasswordExpiration",
                                 data->expireTime, (data->expireTime == 0));
            if (ret != LDAP_SUCCESS) goto free_and_return;
        }
    }

    if (nt && is_smb) {
        slapi_mods_add_string(smods, LDAP_MOD_REPLACE,
                              "sambaNTPassword", nt);
    }

    if (ntvals && is_ipant) {
        slapi_mods_add_mod_values(smods, LDAP_MOD_REPLACE,
                                  "ipaNTHash", ntvals);
    }

    if (is_smb) {
        /* with samba integration we need to also set sambaPwdLastSet or
         * samba will decide the user has to change the password again */
        if (data->changetype == IPA_CHANGETYPE_ADMIN) {
            /* if it is an admin change instead we need to let know to
             * samba as well that the user must change its password */
            modtime = slapi_ch_smprintf("0");
        } else {
            modtime = slapi_ch_smprintf("%ld", (long)data->timeNow);
        }
        if (!modtime) {
            LOG_FATAL("failed to smprintf string!\n");
            ret = LDAP_OPERATIONS_ERROR;
            goto free_and_return;
        }
        slapi_mods_add_string(smods, LDAP_MOD_REPLACE,
                              "sambaPwdLastset", modtime);
    }
    if (is_krb) {
        if (data->changetype == IPA_CHANGETYPE_ADMIN) {
            /* Admin reset also clears the lockout counter. */
            slapi_mods_add_string(smods, LDAP_MOD_REPLACE,
                                  "krbLoginFailedCount", "0");
        }
    }
    /* let DS encode the password itself, this allows also other plugins to
     * intercept it to perform operations like synchronization with Active
     * Directory domains through the replication plugin */
    slapi_mods_add_string(smods, LDAP_MOD_REPLACE,
                          "userPassword", data->password);

    /* set password history */
    if (data->policy.history_length > 0) {
        pwvals = ipapwd_setPasswordHistory(smods, data);
        if (pwvals) {
            slapi_mods_add_mod_values(smods, LDAP_MOD_REPLACE,
                                      "passwordHistory", pwvals);
        }
    }

    /* FIXME:
     * instead of replace we should use a delete/add so that we are
     * completely sure nobody else modified the entry meanwhile and
     * fail if that's the case */

    /* commit changes */
    ret = ipapwd_apply_mods(data->dn, smods);

    LOG_TRACE("<= result: %d\n", ret);

free_and_return:
    if (nt) slapi_ch_free((void **)&nt);
    if (modtime) slapi_ch_free((void **)&modtime);
    slapi_mods_free(&smods);
    ipapwd_free_slapi_value_array(&svals);
    ipapwd_free_slapi_value_array(&ntvals);
    ipapwd_free_slapi_value_array(&pwvals);

    return ret;
}
/* Locate and read the password policy that governs `target`.
 *
 * The policy DN is taken from the entry's krbPwdPolicyReference attribute
 * when present, otherwise the hardcoded "cn=global_policy,<realm dn>"
 * fallback is used. A base-scope internal search for
 * (objectClass=krbPwdPolicy) is run against that DN and the krb* limits
 * are copied into `policy`.
 *
 * @param dn      DN of the entry whose password is changing (log only).
 * @param target  The entry being modified; source of the policy reference.
 * @param policy  Out: filled with min/max life, lengths, history, diffchars.
 * @return 0 on success, -1 when no (single) policy entry could be read.
 *
 * All owned resources (valueset, pblock, pdn) are released at `done`. */
int ipapwd_getPolicy(const char *dn,
                     Slapi_Entry *target,
                     struct ipapwd_policy *policy)
{
    const char *krbPwdPolicyReference;
    char *pdn = NULL;
    Slapi_PBlock *pb = NULL;
    char *attrs[] = { "krbMaxPwdLife", "krbMinPwdLife",
                      "krbPwdMinDiffChars", "krbPwdMinLength",
                      "krbPwdHistoryLength", NULL};
    Slapi_Entry **es = NULL;
    Slapi_Entry *pe = NULL;
    int ret, res, scope, i;
    int buffer_flags=0;
    Slapi_ValueSet* results = NULL;
    char *actual_type_name = NULL;

    LOG_TRACE("Searching policy for [%s]\n", dn);

    pwd_get_values(target, "krbPwdPolicyReference",
                   &results, &actual_type_name, &buffer_flags);
    if (results) {
        /* Use the first (only) value of the reference as the policy DN. */
        Slapi_Value *sv;
        slapi_valueset_first_value(results, &sv);
        krbPwdPolicyReference = slapi_value_get_string(sv);
        pdn = slapi_ch_strdup(krbPwdPolicyReference);
    } else {
        /* Fallback to hardcoded value */
        pdn = slapi_ch_smprintf("cn=global_policy,%s", ipa_realm_dn);
    }
    if (pdn == NULL) {
        LOG_OOM();
        ret = -1;
        goto done;
    }
    LOG_TRACE("Using policy at [%s]\n", pdn);

    scope = LDAP_SCOPE_BASE;

    pb = slapi_pblock_new();
    slapi_search_internal_set_pb(pb,
                                 pdn, scope,
                                 "(objectClass=krbPwdPolicy)",
                                 attrs, 0,
                                 NULL, /* Controls */
                                 NULL, /* UniqueID */
                                 ipapwd_plugin_id,
                                 0); /* Flags */

    /* do search the tree */
    ret = slapi_search_internal_pb(pb);
    slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
    if (ret == -1 || res != LDAP_SUCCESS) {
        LOG_FATAL("Couldn't find policy, err (%d)\n", res ? res : ret);
        ret = -1;
        goto done;
    }

    /* get entries */
    slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &es);
    if (!es) {
        LOG_TRACE("No entries ?!");
        ret = -1;
        goto done;
    }

    /* count entries */
    for (i = 0; es[i]; i++) /* count */ ;

    /* if there is only one, return that */
    if (i == 1) {
        pe = es[0];
    } else {
        /* A base-scope search can only legitimately return one entry. */
        LOG_TRACE("Multiple entries from a base search ?!");
        ret = -1;
        goto done;
    }

    /* read data out of policy object */
    policy->min_pwd_life = slapi_entry_attr_get_int(pe, "krbMinPwdLife");
    policy->max_pwd_life = slapi_entry_attr_get_int(pe, "krbMaxPwdLife");
    policy->min_pwd_length = slapi_entry_attr_get_int(pe, "krbPwdMinLength");
    policy->history_length = slapi_entry_attr_get_int(pe,
                                                      "krbPwdHistoryLength");
    policy->min_complexity = slapi_entry_attr_get_int(pe,
                                                      "krbPwdMinDiffChars");

    ret = 0;

done:
    if (results) {
        pwd_values_free(&results, &actual_type_name, buffer_flags);
    }
    if (pb) {
        slapi_free_search_results_internal(pb);
        slapi_pblock_destroy(pb);
    }
    slapi_ch_free_string(&pdn);
    return ret;
}
int ipapwd_gen_checks(Slapi_PBlock *pb, char **errMesg, struct ipapwd_krbcfg **config, int check_flags) { int ret, ssf; int rc = LDAP_SUCCESS; Slapi_Backend *be; const Slapi_DN *psdn; Slapi_DN *sdn; char *dn = NULL; LOG_TRACE("=>\n"); #ifdef LDAP_EXTOP_PASSMOD_CONN_SECURE if (check_flags & IPAPWD_CHECK_CONN_SECURE) { /* Allow password modify on all connections with a Security Strength * Factor (SSF) higher than 1 */ if (slapi_pblock_get(pb, SLAPI_OPERATION_SSF, &ssf) != 0) { LOG("Could not get SSF from connection\n"); *errMesg = "Operation requires a secure connection.\n"; rc = LDAP_OPERATIONS_ERROR; goto done; } if (ssf <= 1) { *errMesg = "Operation requires a secure connection.\n"; rc = LDAP_CONFIDENTIALITY_REQUIRED; goto done; } } #endif if (check_flags & IPAPWD_CHECK_DN) { /* check we have a valid DN in the pblock or just abort */ ret = slapi_pblock_get(pb, SLAPI_TARGET_DN, &dn); if (ret) { LOG("Tried to change password for an invalid DN [%s]\n", dn ? dn : "<NULL>"); *errMesg = "Invalid DN"; rc = LDAP_OPERATIONS_ERROR; goto done; } sdn = slapi_sdn_new_dn_byref(dn); if (!sdn) { LOG_FATAL("Unable to convert dn to sdn %s", dn ? dn : "<NULL>"); *errMesg = "Internal Error"; rc = LDAP_OPERATIONS_ERROR; goto done; } be = slapi_be_select(sdn); slapi_sdn_free(&sdn); psdn = slapi_be_getsuffix(be, 0); if (!psdn) { *errMesg = "Invalid DN"; rc = LDAP_OPERATIONS_ERROR; goto done; } } /* get the kerberos context and master key */ *config = ipapwd_getConfig(); if (NULL == *config) { LOG_FATAL("Error Retrieving Master Key"); *errMesg = "Fatal Internal Error"; rc = LDAP_OPERATIONS_ERROR; } done: return rc; }
/**
 * First-order (barycentric) interpolation of node values inside the
 * tetrahedron (node0, node1, node2, node3).
 *
 * Each barycentric factor is the ratio of a sub-tetrahedron volume
 * (built from `node` and three of the vertices) to the full volume.
 * The 9 `values` entries and rho are blended with those factors; the
 * material id is taken from node0 without interpolation.
 *
 * NOTE(review): `Vol` is used as a divisor with no check for a
 * degenerate (zero-volume) tetrahedron — confirm upstream guarantees
 * non-degenerate input.
 *
 * @throws BAD_MESH when the factor sum indicates `node` lies far
 *         outside the tetrahedron (sum >= 10 after |.|).
 */
void gcm::TetrFirstOrderInterpolator::interpolate(CalcNode& node,
        CalcNode& node0, CalcNode& node1, CalcNode& node2, CalcNode& node3)
{
    LOG_TRACE("Start interpolation");

    // Signed volume of the whole tetrahedron, edges taken from node0.
    float Vol = tetrVolume(
        (node1.coords[0])-(node0.coords[0]),
        (node1.coords[1])-(node0.coords[1]),
        (node1.coords[2])-(node0.coords[2]),
        (node2.coords[0])-(node0.coords[0]),
        (node2.coords[1])-(node0.coords[1]),
        (node2.coords[2])-(node0.coords[2]),
        (node3.coords[0])-(node0.coords[0]),
        (node3.coords[1])-(node0.coords[1]),
        (node3.coords[2])-(node0.coords[2])
    );

    float factor[4];

    // factor[i] = |sub-volume opposite vertex i| / |Vol|; for an interior
    // point the four factors sum to 1.
    factor[0] = fabs(tetrVolume(
        (node1.coords[0])-(node.coords[0]),
        (node1.coords[1])-(node.coords[1]),
        (node1.coords[2])-(node.coords[2]),
        (node2.coords[0])-(node.coords[0]),
        (node2.coords[1])-(node.coords[1]),
        (node2.coords[2])-(node.coords[2]),
        (node3.coords[0])-(node.coords[0]),
        (node3.coords[1])-(node.coords[1]),
        (node3.coords[2])-(node.coords[2])
    ) / Vol);

    factor[1] = fabs(tetrVolume(
        (node0.coords[0])-(node.coords[0]),
        (node0.coords[1])-(node.coords[1]),
        (node0.coords[2])-(node.coords[2]),
        (node2.coords[0])-(node.coords[0]),
        (node2.coords[1])-(node.coords[1]),
        (node2.coords[2])-(node.coords[2]),
        (node3.coords[0])-(node.coords[0]),
        (node3.coords[1])-(node.coords[1]),
        (node3.coords[2])-(node.coords[2])
    ) / Vol);

    factor[2] = fabs(tetrVolume(
        (node1.coords[0])-(node.coords[0]),
        (node1.coords[1])-(node.coords[1]),
        (node1.coords[2])-(node.coords[2]),
        (node0.coords[0])-(node.coords[0]),
        (node0.coords[1])-(node.coords[1]),
        (node0.coords[2])-(node.coords[2]),
        (node3.coords[0])-(node.coords[0]),
        (node3.coords[1])-(node.coords[1]),
        (node3.coords[2])-(node.coords[2])
    ) / Vol);

    factor[3] = fabs(tetrVolume(
        (node1.coords[0])-(node.coords[0]),
        (node1.coords[1])-(node.coords[1]),
        (node1.coords[2])-(node.coords[2]),
        (node2.coords[0])-(node.coords[0]),
        (node2.coords[1])-(node.coords[1]),
        (node2.coords[2])-(node.coords[2]),
        (node0.coords[0])-(node.coords[0]),
        (node0.coords[1])-(node.coords[1]),
        (node0.coords[2])-(node.coords[2])
    ) / Vol);

    // If we see potential instability (sum > 1 means `node` is outside
    // the tetrahedron or rounding noise accumulated)
    if (factor[0] + factor[1] + factor[2] + factor[3] > 1.0)
    {
        // If it is small - treat instability as minor and just 'smooth' it
        // TODO - think about it more carefully
        //if( point_in_tetr(node.local_num, node.coords[0], node.coords[1], node.coords[2], tetr) )
        if (factor[0] + factor[1] + factor[2] + factor[3] < 10) // FIXME@avasyukov
        {
            if (factor[0] + factor[1] + factor[2] + factor[3] > 5.0)
                LOG_ERROR("Factor: " << factor[0] + factor[1] + factor[2] + factor[3]);
            // Renormalize the factors so they sum to exactly 1.
            float sum = factor[0] + factor[1] + factor[2] + factor[3];
            for (int i = 0; i < 4; i++)
                factor[i] = factor[i] / sum;
        }
        // If point is not in tetr - throw exception
        else
        {
            /* *logger << "\tTetrVol = " < Vol;
            *logger << "\tfactor[0]=" << factor[0] << " factor[1]=" << factor[1] << " factor[2]=" << factor[2] << " factor[3]=" << factor[3] << " Sum: " < factor[0] + factor[1] + factor[2] + factor[3];
            *logger << "\tnode.x[0]=" << node.coords[0] << " node.x[1]=" << node.coords[1] << " node.x[2]=" < node.coords[2];
            if( node.isFirstOrder() )
                *logger < "First order node";
            else if( node.isSecondOrder() )
                *logger < "Second order node";
            *logger << "\tv0.x[0]=" << nodes[tetr.vert[0]].coords[0] << " v0.x[1]=" << nodes[tetr.vert[0]].coords[1] << " v0.x[2]=" < nodes[tetr.vert[0]].coords[2];
            *logger << "\tv1.x[0]=" << nodes[tetr.vert[1]].coords[0] << " v1.x[1]=" << nodes[tetr.vert[1]].coords[1] << " v1.x[2]=" < nodes[tetr.vert[1]].coords[2];
            *logger << "\tv2.x[0]=" << nodes[tetr.vert[2]].coords[0] << " v2.x[1]=" << nodes[tetr.vert[2]].coords[1] << " v2.x[2]=" < nodes[tetr.vert[2]].coords[2];
            *logger << "\tv3.x[0]=" << nodes[tetr.vert[3]].coords[0] << " v3.x[1]=" << nodes[tetr.vert[3]].coords[1] << " v3.x[2]=" < nodes[tetr.vert[3]].coords[2];*/
            LOG_ERROR("Requested node: " << node);
            LOG_ERROR("Node #1: " << node0);
            LOG_ERROR("Node #2: " << node1);
            LOG_ERROR("Node #3: " << node2);
            LOG_ERROR("Node #4: " << node3);
            LOG_ERROR("Factor: " << factor[0] + factor[1] + factor[2] + factor[3]);
            THROW_BAD_MESH("Sum of factors is greater than 1.0");
        }
    }

    // Blend the 9 stored values with the barycentric weights.
    for (int i = 0; i < 9; i++)
    {
        node.values[i] = (node0.values[i] * factor[0]
                + node1.values[i] * factor[1]
                + node2.values[i] * factor[2]
                + node3.values[i] * factor[3]);
    }
    node.setRho(node0.getRho() * factor[0] + node1.getRho() * factor[1]
            + node2.getRho() * factor[2] + node3.getRho() * factor[3]);
    // Material is not interpolated; node0's id is used as-is.
    node.setMaterialId(node0.getMaterialId());
    LOG_TRACE("Interpolation done");
}
/**
 * @brief Creates logical tile from tile group and applies scan predicate.
 *
 * Two execution modes:
 *  - one child (and that child is NOT a CREATE INDEX plan): re-filter the
 *    logical tiles produced by the child using predicate_;
 *  - zero children, or one child that IS a CREATE INDEX plan: scan
 *    target_table_ tile group by tile group, applying MVCC visibility
 *    checks and the predicate, taking read locks via PerformRead.
 *
 * Emits at most one non-empty logical tile per call (pipelined executor).
 * @return true when a tile was produced, false when the scan is exhausted
 *         or a read could not be acquired.
 */
bool SeqScanExecutor::DExecute() {
  // Scanning over a logical tile.
  if (children_.size() == 1 &&
      // There will be a child node on the create index scenario,
      // but we don't want to use this execution flow
      !(GetRawNode()->GetChildren().size() > 0 &&
        GetRawNode()->GetChildren()[0].get()->GetPlanNodeType() ==
            PlanNodeType::CREATE &&
        ((planner::CreatePlan *)GetRawNode()->GetChildren()[0].get())
                ->GetCreateType() == CreateType::INDEX)) {
    // FIXME Check all requirements for children_.size() == 0 case.
    LOG_TRACE("Seq Scan executor :: 1 child ");

    PELOTON_ASSERT(target_table_ == nullptr);
    PELOTON_ASSERT(column_ids_.size() == 0);

    while (children_[0]->Execute()) {
      std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

      if (predicate_ != nullptr) {
        // Invalidate tuples that don't satisfy the predicate.
        for (oid_t tuple_id : *tile) {
          ContainerTuple<LogicalTile> tuple(tile.get(), tuple_id);
          auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_);
          if (eval.IsFalse()) {
            // if (predicate_->Evaluate(&tuple, nullptr, executor_context_)
            //         .IsFalse()) {
            tile->RemoveVisibility(tuple_id);
          }
        }
      }

      if (0 == tile->GetTupleCount()) {  // Avoid returning empty tiles
        continue;
      }

      /* Hopefully we needn't do projections here */
      SetOutput(tile.release());
      return true;
    }
    return false;
  }
  // Scanning a table
  else if (children_.size() == 0 ||
           // If we are creating an index, there will be a child
           (children_.size() == 1 &&
            // This check is only needed to pass seq_scan_test
            // unless it is possible to add a executor child
            // without a corresponding plan.
            GetRawNode()->GetChildren().size() > 0 &&
            // Check if the plan is what we actually expect.
            GetRawNode()->GetChildren()[0].get()->GetPlanNodeType() ==
                PlanNodeType::CREATE &&
            // If it is, confirm it is for indexes
            ((planner::CreatePlan *)GetRawNode()->GetChildren()[0].get())
                    ->GetCreateType() == CreateType::INDEX)) {
    LOG_TRACE("Seq Scan executor :: 0 child ");

    PELOTON_ASSERT(target_table_ != nullptr);
    PELOTON_ASSERT(column_ids_.size() > 0);

    if (children_.size() > 0 && !index_done_) {
      children_[0]->Execute();
      // This stops continuous executions due to
      // a parent and avoids multiple creations
      // of the same index.
      index_done_ = true;
    }

    concurrency::TransactionManager &transaction_manager =
        concurrency::TransactionManagerFactory::GetInstance();

    bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate();
    auto current_txn = executor_context_->GetTransaction();

    // Retrieve next tile group.
    while (current_tile_group_offset_ < table_tile_group_count_) {
      auto tile_group =
          target_table_->GetTileGroup(current_tile_group_offset_++);
      auto tile_group_header = tile_group->GetHeader();

      oid_t active_tuple_count = tile_group->GetNextTupleSlot();

      // Construct position list by looping through tile group
      // and applying the predicate.
      std::vector<oid_t> position_list;
      for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
        ItemPointer location(tile_group->GetTileGroupId(), tuple_id);

        auto visibility = transaction_manager.IsVisible(
            current_txn, tile_group_header, tuple_id);

        // check transaction visibility
        if (visibility == VisibilityType::OK) {
          // if the tuple is visible, then perform predicate evaluation.
          if (predicate_ == nullptr) {
            position_list.push_back(tuple_id);
            // PerformRead records the read set / takes ownership when
            // this is a SELECT FOR UPDATE.
            auto res = transaction_manager.PerformRead(current_txn, location,
                                                       acquire_owner);
            if (!res) {
              transaction_manager.SetTransactionResult(current_txn,
                                                       ResultType::FAILURE);
              return res;
            }
          } else {
            ContainerTuple<storage::TileGroup> tuple(tile_group.get(),
                                                     tuple_id);
            LOG_TRACE("Evaluate predicate for a tuple");
            auto eval =
                predicate_->Evaluate(&tuple, nullptr, executor_context_);
            LOG_TRACE("Evaluation result: %s", eval.GetInfo().c_str());
            if (eval.IsTrue()) {
              position_list.push_back(tuple_id);
              auto res = transaction_manager.PerformRead(current_txn, location,
                                                         acquire_owner);
              if (!res) {
                transaction_manager.SetTransactionResult(current_txn,
                                                         ResultType::FAILURE);
                return res;
              } else {
                LOG_TRACE("Sequential Scan Predicate Satisfied");
              }
            }
          }
        }
      }

      // Don't return empty tiles
      if (position_list.size() == 0) {
        continue;
      }

      // Construct logical tile.
      std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
      logical_tile->AddColumns(tile_group, column_ids_);
      logical_tile->AddPositionList(std::move(position_list));

      LOG_TRACE("Information %s", logical_tile->GetInfo().c_str());
      SetOutput(logical_tile.release());
      return true;
    }
  }
  return false;
}
void connection::do_read_head() { auto self(shared_from_this()); // bytes_transferred never grater than sizeof(buffer_) socket_.async_read_some(boost::asio::buffer(buffer_), strand_.wrap( [this, self](boost::system::error_code ec, std::size_t bytes_transferred) { if (!ec) { timestamp_ = time(NULL); total_bytes_received_ += bytes_transferred; // check http package size if(total_bytes_received_ > max_http_package_size) { reply_ = reply::stock_reply(reply::bad_request); do_write(); LOG_TRACE("too long http request from client: %s:u", this->get_address(), port_); return; } request_parser::result_type result; std::tie(result, std::ignore) = request_parser_.parse( request_, buffer_.data(), buffer_.data() + bytes_transferred); // check header switch(result) { case request_parser::good: // read header complete if(0 == stricmp(request_.method.c_str(), "POST")) { /// head read complete // LOG_TRACE_ALL("header length:%d, content length:%d", request_.header_length, request_.content_length); size_t content_bytes = this->total_bytes_received_ - request_.header_length; if(content_bytes > 0) { // put remain data to content request_.content.append(buffer_.data() + (bytes_transferred - content_bytes), content_bytes); } do_check(); } else if(0 == stricmp(request_.method.c_str(), "GET")) { request_handler_.handle_request(request_, reply_); do_write(); } else { // do not support other request reply_ = reply::stock_reply(reply::bad_request); do_write(); } break; case request_parser::bad: reply_ = reply::stock_reply(reply::bad_request); do_write(); break; default: // head insufficient do_read_head(); } } else { LOG_TRACE("peer interrupt ahead of time, detail:%s", ec.message().c_str()); connection_manager_.stop(shared_from_this()); /*if(ec != boost::asio::error::operation_aborted) { connection_manager_.stop(shared_from_this()); }*/ } }) ); }
/// Cleanup thread main loop for a vocbase.
///
/// Runs until the vocbase reaches shutdown state (3). Each iteration:
/// cleans cursors on state 2, and — only when the compactor lock can be
/// taken exclusively — snapshots the collection list and runs index
/// cleanup (every CLEANUP_INDEX_ITERATIONS rounds) plus datafile cleanup
/// per collection. While the server is running it also expires cursors
/// periodically, releases stale compactor locks, and sleeps on the
/// cleanup condition variable between rounds.
///
/// @param data  opaque pointer to the TRI_vocbase_t this thread serves.
void TRI_CleanupVocBase (void* data) {
  TRI_vector_pointer_t collections;
  uint64_t iterations = 0;

  TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(data);
  assert(vocbase);
  assert(vocbase->_state == 1);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    int state;

    // keep initial _state value as vocbase->_state might change during cleanup loop
    state = vocbase->_state;

    ++iterations;

    if (state == 2) {
      // shadows must be cleaned before collections are handled
      // otherwise the shadows might still hold barriers on collections
      // and collections cannot be closed properly
      CleanupCursors(vocbase, true);
    }

    // check if we can get the compactor lock exclusively
    if (TRI_CheckAndLockCompactorVocBase(vocbase)) {
      size_t i, n;

      // copy all collections (snapshot taken under the read lock, worked
      // on after releasing it)
      TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
      TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
      TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

      n = collections._length;

      for (i = 0; i < n; ++i) {
        TRI_vocbase_col_t* collection;
        TRI_primary_collection_t* primary;
        TRI_document_collection_t* document;

        collection = (TRI_vocbase_col_t*) collections._buffer[i];

        TRI_READ_LOCK_STATUS_VOCBASE_COL(collection);

        primary = collection->_collection;

        if (primary == NULL) {
          // collection is not loaded; nothing to clean up
          TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
          continue;
        }

        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);

        // we're the only ones that can unload the collection, so using
        // the collection pointer outside the lock is ok

        // maybe cleanup indexes, unload the collection or some datafiles
        document = (TRI_document_collection_t*) primary;

        // clean indexes?
        if (iterations % (uint64_t) CLEANUP_INDEX_ITERATIONS == 0) {
          document->cleanupIndexes(document);
        }

        CleanupDocumentCollection(document);
      }

      TRI_UnlockCompactorVocBase(vocbase);
    }

    if (vocbase->_state >= 1) {
      // server is still running, clean up unused shadows
      if (iterations % CLEANUP_SHADOW_ITERATIONS == 0) {
        CleanupCursors(vocbase, false);
      }

      // clean up expired compactor locks
      TRI_CleanupCompactorVocBase(vocbase);

      if (state == 1) {
        // normal operation: wait (with timeout) until woken for more work
        TRI_LockCondition(&vocbase->_cleanupCondition);
        TRI_TimedWaitCondition(&vocbase->_cleanupCondition,
                               (uint64_t) CLEANUP_INTERVAL);
        TRI_UnlockCondition(&vocbase->_cleanupCondition);
      }
    }

    if (state == 3) {
      // server shutdown
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);

  LOG_TRACE("shutting down cleanup thread");
}
/* Parse an HDMI Vendor-Specific Data Block out of raw EDID bytes.
 *
 * Validates the data block tag (0x3), the declared length (>= 5) and the
 * HDMI IEEE OUI (0x000C03), then extracts the physical address, the
 * optional capability flags, the optional max TMDS clock and the optional
 * latency fields, depending on how long the block claims to be.
 *
 * @param vsdb  destination structure; reset before filling.
 * @param data  pointer to the data block (first byte = tag/length).
 * @return TRUE when the block parsed cleanly, FALSE otherwise.
 */
int hdmivsdb_Parse(hdmivsdb_t *vsdb, u8 * data)
{
	u8 length = 0;

	LOG_TRACE();
	hdmivsdb_Reset(vsdb);

	if (data == 0) {
		return FALSE;
	}
	/* upper 3 bits of byte 0 hold the data block tag */
	if (bitOperation_BitField(data[0], 5, 3) != 0x3) {
		LOG_WARNING("Invalid datablock tag");
		return FALSE;
	}
	/* lower 5 bits hold the payload length */
	length = bitOperation_BitField(data[0], 0, 5);
	hdmivsdb_SetLength(vsdb, length);
	if (length < 5) {
		LOG_WARNING("Invalid minimum length");
		return FALSE;
	}
	/* bytes 1..3 (little-endian) must carry the HDMI OUI */
	if (bitOperation_Bytes2Dword(0x00, data[3], data[2], data[1]) != 0x000C03) {
		LOG_WARNING("HDMI IEEE registration identifier not valid");
		return FALSE;
	}

	hdmivsdb_Reset(vsdb);
	hdmivsdb_SetId(vsdb, 0x000C03);
	vsdb->mPhysicalAddress = bitOperation_Bytes2Word(data[4], data[5]);

	/* capability flags default to off; byte 6 overrides when present */
	vsdb->mSupportsAi = FALSE;
	vsdb->mDeepColor48 = FALSE;
	vsdb->mDeepColor36 = FALSE;
	vsdb->mDeepColor30 = FALSE;
	vsdb->mDeepColorY444 = FALSE;
	vsdb->mDviDual = FALSE;
	if (length > 5) {
		vsdb->mSupportsAi = (bitOperation_BitField(data[6], 7, 1) == 1);
		vsdb->mDeepColor48 = (bitOperation_BitField(data[6], 6, 1) == 1);
		vsdb->mDeepColor36 = (bitOperation_BitField(data[6], 5, 1) == 1);
		vsdb->mDeepColor30 = (bitOperation_BitField(data[6], 4, 1) == 1);
		vsdb->mDeepColorY444 = (bitOperation_BitField(data[6], 3, 1) == 1);
		vsdb->mDviDual = (bitOperation_BitField(data[6], 0, 1) == 1);
	}

	/* optional max TMDS clock in byte 7 */
	vsdb->mMaxTmdsClk = 0;
	if (length > 6) {
		vsdb->mMaxTmdsClk = data[7];
	}

	/* latency fields are all-zero unless advertised in byte 8 */
	vsdb->mVideoLatency = 0;
	vsdb->mAudioLatency = 0;
	vsdb->mInterlacedVideoLatency = 0;
	vsdb->mInterlacedAudioLatency = 0;
	if (length > 7 && bitOperation_BitField(data[8], 7, 1) == 1) {
		/* Latency_Fields_Present requires at least bytes 9..10 */
		if (length < 10) {
			LOG_WARNING("Invalid length - latencies are not valid");
			return FALSE;
		}
		if (bitOperation_BitField(data[8], 6, 1) == 1) {
			/* I_Latency_Fields_Present requires bytes 11..12 as well */
			if (length < 12) {
				LOG_WARNING("Invalid length - Interlaced latencies are not valid");
				return FALSE;
			}
			vsdb->mVideoLatency = data[9];
			vsdb->mAudioLatency = data[10];
			vsdb->mInterlacedVideoLatency = data[11];
			vsdb->mInterlacedAudioLatency = data[12];
		} else {
			/* no separate interlaced values: reuse progressive ones */
			vsdb->mVideoLatency = data[9];
			vsdb->mAudioLatency = data[10];
			vsdb->mInterlacedVideoLatency = data[9];
			vsdb->mInterlacedAudioLatency = data[10];
		}
	}

	vsdb->mValid = TRUE;
	return TRUE;
}
//Invoked when catalog is destroyed void StorageManager::DestroyDatabases() { LOG_TRACE("Deleting databases"); for (auto database : databases_) delete database; LOG_TRACE("Finish deleting database"); }
void TRI_SynchroniserVocBase (void* data) { TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(data); TRI_vector_pointer_t collections; assert(vocbase->_state == 1); TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE); while (true) { size_t i, n; bool worked; // keep initial _state value as vocbase->_state might change during sync loop int state = vocbase->_state; worked = false; // copy all collections and release the lock TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase); TRI_CopyDataVectorPointer(&collections, &vocbase->_collections); TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase); // loop over all copied collections n = collections._length; for (i = 0; i < n; ++i) { TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(collections._buffer[i]); // if we cannot acquire the read lock instantly, we will continue. // otherwise we'll risk a multi-thread deadlock between synchroniser, // compactor and data-modification threads (e.g. POST /_api/document) if (! TRI_TRY_READ_LOCK_STATUS_VOCBASE_COL(collection)) { continue; } if (collection->_status != TRI_VOC_COL_STATUS_LOADED) { TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection); continue; } TRI_primary_collection_t* primary = collection->_collection; // for document collection, first sync and then seal bool result = CheckSyncDocumentCollection((TRI_document_collection_t*) primary); worked |= result; result = CheckJournalDocumentCollection((TRI_document_collection_t*) primary); worked |= result; TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection); } // only sleep while server is still running and no-one is waiting if (! worked && vocbase->_state == 1) { TRI_LOCK_SYNCHRONISER_WAITER_VOCBASE(vocbase); if (vocbase->_syncWaiters == 0) { TRI_WAIT_SYNCHRONISER_WAITER_VOCBASE(vocbase, (uint64_t) SYNCHRONISER_INTERVAL); } TRI_UNLOCK_SYNCHRONISER_WAITER_VOCBASE(vocbase); } // server shutdown if (state == 2) { break; } } TRI_DestroyVectorPointer(&collections); LOG_TRACE("shutting down synchroniser thread"); }
// constructor WriteAheadBackendLogger::WriteAheadBackendLogger() : BackendLogger() { logging_type = LOGGING_TYPE_NVM_WAL; frontend_logger_id = -1; // invalid LOG_TRACE("INSIDE CONSTRUCTOR"); }
static bool CheckSyncDocumentCollection (TRI_document_collection_t* document) { TRI_collection_t* base; TRI_datafile_t* journal; bool ok; bool worked; char const* synced; char* written; size_t i; size_t n; worked = false; base = &document->base.base; // ............................................................................. // the only thread MODIFYING the _journals variable is this thread, // therefore no locking is required to access the _journals // ............................................................................. n = base->_journals._length; for (i = 0; i < n; ++i) { journal = static_cast<TRI_datafile_t*>(base->_journals._buffer[i]); // we only need to care about physical datafiles if (! journal->isPhysical(journal)) { // anonymous regions do not need to be synced continue; } TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document); synced = journal->_synced; written = journal->_written; TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document); if (synced < written) { worked = true; ok = journal->sync(journal, synced, written); TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document); if (ok) { journal->_synced = written; } else { journal->_state = TRI_DF_STATE_WRITE_ERROR; } TRI_BROADCAST_JOURNAL_ENTRIES_DOC_COLLECTION(document); TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document); if (ok) { LOG_TRACE("msync succeeded %p, size %lu", synced, (unsigned long) (written - synced)); } else { LOG_ERROR("msync failed with: %s", TRI_last_error()); } } } return worked; }
bool HybridScanExecutor::SeqScanUtil() { assert(children_.size() == 0); // LOG_TRACE("Hybrid executor, Seq Scan :: 0 child"); assert(table_ != nullptr); assert(column_ids_.size() > 0); auto &transaction_manager = concurrency::TransactionManagerFactory::GetInstance(); auto current_txn = executor_context_->GetTransaction(); bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate(); // Retrieve next tile group. while (current_tile_group_offset_ < table_tile_group_count_) { LOG_TRACE("Current tile group offset : %u", current_tile_group_offset_); auto tile_group = table_->GetTileGroup(current_tile_group_offset_++); auto tile_group_header = tile_group->GetHeader(); oid_t active_tuple_count = tile_group->GetNextTupleSlot(); // Construct position list by looping through tile group // and applying the predicate. oid_t upper_bound_block = 0; if (item_pointers_.size() > 0) { auto reverse_iter = item_pointers_.rbegin(); upper_bound_block = reverse_iter->block; } std::vector<oid_t> position_list; for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) { ItemPointer location(tile_group->GetTileGroupId(), tuple_id); if (type_ == HYBRID_SCAN_TYPE_HYBRID && item_pointers_.size() > 0 && location.block <= upper_bound_block) { if (item_pointers_.find(location) != item_pointers_.end()) { continue; } } // Check transaction visibility if (transaction_manager.IsVisible(current_txn, tile_group_header, tuple_id) == VISIBILITY_OK) { // If the tuple is visible, then perform predicate evaluation. 
if (predicate_ == nullptr) { position_list.push_back(tuple_id); } else { expression::ContainerTuple<storage::TileGroup> tuple(tile_group.get(), tuple_id); auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue(); if (eval == true) { position_list.push_back(tuple_id); } } } else { expression::ContainerTuple<storage::TileGroup> tuple(tile_group.get(), tuple_id); auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue(); if (eval == true) { position_list.push_back(tuple_id); auto res = transaction_manager.PerformRead(current_txn, location, acquire_owner); if (!res) { transaction_manager.SetTransactionResult(current_txn, RESULT_FAILURE); return res; } } } } // Don't return empty tiles if (position_list.size() == 0) { continue; } // Construct logical tile. std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile()); logical_tile->AddColumns(tile_group, column_ids_); logical_tile->AddPositionList(std::move(position_list)); LOG_TRACE("Hybrid executor, Seq Scan :: Got a logical tile"); SetOutput(logical_tile.release()); return true; } return false; }
// Handle a CPU write to a memory-mapped GPU register.
// `addr` is a virtual address in the GPU register window; `data` is the value
// written. Only u32-sized accesses are supported (guard below rejects others).
// Side effects triggered by specific registers: memory fills, display
// transfers / texture copies, and command-list processing.
// NOTE: T comes from an enclosing template declaration outside this view.
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    // Latch the raw value first; the switch below only reacts to trigger registers.
    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3):
    {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            if (config.address_start) { // Some games pass invalid values here
                u8* start = Mem_GetPhysicalPointer(config.GetStartAddress());
                u8* end = Mem_GetPhysicalPointer(config.GetEndAddress());

                if (config.fill_24bit) {
                    // fill with 24-bit values
                    for (u8* ptr = start; ptr < end; ptr += 3) {
                        ptr[0] = config.value_24bit_r;
                        ptr[1] = config.value_24bit_g;
                        ptr[2] = config.value_24bit_b;
                    }
                } else if (config.fill_32bit) {
                    // fill with 32-bit values
                    for (u32* ptr = (u32*)start; ptr < (u32*)end; ++ptr)
                        *ptr = config.value_32bit;
                } else {
                    // fill with 16-bit values
                    for (u16* ptr = (u16*)start; ptr < (u16*)end; ++ptr)
                        *ptr = config.value_16bit;
                }

                LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(), config.GetEndAddress());

                // PSC0 / PSC1 interrupt depending on which filler fired.
                if (!is_second_filler) {
                    citraFireInterrupt(0x29/*GSP_GPU::InterruptId::PSC0*/);
                } else {
                    citraFireInterrupt(0x28/*GSP_GPU::InterruptId::PSC1*/);
                }

                if (!citraSettingSkipGSP)
                    VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetStartAddress(), config.GetEndAddress() - config.GetStartAddress());
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger = 0;
            config.finished = 1;
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger):
    {
        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {

            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer, nullptr);

            u8* src_pointer = Mem_GetPhysicalPointer(config.GetPhysicalInputAddress());
            u8* dst_pointer = Mem_GetPhysicalPointer(config.GetPhysicalOutputAddress());

            if (config.is_texture_copy) {
                // Raw byte copy with independent input/output line widths and gaps
                // (widths/gaps are in units of 16 bytes in the register encoding).
                u32 input_width = config.texture_copy.input_width * 16;
                u32 input_gap = config.texture_copy.input_gap * 16;
                u32 output_width = config.texture_copy.output_width * 16;
                u32 output_gap = config.texture_copy.output_gap * 16;

                size_t contiguous_input_size = config.texture_copy.size / input_width * (input_width + input_gap);
                VideoCore::g_renderer->hw_rasterizer->NotifyPreRead(config.GetPhysicalInputAddress(), contiguous_input_size);

                u32 remaining_size = config.texture_copy.size;
                u32 remaining_input = input_width;
                u32 remaining_output = output_width;
                while (remaining_size > 0) {
                    u32 copy_size = std::min({ remaining_input, remaining_output, remaining_size });

                    std::memcpy(dst_pointer, src_pointer, copy_size);
                    src_pointer += copy_size;
                    dst_pointer += copy_size;

                    remaining_input -= copy_size;
                    remaining_output -= copy_size;
                    remaining_size -= copy_size;

                    // Skip over the configured gap when a full line has been consumed/produced.
                    if (remaining_input == 0) {
                        remaining_input = input_width;
                        src_pointer += input_gap;
                    }
                    if (remaining_output == 0) {
                        remaining_output = output_width;
                        dst_pointer += output_gap;
                    }
                }

                LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> 0x%08X(%u+%u), flags 0x%08X", config.texture_copy.size, config.GetPhysicalInputAddress(), input_width, input_gap, config.GetPhysicalOutputAddress(), output_width, output_gap, config.flags);

                size_t contiguous_output_size = config.texture_copy.size / output_width * (output_width + output_gap);
                VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetPhysicalOutputAddress(), contiguous_output_size);

                citraFireInterrupt(0x2C/*GSP_GPU::InterruptId::PPF*/);
                break;
            }

            if (config.scaling > config.ScaleXY) {
                LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u", config.scaling.Value());
                UNIMPLEMENTED();
                break;
            }

            if (config.input_linear && config.scaling != config.NoScale) {
                LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
                UNIMPLEMENTED();
                break;
            }

            // Scaling halves the output dimension(s): ScaleX halves width,
            // ScaleXY halves both width and height.
            bool horizontal_scale = config.scaling != config.NoScale;
            bool vertical_scale = config.scaling == config.ScaleXY;

            u32 output_width = config.output_width >> horizontal_scale;
            u32 output_height = config.output_height >> vertical_scale;

            u32 input_size = config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
            u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

            VideoCore::g_renderer->hw_rasterizer->NotifyPreRead(config.GetPhysicalInputAddress(), input_size);

            for (u32 y = 0; y < output_height; ++y) {
                for (u32 x = 0; x < output_width; ++x) {
                    Math::Vec4<u8> src_color;

                    // Calculate the [x,y] position of the input image
                    // based on the current output position and the scale
                    u32 input_x = x << horizontal_scale;
                    u32 input_y = y << vertical_scale;

                    if (config.flip_vertically) {
                        // Flip the y value of the output data,
                        // we do this after calculating the [x,y] position of the input image
                        // to account for the scaling options.
                        // NOTE(review): this reassigns the OUTER loop variable `y`
                        // inside the inner x-loop, so `y` alternates between the
                        // flipped and unflipped value on successive x iterations
                        // (flip is an involution), and `input_y` on later x
                        // iterations is derived from the flipped value. This looks
                        // like it should use a separate `output_y` local — confirm
                        // against upstream before changing.
                        y = output_height - y - 1;
                    }

                    u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
                    u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
                    u32 src_offset;
                    u32 dst_offset;

                    // Four layouts: {linear, tiled} input x {tiled, linear} output;
                    // tiled addressing uses 8x8 Morton order plus a coarse-Y stride.
                    if (config.input_linear) {
                        if (!config.dont_swizzle) {
                            // Interpret the input as linear and the output as tiled
                            u32 coarse_y = y & ~7;
                            u32 stride = output_width * dst_bytes_per_pixel;

                            src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                            dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + coarse_y * stride;
                        } else {
                            // Both input and output are linear
                            src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                            dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                        }
                    } else {
                        if (!config.dont_swizzle) {
                            // Interpret the input as tiled and the output as linear
                            u32 coarse_y = input_y & ~7;
                            u32 stride = config.input_width * src_bytes_per_pixel;

                            src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) + coarse_y * stride;
                            dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                        } else {
                            // Both input and output are tiled
                            u32 out_coarse_y = y & ~7;
                            u32 out_stride = output_width * dst_bytes_per_pixel;

                            u32 in_coarse_y = input_y & ~7;
                            u32 in_stride = config.input_width * src_bytes_per_pixel;

                            src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) + in_coarse_y * in_stride;
                            dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + out_coarse_y * out_stride;
                        }
                    }

                    const u8* src_pixel = src_pointer + src_offset;
                    src_color = DecodePixel(config.input_format, src_pixel);

                    // Box-filter downscale: average 2 (ScaleX) or 4 (ScaleXY)
                    // horizontally adjacent source pixels.
                    if (config.scaling == config.ScaleX) {
                        Math::Vec4<u8> pixel = DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                        src_color = ((src_color + pixel) / 2).Cast<u8>();
                    } else if (config.scaling == config.ScaleXY) {
                        Math::Vec4<u8> pixel1 = DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel2 = DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel3 = DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                        src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
                    }

                    u8* dst_pixel = dst_pointer + dst_offset;
                    switch (config.output_format) {
                    case Regs::PixelFormat::RGBA8:
                        Color::EncodeRGBA8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB8:
                        Color::EncodeRGB8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB565:
                        Color::EncodeRGB565(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB5A1:
                        Color::EncodeRGB5A1(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGBA4:
                        Color::EncodeRGBA4(src_color, dst_pixel);
                        break;

                    default:
                        LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x", config.output_format.Value());
                        break;
                    }
                }
            }

            LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), dst format %x, flags 0x%08X", config.output_height * output_width * GPU::Regs::BytesPerPixel(config.output_format), config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(), config.GetPhysicalOutputAddress(), output_width, output_height, config.output_format.Value(), config.flags);

            g_regs.display_transfer_config.trigger = 0;
            citraFireInterrupt(0x2C/*GSP_GPU::InterruptId::PPF*/);

            VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetPhysicalOutputAddress(), output_size);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger):
    {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            u32* buffer = (u32*)Mem_GetPhysicalPointer(config.GetPhysicalAddress());
            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);
            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }
}
// Initialize executor state from the HybridScanPlan node.
// Dispatches on the plan's hybrid scan type: sequential, index, or hybrid
// (index up to a tile-group boundary, then sequential). Returns false only if
// the parent DInit() fails; throws on an unknown scan type.
bool HybridScanExecutor::DInit() {
  auto status = AbstractScanExecutor::DInit();

  if (!status) return false;

  const planner::HybridScanPlan &node = GetPlanNode<planner::HybridScanPlan>();

  table_ = node.GetTable();
  index_ = node.GetDataIndex();
  type_ = node.GetHybridType();

  PL_ASSERT(table_ != nullptr);

  // SEQUENTIAL SCAN
  if (type_ == HYBRID_SCAN_TYPE_SEQUENTIAL) {
    LOG_TRACE("Sequential Scan");
    current_tile_group_offset_ = START_OID;

    table_tile_group_count_ = table_->GetTileGroupCount();

    // Empty column list means "project all columns".
    if (column_ids_.empty()) {
      column_ids_.resize(table_->GetSchema()->GetColumnCount());
      std::iota(column_ids_.begin(), column_ids_.end(), 0);
    }
  }
  // INDEX SCAN
  else if (type_ == HYBRID_SCAN_TYPE_INDEX) {
    LOG_TRACE("Index Scan");
    index_ = node.GetIndex();

    result_itr_ = START_OID;
    index_done_ = false;
    result_.clear();

    PL_ASSERT(index_ != nullptr);

    column_ids_ = node.GetColumnIds();
    // NOTE(review): these are LOCALS despite the member-style trailing
    // underscore; key_column_ids_ and expr_types_ are never read afterwards.
    auto key_column_ids_ = node.GetKeyColumnIds();
    auto expr_types_ = node.GetExprTypes();
    values_ = node.GetValues();
    auto runtime_keys_ = node.GetRunTimeKeys();
    predicate_ = node.GetPredicate();
    key_ready_ = false;

    if (runtime_keys_.size() != 0) {
      assert(runtime_keys_.size() == values_.size());

      // NOTE(review): key_ready_ was just set false above, so this guard is
      // always taken here; it matters only in the HYBRID branch below.
      if (!key_ready_) {
        values_.clear();

        // Evaluate runtime scan keys now and replace the planned values.
        for (auto expr : runtime_keys_) {
          auto value = expr->Evaluate(nullptr, nullptr, executor_context_);
          LOG_TRACE("Evaluated runtime scan key: %s", value.GetInfo().c_str());
          values_.push_back(value.Copy());
        }

        key_ready_ = true;
      }
    }

    if (table_ != nullptr) {
      LOG_TRACE("Column count : %u", table_->GetSchema()->GetColumnCount());
      full_column_ids_.resize(table_->GetSchema()->GetColumnCount());
      std::iota(full_column_ids_.begin(), full_column_ids_.end(), 0);
    }
  }
  // HYBRID SCAN
  else if (type_ == HYBRID_SCAN_TYPE_HYBRID) {
    LOG_TRACE("Hybrid Scan");
    table_tile_group_count_ = table_->GetTileGroupCount();

    // -1 from the index means "no tile groups indexed yet".
    int offset = index_->GetIndexedTileGroupOff();
    indexed_tile_offset_ = (offset == -1) ? INVALID_OID : (oid_t)offset;

    block_threshold = 0;

    if (indexed_tile_offset_ == INVALID_OID) {
      current_tile_group_offset_ = START_OID;
    } else {
      // Sequential part of the scan starts right after the last indexed
      // tile group; block_threshold marks the boundary block id.
      current_tile_group_offset_ = indexed_tile_offset_ + 1;
      std::shared_ptr<storage::TileGroup> tile_group;
      if (current_tile_group_offset_ < table_tile_group_count_) {
        tile_group = table_->GetTileGroup(current_tile_group_offset_);
      } else {
        tile_group = table_->GetTileGroup(table_tile_group_count_ - 1);
      }

      oid_t tuple_id = 0;
      ItemPointer location(tile_group->GetTileGroupId(), tuple_id);
      block_threshold = location.block;
    }

    result_itr_ = START_OID;
    index_done_ = false;
    result_.clear();

    column_ids_ = node.GetColumnIds();
    // NOTE(review): locals with member-style names again; key_column_ids_ and
    // expr_types_ are unused (see INDEX branch).
    auto key_column_ids_ = node.GetKeyColumnIds();
    auto expr_types_ = node.GetExprTypes();
    values_ = node.GetValues();
    auto runtime_keys_ = node.GetRunTimeKeys();
    predicate_ = node.GetPredicate();

    if (runtime_keys_.size() != 0) {
      assert(runtime_keys_.size() == values_.size());

      // Unlike the INDEX branch, key_ready_ is NOT reset here, so runtime
      // keys are evaluated at most once across re-inits.
      if (!key_ready_) {
        values_.clear();

        for (auto expr : runtime_keys_) {
          auto value = expr->Evaluate(nullptr, nullptr, executor_context_);
          LOG_TRACE("Evaluated runtime scan key: %s", value.GetInfo().c_str());
          values_.push_back(value.Copy());
        }

        key_ready_ = true;
      }
    }

    if (table_ != nullptr) {
      full_column_ids_.resize(table_->GetSchema()->GetColumnCount());
      std::iota(full_column_ids_.begin(), full_column_ids_.end(), 0);
    }
  }
  // FALLBACK
  else {
    throw Exception("Invalid hybrid scan type : " + std::to_string(type_));
  }

  return true;
}
/**************************************************************************** ** ** ** Name: EmmDeregisteredNormalService() ** ** ** ** Description: Handles the behaviour of the UE while the EMM-SAP is in ** ** EMM-DEREGISTERED.NORMAL-SERVICE state. ** ** ** ** 3GPP TS 24.301, section 5.2.2.3.1 ** ** The UE shall initiate an attach or combined attach proce- ** ** dure. ** ** ** ** Inputs: evt: The received EMM-SAP event ** ** Others: emm_fsm_status ** ** ** ** Outputs: None ** ** Return: RETURNok, RETURNerror ** ** Others: emm_fsm_status ** ** ** ***************************************************************************/ int EmmDeregisteredNormalService(const emm_reg_t *evt) { LOG_FUNC_IN; int rc = RETURNerror; assert(emm_fsm_get_status() == EMM_DEREGISTERED_NORMAL_SERVICE); switch (evt->primitive) { case _EMMREG_REGISTER_REQ: /* * The user manually re-selected a PLMN to register to */ rc = emm_fsm_set_status(EMM_DEREGISTERED_PLMN_SEARCH); if (rc != RETURNerror) { /* Process the network registration request */ rc = emm_fsm_process(evt); } break; case _EMMREG_ATTACH_INIT: /* * Initiate the attach procedure for EPS services */ rc = emm_proc_attach(EMM_ATTACH_TYPE_EPS); break; case _EMMREG_ATTACH_REQ: /* * An EPS network attach has been requested (Attach Request * message successfully delivered to the network); * enter state EMM-REGISTERED-INITIATED */ rc = emm_fsm_set_status(EMM_REGISTERED_INITIATED); break; case _EMMREG_LOWERLAYER_SUCCESS: /* * Initial NAS message has been successfully delivered * to the network */ rc = emm_proc_lowerlayer_success(); break; case _EMMREG_LOWERLAYER_FAILURE: /* * Initial NAS message failed to be delivered to the network */ rc = emm_proc_lowerlayer_failure(TRUE); break; case _EMMREG_LOWERLAYER_RELEASE: /* * NAS signalling connection has been released */ rc = emm_proc_lowerlayer_release(); break; case _EMMREG_ATTACH_CNF: /* * Attach procedure successful and default EPS bearer * context activated; * enter state EMM-REGISTERED. 
*/ rc = emm_fsm_set_status(EMM_REGISTERED); break; default: LOG_TRACE(ERROR, "EMM-FSM - Primitive is not valid (%d)", evt->primitive); break; } LOG_FUNC_RETURN (rc); }
void add_const_vii_base::setupIOMappings( ) { int ninput_streams = 0; int noutput_streams = 0; std::vector<std::string>::iterator pname; std::string sid(""); int inMode=RealMode; if ( !validGRBlock() ) return; ninput_streams = gr_sptr->get_max_input_streams(); gr_io_signature_sptr g_isig = gr_sptr->input_signature(); noutput_streams = gr_sptr->get_max_output_streams(); gr_io_signature_sptr g_osig = gr_sptr->output_signature(); LOG_DEBUG( add_const_vii_base, "GNUHAWK IO MAPPINGS IN/OUT " << ninput_streams << "/" << noutput_streams ); // // Someone reset the GR Block so we need to clean up old mappings if they exists // we need to reset the io signatures and check the vlens // if ( _istreams.size() > 0 || _ostreams.size() > 0 ) { LOG_DEBUG( add_const_vii_base, "RESET INPUT SIGNATURE SIZE:" << _istreams.size() ); IStreamList::iterator istream; for ( int idx=0 ; istream != _istreams.end(); idx++, istream++ ) { // re-add existing stream definitons LOG_DEBUG( add_const_vii_base, "ADD READ INDEX TO GNU RADIO BLOCK"); if ( ninput_streams == -1 ) gr_sptr->add_read_index(); // setup io signature istream->associate( gr_sptr ); } LOG_DEBUG( add_const_vii_base, "RESET OUTPUT SIGNATURE SIZE:" << _ostreams.size() ); OStreamList::iterator ostream; for ( int idx=0 ; ostream != _ostreams.end(); idx++, ostream++ ) { // need to evaluate new settings...??? ostream->associate( gr_sptr ); } return; } // // Setup mapping of RH port to GNU RADIO Block input streams // For version 1, we are ignoring the GNU Radio input stream -1 case that allows multiple data // streams over a single connection. We are mapping a single RH Port to a single GNU Radio stream. 
// Stream Identifiers will be pass along as they are received // LOG_TRACE( add_const_vii_base, "setupIOMappings INPUT PORTS: " << inPorts.size() ); pname = inputPortOrder.begin(); for( int i=0; pname != inputPortOrder.end(); pname++ ) { // grab ports based on their order in the scd.xml file RH_ProvidesPortMap::iterator p_in = inPorts.find(*pname); if ( p_in != inPorts.end() ) { bulkio::InLongPort *port = dynamic_cast< bulkio::InLongPort * >(p_in->second); int mode = inMode; sid = ""; // need to add read index to GNU Radio Block for processing streams when max_input == -1 if ( ninput_streams == -1 ) gr_sptr->add_read_index(); // check if we received SRI during setup BULKIO::StreamSRISequence_var sris = port->activeSRIs(); if ( sris->length() > 0 ) { BULKIO::StreamSRI sri = sris[sris->length()-1]; mode = sri.mode; } std::vector<int> in; io_mapping.push_back( in ); _istreams.push_back( gr_istream< bulkio::InLongPort > ( port, gr_sptr, i, mode, sid )); LOG_DEBUG( add_const_vii_base, "ADDING INPUT MAP IDX:" << i << " SID:" << sid ); // increment port counter i++; } } // // Setup mapping of RH port to GNU RADIO Block input streams // For version 1, we are ignoring the GNU Radio output stream -1 case that allows multiple data // streams over a single connection. We are mapping a single RH Port to a single GNU Radio stream. 
// LOG_TRACE( add_const_vii_base, "setupIOMappings OutputPorts: " << outPorts.size() ); pname = outputPortOrder.begin(); for( int i=0; pname != outputPortOrder.end(); pname++ ) { // grab ports based on their order in the scd.xml file RH_UsesPortMap::iterator p_out = outPorts.find(*pname); if ( p_out != outPorts.end() ) { bulkio::OutLongPort *port = dynamic_cast< bulkio::OutLongPort * >(p_out->second); int idx = -1; BULKIO::StreamSRI sri = createOutputSRI( i, idx ); if (idx == -1) idx = i; if(idx < (int)io_mapping.size()) io_mapping[idx].push_back(i); int mode = sri.mode; sid = sri.streamID; _ostreams.push_back( gr_ostream< bulkio::OutLongPort > ( port, gr_sptr, i, mode, sid )); LOG_DEBUG( add_const_vii_base, "ADDING OUTPUT MAP IDX:" << i << " SID:" << sid ); _ostreams[i].setSRI(sri, i ); // increment port counter i++; } } }
// Feed one input tuple (assumed sorted by the group-by columns) into the
// aggregator. Detects group boundaries by comparing the tuple's group-by
// values against the remembered delegate tuple; on a boundary it emits the
// finished group via Helper() and resets the per-group aggregate instances.
// Returns false only if Helper() fails to output the finished group.
bool SortedAggregator::Advance(AbstractTuple *next_tuple) {
  bool start_new_agg = false;

  // Check if we are starting a new aggregate tuple
  if (delegate_tuple_values_.empty()) {
    // No current group
    LOG_TRACE("Current group keys are empty!");
    start_new_agg = true;
  } else {
    // Current group exists
    PL_ASSERT(delegate_tuple_values_.size() == num_input_columns_);

    // Check whether crossed group boundary
    for (oid_t grpColOffset = 0; grpColOffset < node->GetGroupbyColIds().size();
         grpColOffset++) {
      common::Value lval = (
          next_tuple->GetValue(node->GetGroupbyColIds()[grpColOffset]));
      common::Value rval = (
          delegate_tuple_.GetValue(node->GetGroupbyColIds()[grpColOffset]));
      common::Value cmp = (lval.CompareNotEquals(rval));
      bool not_equal = cmp.IsTrue();

      if (not_equal) {
        LOG_TRACE("Group-by columns changed.");

        // Call helper to output the current group result
        if (!Helper(node, aggregates, output_table, &delegate_tuple_,
                    this->executor_context)) {
          return false;
        }

        start_new_agg = true;
        break;
      }
    }
  }

  // If we have started a new aggregate tuple
  if (start_new_agg) {
    LOG_TRACE("Started a new group!");

    // Create aggregate
    for (oid_t aggno = 0; aggno < node->GetUniqueAggTerms().size(); aggno++) {
      // Clean up previous aggregate
      delete aggregates[aggno];
      aggregates[aggno] =
          GetAggInstance(node->GetUniqueAggTerms()[aggno].aggtype);

      bool distinct = node->GetUniqueAggTerms()[aggno].distinct;
      aggregates[aggno]->SetDistinct(distinct);
    }

    // Update delegate tuple values
    delegate_tuple_values_.clear();
    for (oid_t col_id = 0; col_id < num_input_columns_; col_id++) {
      // delegate_tuple_values_ has the ownership
      common::Value val = next_tuple->GetValue(col_id);
      delegate_tuple_values_.push_back(val);
    }
  }

  // Update the aggregation calculation
  for (oid_t aggno = 0; aggno < node->GetUniqueAggTerms().size(); aggno++) {
    auto predicate = node->GetUniqueAggTerms()[aggno].expression;
    // COUNT(*)-style terms have no expression: advance with constant 1.
    common::Value value = common::ValueFactory::GetIntegerValue(1);
    if (predicate) {
      value = node->GetUniqueAggTerms()[aggno].expression->Evaluate(
          next_tuple, nullptr, this->executor_context);
    }
    aggregates[aggno]->Advance(value);
  }

  return true;
}
// Core service-loop step: determine how many output items the wrapped GNU
// Radio block can produce from the data currently buffered on the input
// streams, run general_work(), and push the produced samples (with time
// stamps) out through the output streams.
//
// Returns: general_work()'s item count on success, -1 when not enough input
// data is available yet, -2 when an expected input stream is missing.
// Sets `eos` when the block signals a stop condition (< WORK_DONE).
template < typename IN_PORT_TYPE, typename OUT_PORT_TYPE >
int add_const_vii_base::_forecastAndProcess( bool &eos,
    typename std::vector< gr_istream< IN_PORT_TYPE > > &istreams ,
    typename std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;
    typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > > _OStreamList;
    typename _OStreamList::iterator ostream;
    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        // Find the largest backlog across all input streams.
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( add_const_vii_base, "GET MAX ITEMS: STREAM:"<< idx << " NITEMS/SCALARS:" << istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        if ( max_items_avail == 0 ) {
            LOG_TRACE( add_const_vii_base, "DATA CHECK - MAX ITEMS NOUTPUT/MAX_ITEMS:" << noutput_items << "/" << max_items_avail);
            return -1;
        }

        //
        // calc number of output elements based on input items available
        //
        noutput_items = 0;
        if ( !gr_sptr->fixed_rate() ) {
            // Variable-rate block: scale by relative rate, align to the
            // block's output multiple.
            noutput_items = round_down((int32_t) (max_items_avail * gr_sptr->relative_rate()), gr_sptr->output_multiple());
            LOG_TRACE( add_const_vii_base, " VARIABLE FORECAST NOUTPUT == " << noutput_items );
        } else {
            // Fixed-rate block: take the minimum producible count over all
            // input streams (skipping streams that forecast zero).
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( add_const_vii_base, " FIXED FORECAST NOUTPUT/output_multiple == " << noutput_items << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);
            LOG_TRACE( add_const_vii_base, "--> FORECAST IN/OUT " << _ninput_items_required[0] << "/" << noutput_items );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( add_const_vii_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" << istream->nelems() << "/" << istream->nitems() << "/" << _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }

            // Not every stream is ready: shrink the output request and
            // re-forecast (step down by the output multiple, or halve).
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( add_const_vii_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( add_const_vii_base, "DATA CHECK - NOT ENOUGH DATA AVAIL/REQ:" << _istreams[0].nitems() << "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int ritems = 0;
        int nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();

        istream = istreams.begin();
        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) {
                continue;
            }
            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( add_const_vii_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" << istream->streamID );
                return -2;
            }

            // unread items = buffered items minus what the block already consumed
            nitems = istream->nitems() - ritems;
            LOG_TRACE( add_const_vii_base, " ISTREAM: IDX:" << idx << " ITEMS AVAIL/READ/REQ " << nitems << "/" << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        //
        // setup output buffer vector based on noutput..
        //
        ostream = ostreams.begin();
        for( ; ostream != ostreams.end(); ostream++ ) {
            ostream->resize(noutput_items);
            _output_items.push_back((void*)(ostream->write_pointer()) );
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( add_const_vii_base, " CALLING WORK.....N_OUT:" << noutput_items << " N_IN:" << nitems << " ISTREAMS:" << _input_items.size() << " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);
            LOG_TRACE( add_const_vii_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( add_const_vii_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    if (nout != 0 or eos ) {
        noutput_items = nout;
        LOG_TRACE( add_const_vii_base, " WORK RETURNED: NOUT : " << nout << " EOS:" << eos);

        ostream = ostreams.begin();
        typename IN_PORT_TYPE::dataTransfer *pkt=NULL;
        for ( int idx=0 ; ostream != ostreams.end(); idx++, ostream++ ) {
            pkt=NULL;
            // Pair each output stream with an input packet (for time stamps):
            // same index if it exists, otherwise the last input stream that
            // has a packet.
            int inputIdx = idx;
            if ( (size_t)(inputIdx) >= istreams.size() ) {
                for ( inputIdx= istreams.size()-1; inputIdx > -1; inputIdx--) {
                    if ( istreams[inputIdx].pkt != NULL ) {
                        pkt = istreams[inputIdx].pkt;
                        break;
                    }
                }
            } else {
                pkt = istreams[inputIdx].pkt;
            }

            LOG_TRACE( add_const_vii_base, "PUSHING DATA ITEMS/STREAM_ID " << ostream->nitems() << "/" << ostream->streamID );
            if ( _maintainTimeStamp ) {
                // set time stamp for output samples based on input time stamp
                if ( ostream->nelems() == 0 ) {
#ifdef TEST_TIME_STAMP
                    LOG_DEBUG( add_const_vii_base, "SEED - TS SRI:  xdelta:" << std::setprecision(12) << ostream->sri.xdelta );
                    LOG_DEBUG( add_const_vii_base, "OSTREAM WRITE:   maint:" << _maintainTimeStamp );
                    LOG_DEBUG( add_const_vii_base, "                  mode:" << ostream->tstamp.tcmode );
                    LOG_DEBUG( add_const_vii_base, "                status:" << ostream->tstamp.tcstatus );
                    LOG_DEBUG( add_const_vii_base, "                offset:" << ostream->tstamp.toff );
                    LOG_DEBUG( add_const_vii_base, "                 whole:" << std::setprecision(10) << ostream->tstamp.twsec );
                    LOG_DEBUG( add_const_vii_base, "SEED - TS        frac:" << std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    // NOTE(review): pkt may still be NULL here (no input
                    // stream had a packet) — pkt->T would then dereference
                    // NULL. Confirm an upstream invariant guarantees a packet
                    // when _maintainTimeStamp is set.
                    ostream->setTimeStamp( pkt->T, _maintainTimeStamp );
                }

                // write out samples, and set next time stamp based on xdelta and  noutput_items
                ostream->write ( noutput_items, eos );
            } else {
                // use incoming packet's time stamp to forward
                if ( pkt ) {
#ifdef TEST_TIME_STAMP
                    LOG_DEBUG( add_const_vii_base, "OSTREAM SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
                    LOG_DEBUG( add_const_vii_base, "PKT - TS         maint:" << _maintainTimeStamp );
                    LOG_DEBUG( add_const_vii_base, "                  mode:" << pkt->T.tcmode );
                    LOG_DEBUG( add_const_vii_base, "                status:" << pkt->T.tcstatus );
                    LOG_DEBUG( add_const_vii_base, "                offset:" << pkt->T.toff );
                    LOG_DEBUG( add_const_vii_base, "                 whole:" << std::setprecision(10) << pkt->T.twsec );
                    LOG_DEBUG( add_const_vii_base, "PKT - TS          frac:" << std::setprecision(12) << pkt->T.tfsec );
#endif
                    ostream->write( noutput_items, eos, pkt->T );
                } else {
#ifdef TEST_TIME_STAMP
                    LOG_DEBUG( add_const_vii_base, "OSTREAM SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
                    LOG_DEBUG( add_const_vii_base, "OSTREAM TOD      maint:" << _maintainTimeStamp );
                    LOG_DEBUG( add_const_vii_base, "                  mode:" << ostream->tstamp.tcmode );
                    LOG_DEBUG( add_const_vii_base, "                status:" << ostream->tstamp.tcstatus );
                    LOG_DEBUG( add_const_vii_base, "                offset:" << ostream->tstamp.toff );
                    LOG_DEBUG( add_const_vii_base, "                 whole:" << std::setprecision(10) << ostream->tstamp.twsec );
                    LOG_DEBUG( add_const_vii_base, "OSTREAM TOD       frac:" << std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    // use time of day as time stamp
                    ostream->write( noutput_items, eos, _maintainTimeStamp );
                }
            }
        } // for ostreams
    }
    return nout;
}
// Build a rigid clump from the given nodes and return its central node.
// If centralNode is given, it is reused (and must carry ClumpData if it has
// any DemData at all); otherwise a new node with fresh ClumpData is created.
// For multi-node clumps the clump's mass, centroid, principal orientation and
// inertia are aggregated from the member nodes; each member then stores its
// position/orientation relative to the central node.
// Throws on: empty node list, wrong data type on centralNode, and a massless
// clump without a user-supplied centralNode.
shared_ptr<Node> ClumpData::makeClump(const vector<shared_ptr<Node>>& nn, shared_ptr<Node> centralNode, bool intersecting){
	if(nn.empty()) throw std::runtime_error("ClumpData::makeClump: 0 nodes.");
	/* TODO? check that nodes are unique */
	shared_ptr<ClumpData> clump;
	auto cNode=(centralNode?centralNode:make_shared<Node>());
	if(!centralNode || !centralNode->hasData<DemData>()){
		clump=make_shared<ClumpData>();
		clump->setClump();
		cNode->setData<DemData>(clump);
	}else{
		if(!centralNode->getData<DemData>().isA<ClumpData>()) throw std::runtime_error("ClumpData::makeClump: centralNode has DemData attached, but must have a ClumpData attached instead.");
		clump=static_pointer_cast<ClumpData>(centralNode->getDataPtr<DemData>());
		clump->setClump();
	}
	size_t N=nn.size();
	if(N==1){
		// Single-node clump: central node coincides with the member node,
		// relative transform is identity, mass/inertia copied verbatim.
		LOG_DEBUG("Treating 1-clump specially.");
		cNode->pos=nn[0]->pos;
		cNode->ori=nn[0]->ori;
		clump->nodes.push_back(nn[0]);
		auto& d0=nn[0]->getData<DemData>();
		d0.setClumped(cNode);
		clump->relPos.push_back(Vector3r::Zero());
		clump->relOri.push_back(Quaternionr::Identity());
		clump->mass=d0.mass;
		clump->inertia=d0.inertia;
		clump->equivRad=(clump->inertia.array()/clump->mass).sqrt().mean();
		return cNode;
	}
	/* clumps with more than one particle */
	// NOTE(review): `woo::NotImplementedError(...)` and `woo::RuntimeError(...)`
	// below appear to CONSTRUCT error objects without a `throw` — if these are
	// exception types (not throwing helper functions) the statements are
	// no-ops. Confirm against woo's error helpers.
	if(intersecting) woo::NotImplementedError("Self-intersecting clumps not yet implemented; use SphereClumpGeom instead.");
	double M=0; // mass
	Vector3r Sg=Vector3r::Zero(); // static moment, for getting clump's centroid
	Matrix3r Ig=Matrix3r::Zero(); // tensors of inertia; is upper triangular
	// first loop: compute clump's centroid and principal orientation
	for(const auto& n: nn){
		const auto& dem=n->getData<DemData>();
		if(dem.isClumped()) woo::RuntimeError("Node "+lexical_cast<string>(n)+": already clumped.");
		if(dem.parRef.empty()) woo::RuntimeError("Node "+lexical_cast<string>(n)+": back-references (demData.parRef) empty (Node does not belong to any particle)");
		M+=dem.mass;
		Sg+=dem.mass*n->pos;
		// accumulate inertia: rotate member inertia to global axes, then
		// translate to the global origin (parallel-axis theorem).
		Ig+=woo::Volumetric::inertiaTensorTranslate(woo::Volumetric::inertiaTensorRotate(dem.inertia.asDiagonal(),n->ori.conjugate()),dem.mass,-1.*n->pos);
	}
	if(M>0){
		// diagonalize: sets cNode->pos (centroid), cNode->ori (principal
		// axes) and clump->inertia (principal moments)
		woo::Volumetric::computePrincipalAxes(M,Sg,Ig,cNode->pos,cNode->ori,clump->inertia);
		clump->mass=M;
		clump->equivRad=(clump->inertia.array()/clump->mass).sqrt().mean();
	} else {
		// nodes have no mass; in that case, centralNode is used
		if(!centralNode) throw std::runtime_error("ClumpData::makeClump: no nodes with mass, therefore centralNode must be given, of which position will be used instead");
		clump->equivRad=NaN;
		// do not touch clump->mass or clump->inertia here
		// if centralNode was given, it is user-provided
	}
	// block massless nodes (unless node was given by the user)
	if(!centralNode && !(clump->mass>0)) clump->setBlockedAll();

	clump->nodes.reserve(N); clump->relPos.reserve(N); clump->relOri.reserve(N);
	for(size_t i=0; i<N; i++){
		const auto& n=nn[i];
		auto& dem=n->getData<DemData>();
		// store member pose relative to the central node's frame
		clump->nodes.push_back(n);
		clump->relPos.push_back(cNode->ori.conjugate()*(n->pos-cNode->pos));
		clump->relOri.push_back(cNode->ori.conjugate()*n->ori);
		#ifdef WOO_DEBUG
			AngleAxisr aa(*(clump->relOri.rbegin()));
		#endif
		// `aa` only exists under WOO_DEBUG; LOG_TRACE presumably compiles to
		// a no-op otherwise — confirm if building with tracing but without
		// WOO_DEBUG.
		LOG_TRACE("relPos="<<clump->relPos.rbegin()->transpose()<<", relOri="<<aa.axis()<<":"<<aa.angle());
		dem.setClumped(cNode);
	}
	return cNode;
}
// Recover database state from the most recent checkpoint file.
// Replays insert records until the checkpoint's commit record is reached,
// then bumps the catalog's next-oid and the transaction manager's next-cid
// past the recovered values.
// Returns the recovered commit id, or 0 when there is no checkpoint, the
// file cannot be opened, or its begin record is unreadable.
// FIX: the begin-record error path used `return false;` in a function whose
// return type is cid_t — it silently converted to 0; made the sentinel
// explicit (`return 0;`) for type correctness and consistency with the other
// "nothing recovered" returns.
cid_t SimpleCheckpoint::DoRecovery() {
  // No checkpoint to recover from
  if (checkpoint_version < 0) {
    return 0;
  }
  // we open checkpoint file in read + binary mode
  std::string file_name = ConcatFileName(checkpoint_dir, checkpoint_version);
  bool success =
      LoggingUtil::InitFileHandle(file_name.c_str(), file_handle_, "rb");
  if (!success) {
    return 0;
  }

  auto size = LoggingUtil::GetLogFileSize(file_handle_);
  PL_ASSERT(size > 0);
  file_handle_.size = size;

  bool should_stop = false;
  cid_t commit_id = 0;
  while (!should_stop) {
    auto record_type = LoggingUtil::GetNextLogRecordType(file_handle_);
    switch (record_type) {
      case LOGRECORD_TYPE_WAL_TUPLE_INSERT: {
        LOG_TRACE("Read checkpoint insert entry");
        // Replay the insert under the checkpoint's commit id.
        InsertTuple(commit_id);
        break;
      }
      case LOGRECORD_TYPE_TRANSACTION_COMMIT: {
        // Commit record terminates the checkpoint: recovery is complete.
        should_stop = true;
        break;
      }
      case LOGRECORD_TYPE_TRANSACTION_BEGIN: {
        LOG_TRACE("Read checkpoint begin entry");
        TransactionRecord txn_rec(record_type);
        if (LoggingUtil::ReadTransactionRecordHeader(txn_rec, file_handle_) ==
            false) {
          LOG_ERROR("Failed to read checkpoint begin entry");
          // was `return false;` — this function returns cid_t, so make the
          // "nothing recovered" sentinel explicit
          return 0;
        }
        commit_id = txn_rec.GetTransactionId();
        break;
      }
      default: {
        LOG_ERROR("Invalid checkpoint entry");
        should_stop = true;
        break;
      }
    }
  }

  // After finishing recovery, set the next oid with maximum oid
  // observed during the recovery
  auto &manager = catalog::Manager::GetInstance();
  if (max_oid_ > manager.GetNextOid()) {
    manager.SetNextOid(max_oid_);
  }

  // FIXME this is not thread safe for concurrent checkpoint recovery
  concurrency::TransactionManagerFactory::GetInstance().SetNextCid(commit_id);
  CheckpointManager::GetInstance().SetRecoveredCid(commit_id);
  return commit_id;
}
// Dump, via LOG_TRACE, one line per active tuple slot showing its MVCC
// metadata (owner txn id, begin/end commit ids, insert/delete commit flags,
// next item pointer) plus the visibility verdict for the given transaction
// (`txn_id`) at snapshot `at_cid`.
void TileGroupHeader::PrintVisibility(txn_id_t txn_id, cid_t at_cid) {
  oid_t active_tuple_slots = GetCurrentNextTupleSlot();
  std::stringstream os;

  os << "\t-----------------------------------------------------------\n";

  for (oid_t header_itr = 0; header_itr < active_tuple_slots; header_itr++) {
    // Visibility inputs for the caller's transaction/snapshot.
    bool own = (txn_id == GetTransactionId(header_itr));
    bool activated = (at_cid >= GetBeginCommitId(header_itr));
    bool invalidated = (at_cid >= GetEndCommitId(header_itr));

    // Per-slot metadata (renamed from the original's shadowing `txn_id`).
    txn_id_t slot_txn_id = GetTransactionId(header_itr);
    cid_t beg_commit_id = GetBeginCommitId(header_itr);
    cid_t end_commit_id = GetEndCommitId(header_itr);
    bool insert_commit = GetInsertCommit(header_itr);
    bool delete_commit = GetDeleteCommit(header_itr);

    int width = 10;

    os << "\tslot :: " << std::setw(width) << header_itr;

    os << " txn id : ";
    if (slot_txn_id == MAX_TXN_ID) {
      os << std::setw(width) << "MAX_TXN_ID";
    } else {
      os << std::setw(width) << slot_txn_id;
    }

    os << " beg cid : ";
    if (beg_commit_id == MAX_CID) {
      os << std::setw(width) << "MAX_CID";
    } else {
      os << std::setw(width) << beg_commit_id;
    }

    os << " end cid : ";
    if (end_commit_id == MAX_CID) {
      os << std::setw(width) << "MAX_CID";
    } else {
      os << std::setw(width) << end_commit_id;
    }

    os << " insert commit : ";
    os << (insert_commit ? "O" : "X");

    os << " delete commit : ";
    os << (delete_commit ? "O" : "X");

    peloton::ItemPointer next_loc = GetNextItemPointer(header_itr);
    os << " prev : "
       << "[ " << next_loc.block << " , " << next_loc.offset << " ]";

    os << " own : " << own;
    os << " activated : " << activated;
    os << " invalidated : " << invalidated << " ";

    // Visible iff not invalidated AND exactly one of {owned, activated}
    // holds — equivalent to (past insert && not owner) || (own uncommitted
    // insert).
    bool visible = (own != activated) && !invalidated;
    os << (visible ? "\t\t[ true ]\n" : "\t\t[ false ]\n");
  }

  os << "\t-----------------------------------------------------------\n";

  LOG_TRACE("%s", os.str().c_str());
}