void UnitsClassWidget::linkToDatastoreItem(const std::list<openfluid::fluidx::DatastoreItemDescriptor*>& DSList) { mp_LayerSource = nullptr; // disconnect signal to avoid multiple refresh of map, // due to changeVisible slot disconnect(ui->VisibleCheckBox,SIGNAL(toggled(bool)),this,SLOT(changeVisible())); if (!DSList.empty()) { mp_LayerSource = DSList.front(); ui->LayerSourceLabel->setText(QDir::toNativeSeparators(QString::fromStdString(mp_LayerSource->getRelativePath()))); ui->StyleParamsWidget->setEnabled(true); } else { ui->LayerSourceLabel->setText(tr("(no layer to display)")); ui->StyleParamsWidget->setEnabled(false); ui->VisibleCheckBox->setChecked(false); } connect(ui->VisibleCheckBox,SIGNAL(toggled(bool)),this,SLOT(changeVisible())); }
void hpc_logger::write_buffer_list(std::list<buffer_info>& llist) { while (!llist.empty()) { buffer_info new_buffer_info = llist.front(); llist.pop_front(); if (_current_log_file_bytes + new_buffer_info.buffer_size >= MAX_FILE_SIZE) { _current_log->close(); delete _current_log; _current_log = nullptr; create_log_file(); } _current_log->write(new_buffer_info.buffer, new_buffer_info.buffer_size); _current_log_file_bytes += new_buffer_info.buffer_size; free(new_buffer_info.buffer); } }
// Returns true if anything was executed. bool __RunOnePendingInterrupt() { if (inInterrupt || !interruptsEnabled) { // Already in an interrupt! We'll keep going when it's done. return false; } // Can easily prioritize between different kinds of interrupts if necessary. retry: if (pendingInterrupts.size()) { // If we came from CoreTiming::Advance(), we might've come from a waiting thread's callback. // To avoid "injecting" return values into our saved state, we context switch here. __KernelSwitchOffThread("interrupt"); PendingInterrupt pend = pendingInterrupts.front(); SubIntrHandler *handler = intrHandlers[pend.intr].get(pend.subintr); if (handler == NULL) { WARN_LOG(HLE, "Ignoring interrupt, already been released."); pendingInterrupts.pop_front(); goto retry; } intState.save(); handler->copyArgsToCPU(pend); currentMIPS->r[MIPS_REG_RA] = __KernelInterruptReturnAddress(); inInterrupt = true; return true; } else { // DEBUG_LOG(HLE, "No more interrupts!"); return false; } }
// Fetch a connection, guarded by secLock. Returns a pooled connection when
// one is available, a freshly created one while the pool is below
// imaxConnects, or NULL when the cap has been reached (or a new connection
// fails to connect).
void* GetConnect()
{
    void* con;
    EnterCriticalSection(&secLock);
    if(connLists.size()>0)  // the pool still holds connections
    {
        con=connLists.front();   // take the first connection
        connLists.pop_front();   // remove it from the pool
        if(YTConn_GetStatus(con) < 0)  // connection was closed: tear down and reconnect
        {
            YTConn_Close(con);
            YTConn_Connect(con);
            // NOTE(review): the result of YTConn_Connect is not checked here,
            // so a failed reconnect would still be handed to the caller — confirm.
        }
        LeaveCriticalSection(&secLock);
        return con;
    }
    else
    {
        if(iCurConnects< imaxConnects)
        {   // still below the cap: create a new connection
            con= YTConn_NewConn(ptrConfig);
            if (YTConn_Connect(con) < 0)
            {
                YTConn_DelConn(con);
                con = NULL;
            }
            LeaveCriticalSection(&secLock);
            return con;
        }
        else
        {   // the number of established connections has reached maxSize
            LeaveCriticalSection(&secLock);
            return NULL;
        }
    }
}
/** * @brief Move half the population from the first Cell to the last Cell of cellPath * @details It should be noted that movePopulation() check wether it's legal to do that move before * * @param cellPath a list of pointer to Hexacell, all HexaCell should be adjacent * @param playerID the Id of the player trying to do that move */ void MainWindow::movePopulation(std::list<HexaCell*> cellPath, int playerID ) { HexaCell* startCell = cellPath.front(); HexaCell* endCell = cellPath.back(); int popToMove = (startCell)->getPopulation() / 2; if (playerID != startCell->getPlayerID()){ return; } if (endCell->getPlayerID() == playerID) { startCell->decPopulation(popToMove); endCell->incPopulation(popToMove); } else{ if (endCell->getPopulation() > popToMove){ startCell->decPopulation(popToMove); endCell->decPopulation(popToMove); } else{ startCell->decPopulation(popToMove); endCell->setPopulation(popToMove - endCell->getPopulation()); if (endCell->getPlayerID() == 1) hexaCellBoard->removePlayerCell(endCell); else if (endCell->getPlayerID() == 2) hexaCellBoard->removeBotCell(endCell); if (playerID == 1) hexaCellBoard->addPlayerCell(endCell); if (playerID == 2) hexaCellBoard->addBotCell(endCell); endCell->setPlayerID(playerID); } } startCell->update(); endCell->update(); }
// Append statement 's' (translated into one or more PtrAnal statements) to
// CFG node 'n', wiring ALWAYS control-flow edges between consecutive
// statements, and resolve any incoming edges previously left pending for 'n'.
virtual void AddNodeStmt(Node* n, const AstNodePtr& s)
{
    PtrAnal::StmtRef s_stmts = m.translate_stmt(s);
    if (s_stmts.size() == 0) {
        if (fa.IsStatement(s))
            return;
        // Not translatable as a statement: translate as an expression instead.
        PtrAnal::Stmt s_exp = m.translate_exp(s).stmt;
        assert(s_exp != 0);
        s_stmts.push_back(s_exp);
    }
    // Link from the node's last statement, or from the function definition
    // when this is the very first statement of the first node.
    if (n->size()) {
        m.contrl_flow(n->back(), s_stmts.front(), CFGConfig::ALWAYS);
    } else if (n == &nodes.front())
        m.contrl_flow(defn, s_stmts.front(), CFGConfig::ALWAYS);
    PtrAnal::StmtRef::const_iterator p = s_stmts.begin();
    n->push_back(*p);
    // Chain the remaining translated statements with ALWAYS edges.
    for (++p; p != s_stmts.end(); ++p) {
        PtrAnal::Stmt cur = *p;
        m.contrl_flow(n->back(), cur, CFGConfig::ALWAYS);
        n->push_back(cur);
    }
    // Resolve edges that were recorded before 'n' had any statement to target.
    std::map<Node*, EdgeInfo>::iterator p_pending = pending_in.find(n);
    if (p_pending != pending_in.end()) {
        EdgeInfo& cur = (*p_pending).second;
        for (std::list<std::pair<Node*,EdgeType> >::const_iterator p_edge=cur.begin();
             p_edge != cur.end(); ++p_edge) {
            Node* n1 = (*p_edge).first;
            if (n1->size())
                m.contrl_flow( n1->back(), n->front(), (*p_edge).second);
            else
                // Source node is still empty: defer the edge once more.
                pending_out[n1].push_back(std::pair<Node*,EdgeType>(n, (*p_edge).second));
        }
        pending_in.erase(p_pending);
    }
}
void CLogRequest::MassQueue(std::list<std::string>& queue ) { while (!queue.empty()) { std::string event = queue.front(); queue.pop_front(); if(mData.size() == 0) { mData = event; } else { mData += "~" + event; if ( mData.length() > 300) { Send(); CLogRequest newRequest(mTrackUrl); newRequest.MassQueue(queue); return; } } } Send(); }
// Verify that a multipart body whose single part has an empty header block
// is parsed completely and yields exactly one part with no headers.
void test_with_empty_headers()
{
    multipartparser parser;

#define BOUNDARY "boundary"
#define BODY \
    "--" BOUNDARY "\r\n" \
    "\r\n" \
    "This is implicitly typed plain ASCII text.\r\n" \
    "It does NOT end with a linebreak." \
    "\r\n--" BOUNDARY "--\r\n"

    init_globals();
    multipartparser_init(&parser, BOUNDARY);

    // The parser must consume the whole body without error.
    assert(multipartparser_execute(&parser, &g_callbacks, BODY, strlen(BODY)) == strlen(BODY));

    assert(g_body_begin_called);
    // Exactly one part, and its header list must be empty.
    assert(g_parts.size() == 1);
    assert(g_parts.front().headers.empty());
    assert(g_body_end_called);

#undef BOUNDARY
#undef BODY
}
// SIGINT alarm handler: alarm set by entity handler. Does // slow response when fired void AlarmHandler(int sig) { if (sig == SIGALRM) { OC_LOG (INFO, TAG, "Server starting slow response"); if (gRequestList.empty()) { OC_LOG (INFO, TAG, "No requests to service"); return; } // Get the request from the list OCEntityHandlerRequest *entityHandlerRequest = gRequestList.front(); gRequestList.pop_front(); if (entityHandlerRequest->method == OC_REST_GET) { OC_LOG (INFO, TAG, "Received OC_REST_GET from client"); ProcessGetRequest (entityHandlerRequest); } else { OC_LOG_V (INFO, TAG, "Received unsupported method %d from client", entityHandlerRequest->method); } // Free the request OCFree(entityHandlerRequest->query); OCFree(entityHandlerRequest->reqJSONPayload); OCFree(entityHandlerRequest); // If there are more requests in list, re-arm the alarm signal if (gRequestList.empty()) { alarm(SLOW_RESPONSE_DELAY_SEC); } } }
// Rotate the current log file: rename it to "<path>.<datetime>(N)" style
// backup (N is a running counter) and prune the oldest backup from disk
// once more than maxBackupCount backups exist.
void _MaxSizeLogAppender::roll()
{
    // Compose "<path>.<datetime>" as the backup base name...
    std::stringstream pathBuf;
    pathBuf << context.path << "." << getCurrDateTime();

    // ...and "(N)" as a uniquifying postfix.
    std::stringstream postfixBuf;
    postfixBuf << "(" << currBackupCount++ << ")";

    // NOTE: rename() here is presumably the appender's own helper returning
    // the final backup path (it yields a std::string, unlike ::rename).
    std::string backupPath = rename(pathBuf.str().c_str(), postfixBuf.str().c_str());
    backupList.push_back(backupPath);

    // Keep at most maxBackupCount backups: drop the oldest from disk.
    if (backupList.size() > (size_t) maxBackupCount)
    {
        const std::string oldest = backupList.front();
        backupList.pop_front();
        if (remove(oldest.c_str()) != 0)
        {
            perror("remove");
        }
    }
}
char *send_command(zmq::socket_t &sock, std::list<Value> ¶ms) { if (params.size() == 0) return 0; Value cmd_val = params.front(); params.pop_front(); std::string cmd = cmd_val.asString(); char *msg = MessageEncoding::encodeCommand(cmd, ¶ms); {FileLogger fl(program_name); fl.f() << "sending: " << msg << "\n"; } if (options.verbose) std::cout << " sending: " << msg << "\n"; sendMessage(sock, msg); size_t size = strlen(msg); free(msg); {FileLogger fl(program_name); fl.f() << "getting reply:\n"; } zmq::message_t reply; if (sock.recv(&reply)) { {FileLogger fl(program_name); fl.f() << "got reply:\n"; } size = reply.size(); char *data = (char *)malloc(size+1); memcpy(data, reply.data(), size); data[size] = 0; return data; } return 0; }
/* Socket is writable: flush the front buffer of the write queue. */
void write_cb(ev::io &watcher)
{
    if (write_queue.empty()) {
        io.set(ev::READ);
        return;
    }

    Buffer* buffer = write_queue.front();

    ssize_t written = write(watcher.fd, buffer->dpos(), buffer->nbytes());
    if (written < 0) {
        // BUG FIX: this is the write path — the old message said "read error".
        perror("write error");
        return;
    }

    buffer->pos += written;
    if (buffer->nbytes() == 0) {
        write_queue.pop_front();
        delete buffer;
    }

    /* Close and free the watcher once the response has been sent (stateless). */
    io.stop();
    close(sfd);
}
void performFilling() { Range *range = generateRangeAndReplaceColor(start_point.y, start_point.x, UP); ranges.push_back(range); ranges.push_back(new Range(range->line, range->start, range->end, DOWN)); int row; while (!ranges.empty()) { range = ranges.front(); if (range->direction == UP) { row = range->line - 1; if (row >= 0) { checkRangeAndGenerateNewRanges(range, row, UP); } } else { row = range->line + 1; if (row < y_size) { checkRangeAndGenerateNewRanges(range, row, DOWN); } } ranges.pop_front(); delete(range); } }
// Scan this match's batting partnerships (pairs of consecutive batsmen) and
// update b1/b2 with the worst partnership seen so far: a strictly worse pair
// replaces the lists' contents, an equal pair is appended to them.
// NOTE(review): assumes b1 and b2 are non-empty on entry (pre-seeded by the
// caller) — front() on an empty std::list is undefined behaviour. Confirm.
void matchRecord::getWrstBattPship(std::list<playerInnsRecord> &b1, std::list<playerInnsRecord> &b2 ) const
{
    for (int i = 0; i < nPlayersPerTeam ; i += 2)
    {
        // test whether this batting pship is worse than the worst seen so far
        if ( battingRec[i].runsScored + battingRec[i + 1].runsScored <
             b1.front().runsScored + b2.front().runsScored )
        {
            // remove all but one record from both lists
            while (b1.size() > 1)
                b1.pop_back();
            while (b2.size() > 1)
                b2.pop_back();

            // update remaining records
            b1.front() = playerInnsRecord(battingRec[i].runsScored, battingRec[i].batsmansName, teamName, date );
            b2.front() = playerInnsRecord(battingRec[i + 1].runsScored, battingRec[i + 1].batsmansName, teamName, date );
        }
        else
            // test whether this batting pship is equal to the worst seen so far
            if ( battingRec[i].runsScored + battingRec[i + 1].runsScored ==
                 b1.front().runsScored + b2.front().runsScored )
            {
                // add record to list
                b1.push_back(playerInnsRecord(battingRec[i].runsScored, battingRec[i].batsmansName, teamName, date ));
                b2.push_back(playerInnsRecord(battingRec[i + 1].runsScored, battingRec[i + 1].batsmansName, teamName, date ));
            }
    }
}
/** \copydoc Option::proceed */
// Stores the first argument value of this option under the option's name.
// NOTE(review): assumes 'args' is non-empty — front() on an empty std::list
// is undefined behaviour; presumably the parser guarantees at least one arg.
void proceed (const std::list<std::string>& args, IProperties& props) { props.add (0, getName(), args.front()); }
CHECK_TEST_CONDITION(r); CHECK_EQ(2, c.get_alternative_blocks_count()); // Some blocks that were in main chain are in alt chain now BOOST_FOREACH(block b, alt_blocks) { CHECK_TEST_CONDITION(m_chain_1.end() != std::find(m_chain_1.begin(), m_chain_1.end(), b)); } std::vector<cryptonote::block> chain; map_hash2tx_t mtx; r = find_block_chain(events, chain, mtx, get_block_hash(blocks.back())); CHECK_TEST_CONDITION(r); CHECK_EQ(MK_COINS(8), get_balance(m_recipient_account_1, chain, mtx)); CHECK_EQ(MK_COINS(3), get_balance(m_recipient_account_2, chain, mtx)); CHECK_EQ(MK_COINS(14), get_balance(m_recipient_account_3, chain, mtx)); CHECK_EQ(MK_COINS(16), get_balance(m_recipient_account_4, chain, mtx)); std::list<transaction> tx_pool; c.get_pool_transactions(tx_pool); CHECK_EQ(1, tx_pool.size()); CHECK_TEST_CONDITION(!(tx_pool.front() == m_tx_pool.front())); std::vector<size_t> tx_outs; uint64_t transfered; lookup_acc_outs(m_recipient_account_2.get_keys(), tx_pool.front(), tx_outs, transfered); CHECK_EQ(MK_COINS(7), transfered); return true; }
bool listPointsEqual(std::list<Point2f> l1, std::list<Point2f> l2) { return l1.front() == l2.front(); }
// Main run loop: installs the input and status-update event handlers,
// initialises the current mode's display, executes the config file
// (queueing its commands), connects to the given host/port when the config
// did not already connect, then services the event queue, keyboard input,
// pager navigation and screen resizes until Running becomes false.
void Vimpc::Run(std::string hostname, uint16_t port)
{
   int input = ERR;

   // Keyboard input event handler
   Vimpc::EventHandler(Event::Input, [&input] (EventData const & Data)
   { input = Data.input; });

   // Refresh the mode after a status update
   Vimpc::EventHandler(Event::StatusUpdate, [this] (EventData const & Data)
   {
      if (screen_.PagerIsVisible() == false)
      {
         Ui::Mode & mode = assert_reference(modeTable_[currentMode_]);
         mode.Refresh();
      }
   });

   // Set up the display
   {
      Ui::Mode & mode = assert_reference(modeTable_[currentMode_]);
      mode.Initialise(0);
   }

   SetSkipConfigConnects((hostname != "") || (port != 0));

   // Parse the config file
   commandMode_.SetQueueCommands(true);
   bool const configExecutionResult = Config::ExecuteConfigCommands(commandMode_);
   SetSkipConfigConnects(false);

   screen_.Start();

   if (configExecutionResult == true)
   {
      // If we didn't connect to a host from the config file, just connect to the default
      if (commandMode_.ConnectionAttempt() == false)
      {
         client_.Connect(hostname, port);
      }

      screen_.Update();
      commandMode_.SetQueueCommands(false);

      // The main loop
      while (Running == true)
      {
         screen_.UpdateErrorDisplay();

         {
            UniqueLock<Mutex> Lock(QueueMutex);

            // Wait (up to 100ms) for an event, then dispatch it to every
            // registered handler and wake any threads waiting on it.
            if ((Queue.empty() == false) ||
                (ConditionWait(Condition, Lock, 100) != false))
            {
               if (Queue.empty() == false)
               {
                  EventPair const Event = Queue.front();
                  Queue.pop_front();
                  Lock.unlock();

                  if ((userEvents_ == false) && (Event.second.user == true))
                  {
                     Debug("Discarding user event");
                     continue;
                  }

                  for (auto func : Handler[Event.first])
                  {
                     func(Event.second);
                  }

                  EventMutex.lock();
                  for (auto cond : WaitConditions[Event.first])
                  {
                     cond->notify_all();
                  }
                  EventMutex.unlock();

                  Debug("Event triggered: " + EventStrings::Default[Event.first]);
               }
            }
         }

         if (input != ERR)
         {
            screen_.ClearErrorDisplay();

            // While the pager is showing, input pages through it instead of
            // being handled as a normal key press.
            if ((screen_.PagerIsVisible() == true)
#ifdef HAVE_MOUSE_SUPPORT
                && (input != KEY_MOUSE)
#endif
               )
            {
               if (screen_.PagerIsFinished() == true)
               {
                  screen_.HidePagerWindow();
               }
               else
               {
                  screen_.PagerWindowNext();
               }
            }
            else
            {
               Handle(input);
            }
         }

         bool const Resize = screen_.Resize();

         QueueMutex.lock();

         // Repaint after input, a resize, or an explicit repaint request.
         if (((input != ERR) || (Resize == true)) || (requireRepaint_ == true))
         {
            QueueMutex.unlock();
            Repaint();
         }
         else
         {
            QueueMutex.unlock();
         }

         input = ERR;
      }
   }
}
// Return the next argument without consuming it, or NULL when none remain.
const char *PeekNext() const
{
    if (IsEmpty())
        return NULL;
    return args.front();
}
// Return (a copy of) the first coordinate in the list.
// NOTE(review): assumes 'cords' is non-empty — front() on an empty
// std::list is undefined behaviour.
Cord getFront(void){ return cords.front(); }
bool stack_compare(const std::list<item> &lhs, const std::list<item> &rhs) { return lhs.front() < rhs.front(); }
// Walk the token stream of a scope, driving each ExecutionPath check in
// 'checks' over every statement. Constructs that make data-flow analysis
// unreliable (goto/setjmp, loops, switch, ?:, struct definitions, GCC
// struct initialisers, noreturn-looking calls, abort/exit) bail out the
// affected checks or all of them. Recurses into nested blocks and chained
// if/else via parseIfSwitchBody().
void ExecutionPath::checkScope(const Token *tok, std::list<ExecutionPath *> &checks)
{
    if (!tok || tok->str() == "}" || checks.empty())
        return;

    // Representative check used to parse conditions / loop bodies.
    const std::auto_ptr<ExecutionPath> check(checks.front()->copy());

    for (; tok; tok = tok->next()) {
        // might be a noreturn function..
        if (Token::simpleMatch(tok->tokAt(-2), ") ; }") &&
            Token::Match(tok->linkAt(-2)->tokAt(-2), "[;{}] %var% (") &&
            tok->linkAt(-2)->previous()->varId() == 0) {
            ExecutionPath::bailOut(checks);
            return;
        }

        if (Token::simpleMatch(tok, "union {")) {
            tok = tok->next()->link();
            continue;
        }

        if (tok->str() == "}")
            return;

        if (tok->str() == "break") {
            ExecutionPath::bailOut(checks);
            return;
        }

        if (Token::simpleMatch(tok, "while (")) {
            // parse condition
            if (checks.size() > 10 || check->parseCondition(*tok->tokAt(2), checks)) {
                ExecutionPath::bailOut(checks);
                return;
            }

            // skip "while (fgets()!=NULL)"
            if (Token::simpleMatch(tok, "while ( fgets (")) {
                const Token *tok2 = tok->linkAt(3);
                if (Token::simpleMatch(tok2, ") ) {")) {
                    tok = tok2->linkAt(2);
                    if (!tok)
                        break;
                    continue;
                }
            }
        }

        // goto/setjmp/longjmp => bailout
        else if (Token::Match(tok, "goto|setjmp|longjmp")) {
            ExecutionPath::bailOut(checks);
            return;
        }

        // ?: => bailout
        if (tok->str() == "?") {
            for (const Token *tok2 = tok; tok2 && tok2->str() != ";"; tok2 = tok2->next()) {
                if (tok2->varId() > 0)
                    ExecutionPath::bailOutVar(checks, tok2->varId());
            }
        }

        // for/while/switch/do .. bail out
        else if (Token::Match(tok, "for|while|switch|do")) {
            // goto {
            const Token *tok2 = tok->next();
            if (tok2 && tok2->str() == "(")
                tok2 = tok2->link();
            if (tok2 && tok2->str() == ")")
                tok2 = tok2->next();
            if (!tok2 || tok2->str() != "{") {
                ExecutionPath::bailOut(checks);
                return;
            }

            if (tok->str() == "switch") {
                // parse condition
                if (checks.size() > 10 || check->parseCondition(*tok->next(), checks)) {
                    ExecutionPath::bailOut(checks);
                    return;
                }

                // what variable ids should the if be counted for?
                std::set<unsigned int> countif;

                std::list<ExecutionPath *> newchecks;

                // Each case body is analysed like a branch of an if.
                for (const Token* tok3 = tok2->next(); tok3; tok3 = tok3->next()) {
                    if (tok3->str() == "{")
                        tok3 = tok3->link();
                    else if (tok3->str() == "}")
                        break;
                    else if (tok3->str() == "case" &&
                             !Token::Match(tok3, "case %num% : ; case")) {
                        parseIfSwitchBody(tok3, checks, newchecks, countif);
                    }
                }

                // Add newchecks to checks..
                std::copy(newchecks.begin(), newchecks.end(), std::back_inserter(checks));

                // Increase numberOfIf
                std::list<ExecutionPath *>::iterator it;
                for (it = checks.begin(); it != checks.end(); ++it) {
                    if (countif.find((*it)->varId) != countif.end())
                        (*it)->numberOfIf++;
                }
            }

            // no switch
            else {
                for (const Token *tok3 = tok; tok3 && tok3 != tok2; tok3 = tok3->next()) {
                    if (tok3->varId())
                        ExecutionPath::bailOutVar(checks, tok3->varId());
                }

                // it is not certain that a for/while will be executed:
                for (std::list<ExecutionPath *>::iterator it = checks.begin(); it != checks.end();) {
                    if ((*it)->numberOfIf > 0) {
                        delete *it;
                        checks.erase(it++);
                    } else
                        ++it;
                }

                // #2231 - loop body only contains a conditional initialization..
                if (Token::simpleMatch(tok2->next(), "if (")) {
                    // Start { for the if block
                    const Token *tok3 = tok2->linkAt(2);
                    if (Token::simpleMatch(tok3,") {")) {
                        tok3 = tok3->next();

                        // End } for the if block
                        const Token *tok4 = tok3->link();
                        if (Token::Match(tok3, "{ %var% =") &&
                            Token::simpleMatch(tok4, "} }") &&
                            Token::simpleMatch(tok4->tokAt(-2), "break ;")) {
                            // Is there a assignment and then a break?
                            const Token *t = Token::findsimplematch(tok3, ";");
                            if (t && t->tokAt(3) == tok4) {
                                for (std::list<ExecutionPath *>::iterator it = checks.begin(); it != checks.end(); ++it) {
                                    if ((*it)->varId == tok3->next()->varId()) {
                                        (*it)->numberOfIf++;
                                        break;
                                    }
                                }
                                tok = tok2->link();
                                continue;
                            }
                        }
                    }
                }

                // parse loop bodies
                check->parseLoopBody(tok2->next(), checks);
            }

            // skip { .. }
            tok2 = tok2->link();

            // if "do { .. } while ( .." , goto end of while..
            if (Token::simpleMatch(tok, "do {") && Token::simpleMatch(tok2, "} while ("))
                tok2 = tok2->linkAt(2);

            // bail out all variables if the scope contains a "return"
            // bail out all variables used in this for/while/switch/do
            for (; tok && tok != tok2; tok = tok->next()) {
                if (tok->str() == "return")
                    ExecutionPath::bailOut(checks);
                if (tok->varId())
                    ExecutionPath::bailOutVar(checks, tok->varId());
            }

            continue;
        }

        // bailout used variables in '; FOREACH ( .. ) { .. }'
        else if (tok->str() != "if" && Token::Match(tok->previous(), "[;{}] %var% (")) {
            // goto {
            const Token *tok2 = tok->next()->link()->next();
            if (tok2 && tok2->str() == "{") {
                // goto "}"
                tok2 = tok2->link();

                // bail out all variables used in "{ .. }"
                for (; tok && tok != tok2; tok = tok->next()) {
                    if (tok->varId())
                        ExecutionPath::bailOutVar(checks, tok->varId());
                }
                if (!tok)
                    break;
            }
        }

        // .. ) { ... } => bail out
        if (tok->str() == ")" && tok->next() && tok->next()->str() == "{") {
            ExecutionPath::bailOut(checks);
            return;
        }

        if ((tok->str() == "abort" || tok->str() == "exit") &&
            tok->next() && tok->next()->str() == "(") {
            ExecutionPath::bailOut(checks);
            return;
        }

        // don't parse into "struct type { .."
        if (Token::Match(tok, "struct|union|class %type% {|:")) {
            while (tok && tok->str() != "{" && tok->str() != ";")
                tok = tok->next();
            tok = tok ? tok->link() : 0;
            if (!tok) {
                ExecutionPath::bailOut(checks);
                return;
            }
        }

        if (Token::simpleMatch(tok, "= {")) {
            // GCC struct initialization.. bail out
            if (Token::Match(tok->tokAt(2), ". %var% =")) {
                ExecutionPath::bailOut(checks);
                return;
            }

            const Token * const end = tok->next()->link();
            while (tok && tok != end) {
                if (Token::Match(tok, "[{,] & %var% [,}]"))
                    ExecutionPath::bailOutVar(checks, tok->tokAt(2)->varId());
                tok = tok->next();
            }
            if (!tok) {
                ExecutionPath::bailOut(checks);
                return;
            }
            continue;
        }

        // ; { ... }
        if (Token::Match(tok->previous(), "[;{}:] {")) {
            ExecutionPath::checkScope(tok->next(), checks);
            tok = tok->link();
            continue;
        }

        if (tok->str() == "if" && tok->next() && tok->next()->str() == "(") {
            // what variable ids should the numberOfIf be counted for?
            std::set<unsigned int> countif;

            std::list<ExecutionPath *> newchecks;
            // Walk the whole if / else if / else chain.
            while (tok->str() == "if" && tok->next() && tok->next()->str() == "(") {
                // goto "("
                tok = tok->next();

                // parse condition
                if (checks.size() > 10 || check->parseCondition(*tok->next(), checks)) {
                    ExecutionPath::bailOut(checks);
                    ExecutionPath::bailOut(newchecks);
                    return;
                }

                // goto ")"
                tok = tok->link();

                // goto "{"
                tok = tok->next();

                if (!tok || tok->str() != "{") {
                    ExecutionPath::bailOut(checks);
                    ExecutionPath::bailOut(newchecks);
                    return;
                }

                // Recursively check into the if ..
                parseIfSwitchBody(tok->next(), checks, newchecks, countif);

                // goto "}"
                tok = tok->link();

                // there is no else => break out
                if (!tok->next() || tok->next()->str() != "else")
                    break;

                // parse next "if"..
                tok = tok->tokAt(2);
                if (!tok) {
                    ExecutionPath::bailOut(newchecks);
                    return;
                }
                if (tok->str() == "if")
                    continue;

                // there is no "if"..
                ExecutionPath::checkScope(tok->next(), checks);
                tok = tok->link();
                if (!tok) {
                    ExecutionPath::bailOut(newchecks);
                    return;
                }
            }

            // Add newchecks to checks..
            std::copy(newchecks.begin(), newchecks.end(), std::back_inserter(checks));

            // Increase numberOfIf
            std::list<ExecutionPath *>::iterator it;
            for (it = checks.begin(); it != checks.end(); ++it) {
                if (countif.find((*it)->varId) != countif.end())
                    (*it)->numberOfIf++;
            }

            // Delete checks that have numberOfIf >= 2
            for (it = checks.begin(); it != checks.end();) {
                if ((*it)->varId > 0 && (*it)->numberOfIf >= 2) {
                    delete *it;
                    checks.erase(it++);
                } else {
                    ++it;
                }
            }
        }

        // Let the concrete check process the token itself.
        tok = check->parse(*tok, checks);
        if (!tok || checks.empty())
            return;

        // return/throw ends all execution paths
        if (tok->str() == "return" ||
            tok->str() == "throw" ||
            tok->str() == "continue" ||
            tok->str() == "break") {
            ExecutionPath::bailOut(checks);
        }
    }
}
// shunting yard algorithm double evaluate(std::list<std::string> &tokens) { std::stack<double> operands; std::stack<std::string> operators; while (!tokens.empty()) { if (!isANumber(tokens.front())) { // token is an operator std::string poppedOperator = tokens.front(); tokens.pop_front(); if (operators.empty()) { // empty operator stack, just push it on operators.push(poppedOperator); } else if (poppedOperator == "(") { // left paren, just push it on operators.push(poppedOperator); } else if (poppedOperator == ")") { // right paren, pop off and apply all operators until we find the left paren while (operators.top() != "(") { double operandTwo = operands.top(); operands.pop(); double operandOne = operands.top(); operands.pop(); std::string operatorToApply = operators.top(); operators.pop(); double newOperand = applyOperatorToOperands(operandOne, operandTwo, operatorToApply); operands.push(newOperand); } // found the left paren, pop it off if (!operators.empty()) { operators.pop(); } // check for unary operator - if (!operators.empty()) { if (operators.top() == "-" && operands.size() == 1) { // apply unary operator - double operand = operands.top(); operands.pop(); operators.pop(); operands.push(operand * -1); } } } else if (tokens.front() == "(") { // corner case, singly parened number operators.push(poppedOperator); } else { // check for operator precedence while (topOfStackHasHigherOrEqualPrecedenceThanOperator(poppedOperator, operators)) { double operandTwo = operands.top(); operands.pop(); double operandOne = operands.top(); operands.pop(); std::string operatorToApply = operators.top(); operators.pop(); double newOperand = applyOperatorToOperands(operandOne, operandTwo, operatorToApply); operands.push(newOperand); } // precedence cleared, push operator operators.push(poppedOperator); } } else { // token is a number, push to operand stack operands.push(toDouble(tokens.front())); tokens.pop_front(); } } // end of token list, apply all remaining operators while 
(!operators.empty()) { double operandTwo = operands.top(); operands.pop(); double operandOne = operands.top(); operands.pop(); std::cout << operandOne << " " << operandTwo << " " << operators.top() << std::endl; std::string operatorToApply = operators.top(); operators.pop(); double newOperand = applyOperatorToOperands(operandOne, operandTwo, operatorToApply); operands.push(newOperand); } return operands.top(); }
// Build the bestfit expression tree for a constructor-declaration group.
// Exactly one definition is expected; each constructor argument is mapped to
// a fresh hidden dimension and the whole conditional bestfit is wrapped in
// one base abstraction per argument.
Tree::Expr ConsDefWS::group(const std::list<EquationDefinition>& defs)
{
    if (defs.size() != 1)
    {
        throw "too many definitions";
    }

    auto& cons = get<Parser::ConstructorDecl>(*defs.front().parsed());

    // Tuple context records the constructed type and constructor name.
    Tree::TupleExpr::TuplePairs pairs
    {
        {Tree::DimensionExpr{DIM_TYPE}, cons.type},
        {Tree::DimensionExpr{DIM_CONS}, cons.name}
    };

    std::map<u32string, dimension_index> rewrites;
    std::vector<dimension_index> dims;

    // Argument i is exposed as dimension "argi" and rewritten to a fresh
    // hidden dimension in the guard.
    for (size_t i = 0; i != cons.args.size(); ++i)
    {
        std::ostringstream argos;
        argos << "arg" << i;

        pairs.push_back(std::make_pair(
            Tree::DimensionExpr(utf8_to_utf32(argos.str())),
            Tree::IdentExpr(cons.args[i])
        ));

        auto dim = m_system.nextHiddenDim();
        dims.push_back(dim);
        rewrites.insert({cons.args[i], dim});
    }

    // Rewrite guard references to the argument names into their dimensions.
    Tree::Expr guard = fixupGuardArgs(cons.guard, rewrites);

    Tree::ConditionalBestfitExpr cond;
    cond.declarations.push_back(std::make_tuple
    (
        defs.front().start(),
        guard,
        Tree::Expr(),
        Tree::TupleExpr{pairs}
    ));

    // Wrap in base abstractions, innermost matching the last argument.
    Tree::Expr abstractions = cond;
    auto dimIter = dims.rbegin();
    for (auto argsIter = cons.args.rbegin();
         argsIter != cons.args.rend();
         ++argsIter, ++dimIter)
    {
        auto base = Tree::BaseAbstractionExpr(*argsIter, abstractions);
        base.dims.push_back(*dimIter);
        abstractions = std::move(base);
    }

    return abstractions;
}
void syncFixUp(OperationContext* txn, FixUpInfo& fixUpInfo, OplogReader* oplogreader, ReplicationCoordinator* replCoord) { DBClientConnection* them = oplogreader->conn(); // fetch all first so we needn't handle interruption in a fancy way unsigned long long totalSize = 0; list< pair<DocID, BSONObj> > goodVersions; BSONObj newMinValid; // fetch all the goodVersions of each document from current primary DocID doc; unsigned long long numFetched = 0; try { for (set<DocID>::iterator it = fixUpInfo.toRefetch.begin(); it != fixUpInfo.toRefetch.end(); it++) { doc = *it; verify(!doc._id.eoo()); { // TODO : slow. lots of round trips. numFetched++; BSONObj good = them->findOne(doc.ns, doc._id.wrap(), NULL, QueryOption_SlaveOk).getOwned(); totalSize += good.objsize(); uassert(13410, "replSet too much data to roll back", totalSize < 300 * 1024 * 1024); // note good might be eoo, indicating we should delete it goodVersions.push_back(pair<DocID, BSONObj>(doc,good)); } } newMinValid = oplogreader->getLastOp(rsOplogName); if (newMinValid.isEmpty()) { error() << "rollback error newMinValid empty?"; return; } } catch (DBException& e) { LOG(1) << "rollback re-get objects: " << e.toString(); error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' ' << numFetched << '/' << fixUpInfo.toRefetch.size(); throw e; } log() << "rollback 3.5"; if (fixUpInfo.rbid != getRBID(oplogreader->conn())) { // our source rolled back itself. so the data we received isn't necessarily consistent. warning() << "rollback rbid on source changed during rollback, cancelling this attempt"; return; } // update them log() << "rollback 4 n:" << goodVersions.size(); bool warn = false; invariant(!fixUpInfo.commonPointOurDiskloc.isNull()); invariant(txn->lockState()->isW()); // we have items we are writing that aren't from a point-in-time. thus best not to come // online until we get to that point in freshness. 
Timestamp minValid = newMinValid["ts"].timestamp(); log() << "minvalid=" << minValid.toStringLong(); setMinValid(txn, minValid); // any full collection resyncs required? if (!fixUpInfo.collectionsToResyncData.empty() || !fixUpInfo.collectionsToResyncMetadata.empty()) { for (const string& ns : fixUpInfo.collectionsToResyncData) { log() << "rollback 4.1.1 coll resync " << ns; fixUpInfo.collectionsToResyncMetadata.erase(ns); const NamespaceString nss(ns); Database* db = dbHolder().openDb(txn, nss.db().toString()); invariant(db); { WriteUnitOfWork wunit(txn); db->dropCollection(txn, ns); wunit.commit(); } { string errmsg; // This comes as a GlobalWrite lock, so there is no DB to be acquired after // resume, so we can skip the DB stability checks. Also // copyCollectionFromRemote will acquire its own database pointer, under the // appropriate locks, so just releasing and acquiring the lock is safe. invariant(txn->lockState()->isW()); Lock::TempRelease release(txn->lockState()); bool ok = copyCollectionFromRemote(txn, them->getServerAddress(), ns, errmsg); uassert(15909, str::stream() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg, ok); } } for (const string& ns : fixUpInfo.collectionsToResyncMetadata) { log() << "rollback 4.1.2 coll metadata resync " << ns; const NamespaceString nss(ns); auto db = dbHolder().openDb(txn, nss.db().toString()); invariant(db); auto collection = db->getCollection(ns); invariant(collection); auto cce = collection->getCatalogEntry(); const std::list<BSONObj> info = them->getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll())); if (info.empty()) { // Collection dropped by "them" so we should drop it too. 
log() << ns << " not found on remote host, dropping"; fixUpInfo.toDrop.insert(ns); continue; } invariant(info.size() == 1); CollectionOptions options; auto status = options.parse(info.front()); if (!status.isOK()) { throw RSFatalException(str::stream() << "Failed to parse options " << info.front() << ": " << status.toString()); } WriteUnitOfWork wuow(txn); if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) { cce->updateFlags(txn, options.flags); } status = collection->setValidator(txn, options.validator); if (!status.isOK()) { throw RSFatalException(str::stream() << "Failed to set validator: " << status.toString()); } wuow.commit(); } // we did more reading from primary, so check it again for a rollback (which would mess // us up), and make minValid newer. log() << "rollback 4.2"; string err; try { newMinValid = oplogreader->getLastOp(rsOplogName); if (newMinValid.isEmpty()) { err = "can't get minvalid from sync source"; } else { Timestamp minValid = newMinValid["ts"].timestamp(); log() << "minvalid=" << minValid.toStringLong(); setMinValid(txn, minValid); } } catch (DBException& e) { err = "can't get/set minvalid: "; err += e.what(); } if (fixUpInfo.rbid != getRBID(oplogreader->conn())) { // our source rolled back itself. so the data we received isn't necessarily // consistent. however, we've now done writes. thus we have a problem. err += "rbid at primary changed during resync/rollback"; } if (!err.empty()) { severe() << "rolling back : " << err << ". A full resync will be necessary."; // TODO: reset minvalid so that we are permanently in fatal state // TODO: don't be fatal, but rather, get all the data first. 
throw RSFatalException(); } log() << "rollback 4.3"; } map<string,shared_ptr<Helpers::RemoveSaver> > removeSavers; log() << "rollback 4.6"; // drop collections to drop before doing individual fixups - that might make things faster // below actually if there were subsequent inserts to rollback for (set<string>::iterator it = fixUpInfo.toDrop.begin(); it != fixUpInfo.toDrop.end(); it++) { log() << "rollback drop: " << *it; Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it)); if (db) { WriteUnitOfWork wunit(txn); shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[*it]; if (!removeSaver) removeSaver.reset(new Helpers::RemoveSaver("rollback", "", *it)); // perform a collection scan and write all documents in the collection to disk boost::scoped_ptr<PlanExecutor> exec( InternalPlanner::collectionScan(txn, *it, db->getCollection(*it))); BSONObj curObj; PlanExecutor::ExecState execState; while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) { removeSaver->goingToDelete(curObj); } if (execState != PlanExecutor::IS_EOF) { if (execState == PlanExecutor::FAILURE && WorkingSetCommon::isValidStatusMemberObject(curObj)) { Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj); severe() << "rolling back createCollection on " << *it << " failed with " << errorStatus << ". A full resync is necessary."; } else { severe() << "rolling back createCollection on " << *it << " failed. 
A full resync is necessary."; } throw RSFatalException(); } db->dropCollection(txn, *it); wunit.commit(); } } log() << "rollback 4.7"; OldClientContext ctx(txn, rsOplogName); Collection* oplogCollection = ctx.db()->getCollection(rsOplogName); uassert(13423, str::stream() << "replSet error in rollback can't find " << rsOplogName, oplogCollection); unsigned deletes = 0, updates = 0; time_t lastProgressUpdate = time(0); time_t progressUpdateGap = 10; for (list<pair<DocID, BSONObj> >::iterator it = goodVersions.begin(); it != goodVersions.end(); it++) { time_t now = time(0); if (now - lastProgressUpdate > progressUpdateGap) { log() << deletes << " delete and " << updates << " update operations processed out of " << goodVersions.size() << " total operations"; lastProgressUpdate = now; } const DocID& doc = it->first; BSONObj pattern = doc._id.wrap(); // { _id : ... } try { verify(doc.ns && *doc.ns); if (fixUpInfo.collectionsToResyncData.count(doc.ns)) { // we just synced this entire collection continue; } // keep an archive of items rolled back shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[doc.ns]; if (!removeSaver) removeSaver.reset(new Helpers::RemoveSaver("rollback", "", doc.ns)); // todo: lots of overhead in context, this can be faster OldClientContext ctx(txn, doc.ns); // Add the doc to our rollback file BSONObj obj; Collection* collection = ctx.db()->getCollection(doc.ns); // Do not log an error when undoing an insert on a no longer existent collection. // It is likely that the collection was dropped as part of rolling back a // createCollection command and regardless, the document no longer exists. if (collection) { bool found = Helpers::findOne(txn, collection, pattern, obj, false); if (found) { removeSaver->goingToDelete(obj); } else { error() << "rollback cannot find object: " << pattern << " in namespace " << doc.ns; } } if (it->second.isEmpty()) { // wasn't on the primary; delete. // TODO 1.6 : can't delete from a capped collection. 
need to handle that here. deletes++; if (collection) { if (collection->isCapped()) { // can't delete from a capped collection - so we truncate instead. if // this item must go, so must all successors!!! try { // TODO: IIRC cappedTruncateAfter does not handle completely empty. // this will crazy slow if no _id index. long long start = Listener::getElapsedTimeMillis(); RecordId loc = Helpers::findOne(txn, collection, pattern, false); if (Listener::getElapsedTimeMillis() - start > 200) warning() << "roll back slow no _id index for " << doc.ns << " perhaps?"; // would be faster but requires index: // RecordId loc = Helpers::findById(nsd, pattern); if (!loc.isNull()) { try { collection->temp_cappedTruncateAfter(txn, loc, true); } catch (DBException& e) { if (e.getCode() == 13415) { // hack: need to just make cappedTruncate do this... MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { WriteUnitOfWork wunit(txn); uassertStatusOK(collection->truncate(txn)); wunit.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( txn, "truncate", collection->ns().ns()); } else { throw e; } } } } catch (DBException& e) { error() << "rolling back capped collection rec " << doc.ns << ' ' << e.toString(); } } else {
//============================================================================ void plNetLinkingMgr::ExecNextOp () { plNetLinkingMgr * lm = plNetLinkingMgr::GetInstance(); if (!s_opqueue.size()) return; NlmOp* op = s_opqueue.front(); switch (op->opcode) { case kNlmOpNoOp: break; case kNlmOpWaitOp: return; // don't allow wait op to be unlinked/deleted from list case kNlmOpJoinAgeOp: { ASSERT(!s_ageJoiner); ASSERT(!s_ageLeaver); // Insert a wait operation into the exec queue NlmOpWaitOp * waitOp = new NlmOpWaitOp; QueueOp(waitOp, true); NlmJoinAgeOp * joinAgeOp = (NlmJoinAgeOp *)op; NCAgeJoinerCreate( &s_ageJoiner, joinAgeOp->age, joinAgeOp->muteSfx, NCAgeJoinerCallback, waitOp ); } break; case kNlmOpLeaveAgeOp: { ASSERT(!s_ageJoiner); ASSERT(!s_ageLeaver); // Insert a wait operation into the exec queue NlmOpWaitOp * waitOp = new NlmOpWaitOp; QueueOp(waitOp, true); lm->SetEnabled(false); lm->fLinkedIn = false; NlmLeaveAgeOp * leaveAgeOp = (NlmLeaveAgeOp *)op; NCAgeLeaverCreate( &s_ageLeaver, leaveAgeOp->quitting, leaveAgeOp->muteSfx, NCAgeLeaverCallback, waitOp ); } break; default: break; } s_opqueue.remove(op); delete op; }
int main(int argc, char *argv[]) { if (argc < 4) { return 1; } long processNum = atoi(argv[4]); long threadNum = atoi(argv[3]); long dtime = atol(argv[2]) * 1000; char* testName = argv[1]; long cycles = 0; Task task = get_test(testName, doneTask); int pipefd[2]; // open pipe if (pipe(pipefd) == -1) { perror("pipe"); exit(EXIT_FAILURE); } for (long i = 0; i < processNum; ++i) { if (fork() != 0) { continue; } // close unused read end close(pipefd[0]); pthread_mutexattr_t test4_mutexattr; pthread_mutex_init(&test4_mutex, &test4_mutexattr); pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN * 8); pthread_mutexattr_t mutexattr; pthread_mutex_init(&mutex, &mutexattr); pthread_condattr_t condattr; pthread_cond_init(&cond, &condattr); int rc = 0; pthread_t pid; if ((rc = pthread_mutex_lock(&mutex)) != 0) { std::cout << "pthread_mutex_lock " << rc << " " << strerror(rc) << std::endl; return 3; } time_start(); bool working = true; while ((working = (time_stop() < dtime)) || workingNum) { for (long i = workingNum; i < threadNum; ++i) { if ((rc = pthread_create(&pid, &attr, task, NULL)) != 0) { std::cout << "pthread_create " << rc << " " << strerror(rc) << std::endl; return 2; } ++workingNum; } if (workingNum == threadNum) { if ((rc = pthread_cond_wait(&cond, &mutex)) != 0) { std::cout << "pthread_cond_wait " << rc << " " << strerror(rc) << std::endl; return 3; } } //pthread_join(pids.front(), NULL); //pids.pop_front(); cycles += threadNum - workingNum; if (!working) { threadNum = workingNum; } while (!terminated_pids.empty()) { pthread_detach(terminated_pids.front()); terminated_pids.pop_front(); } } //cycles += workingNum; if ((rc = pthread_mutex_unlock(&mutex)) != 0) { std::cout << "pthread_mutex_unlock " << rc << " " << strerror(rc) << std::endl; return 3; } write(pipefd[1], &cycles, sizeof(cycles)); close(pipefd[1]); return 0; } // close unused write end close(pipefd[1]); long double result = 0; while (read(pipefd[0], 
&cycles, sizeof(cycles)) > 0) { result += cycles; } close(pipefd[0]); std::cout << ((long double) result * 1000) / dtime << std::endl; return 0; }
// Background JIT compiler thread loop.
// Blocks on the request queue, compiles each BackgroundCompileRequest with
// LLVM, then installs the generated native code into the owning
// BlockEnvironment or MachineMethod. Honors pause_ / stop_ flags set by
// other threads; runs forever otherwise.
// NOTE(review): statement order matters throughout — the lock scope, the
// pause handshake, and the gc_dependent()/gc_independent() bracketing must
// not be rearranged.
virtual void perform() {
    for(;;) { // forever
        BackgroundCompileRequest* req = 0;

        // Lock, wait, get a request, unlock
        {
            thread::Mutex::LockGuard guard(mutex_);

            if(pause_) {
                state = cPaused;
                paused_ = true;
                // Tell the pauser we have actually stopped, then sleep
                // until pause_ is cleared.
                pause_condition_.signal();
                while(pause_) {
                    condition_.wait(mutex_);
                }
                state = cUnknown;
                paused_ = false;
            }

            // If we've been asked to stop, do so now.
            if(stop_) return;

            // Sleep until a request is queued; re-check stop_ on every
            // wakeup since stop may be signalled through condition_ too.
            while(pending_requests_.size() == 0) {
                state = cIdle;

                // unlock and wait...
                condition_.wait(mutex_);

                if(stop_) return;
            }

            // now locked again, shift a request
            req = pending_requests_.front();
            pending_requests_.pop_front();

            state = cRunning;
        }
        // mutex now unlock, allowing others to push more requests
        //

        // Fresh compiler per request; ownership appears to pass to the
        // MachineMethod via mm->update() below — in the block path the
        // compiler object is not visibly freed here.
        LLVMCompiler* jit = new LLVMCompiler();

        {
            // Accumulate wall time spent compiling into ls_->time_spent.
            timer::Running timer(ls_->time_spent);
            jit->compile(ls_, req->vmmethod(), req->is_block());
            jit->generate_function(ls_);
        }

        if(show_machine_code_) {
            jit->show_machine_code();
        }

        // Ok, compiled, generated machine code, now update MachineMethod

        // Ok, now we are manipulating managed memory, so make
        // sure the GC doesn't run.
        ls_->shared().gc_dependent();

        req->vmmethod()->set_jitted(jit->llvm_function(),
                                    jit->code_bytes(),
                                    jit->function_pointer());

        if(req->is_block()) {
            BlockEnvironment* be = req->block_env();
            if(!be) {
                llvm::outs() << "Fatal error in JIT. Expected a BlockEnvironment.\n";
            } else {
                be->set_native_function(jit->function_pointer());
            }
        } else {
            MachineMethod* mm = req->machine_method();
            if(!mm) {
                llvm::outs() << "Fatal error in JIT. Expected a MachineMethod.\n";
            } else {
                mm->update(req->vmmethod(), jit);
                mm->activate();
            }
        }

        int which = ls_->add_jitted_method();
        if(ls_->config().jit_show_compiling) {
            llvm::outs() << "[[[ JIT finished background compiling "
                         << which
                         << (req->is_block() ? " (block)" : " (method)")
                         << " ]]]\n";
        }

        delete req;

        // We don't depend on the GC here, so let it run independent
        // of us.
        ls_->shared().gc_independent();
    }
}
//From still, decide what the next mode should be int decideNextMode() { out_twist.linear.x = 0.0; out_twist.angular.z = 0.0; if (followsPath) { ROS_INFO("following path"); //Check if we have arrived if (fabs(curPosOri.linear.x - targetPos.x) <= NODE_DIST_LIMIT && fabs(curPosOri.linear.y - targetPos.y) <= NODE_DIST_LIMIT) { //Close enough ROS_INFO("reached node (%f, %f)", targetPos.x, targetPos.y); if(path.empty()){ ROS_INFO("reached end of path - go to STRAIGHT_FORWARD"); mode = STRAIGHT_FORWARD; prevmode = STILL; followsPath = false; return mode; } //Compute direction to next node geometry_msgs::Point next = path.front(); ROS_INFO("target position: (%f, %f)", next.x, next.y); ROS_INFO("current position: (%f, %f)", curPosOri.linear.x, curPosOri.linear.y); while(fabs(curPosOri.linear.x - next.x) <= NODE_DIST_LIMIT && fabs(curPosOri.linear.y - next.y) <= NODE_DIST_LIMIT){ //possible dirty bugfix, check if next node is already reached if(path.empty()){ ROS_INFO("reached end of path - go to STRAIGHT_FORWARD"); mode = STRAIGHT_FORWARD; prevmode = STILL; followsPath = false; return mode; } ROS_INFO("removing close points from path"); path.pop_front(); next = path.front(); ROS_INFO("target position: (%f, %f)", next.x, next.y); ROS_INFO("current position: (%f, %f)", curPosOri.linear.x, curPosOri.linear.y); } int dir; ROS_INFO("x diff: %f", fabs(curPosOri.linear.x - next.x)); ROS_INFO("y diff: %f", fabs(curPosOri.linear.y - next.y)); if (fabs(curPosOri.linear.x - next.x) > fabs(curPosOri.linear.y - next.y)) { if (curPosOri.linear.x > next.x) { dir = 0; //west } else { dir = 2; //east } } else { if (curPosOri.linear.y > next.y) { dir = 1; //south } else { dir = 3; //north } } int curDir = (int)(floor(angle/(PI/2.0)+0.5)); //Find the closest NSWE orientation if (curDir >= 0) { curDir = curDir % 4; } else { curDir = (curDir + 400000000) % 4; } ROS_INFO("current direction: %d, target direction: %d", curDir, dir); if (dir == curDir) { //No turn required targetPos = 
path.front(); path.pop_front(); mode = STRAIGHT_FORWARD; // if (path.empty()) { // //We are facing the final node, which should hold the object. // //We shouldn't go there - just observe from where we are standing. // //TODO Decide what to do next. I don't know if we will be given a new object // //to find the path to, or if something else should be done. // mode = STRAIGHT_FORWARD; // } else { // mode = STRAIGHT_FORWARD; // } } else if (dir == (curDir+1)%4) { //Rotate left mode = LEFT_ROTATE; prevmode = STILL; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 + PI/2.0; } else if ((dir+1)%4 == curDir) { //Rotate right mode = RIGHT_ROTATE; prevmode = STILL; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 - PI/2.0; } else { //Rotate left, continuing 180 degrees. mode = LEFT_ROTATE; prevmode = STILL; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 + PI/2.0; } return mode; } } out_twist.linear.x = 0.0; out_twist.angular.z = 0.0; if (delta_enc[0] != 0 || delta_enc[1] != 0) { //Do nothing ROS_INFO("still in still"); return STILL; } //Rohit: check the previous mode and accordingly decide the next mode if (prevmode==RIGHT_WALL_ALIGN){ prevmode=mode; rightBool1 = true; leftBool1 = true; mode = RIGHT_WALL_FOLLOW; } else if (prevmode==LEFT_WALL_ALIGN){ prevmode=mode; leftBool1 = true; rightBool1 = true; mode = LEFT_WALL_FOLLOW; } else if(prevmode==LEFT_ROTATE){ prevmode=mode; mode = RIGHT_WALL_ALIGN; } else if(prevmode==RIGHT_ROTATE){ prevmode=mode; mode = LEFT_WALL_ALIGN; } else if (in_ir.front_left > IR_SHORT_LIMIT || in_ir.back_left > IR_SHORT_LIMIT) { prevmode=mode; mode = LEFT_ROTATE; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 + PI/2.0; //ROS_INFO("target Angle = %lf", (targetAngle*360)/TWOPI); //ROS_INFO("Left rotate:First if"); } else if (in_ir.front_right > IR_SHORT_LIMIT || in_ir.back_right > IR_SHORT_LIMIT) { prevmode=mode; mode = RIGHT_ROTATE; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 - PI/2.0; //ROS_INFO("target Angle = %lf", 
(targetAngle*360)/TWOPI); } else { //TODO go back prevmode=mode; mode = LEFT_ROTATE; targetAngle = floor((angle/(PI/2.0))+0.5) * PI/2.0 + PI/2.0; //ROS_INFO("target Angle = %lf", (targetAngle*360)/TWOPI); //ROS_INFO("Left rotate:Second if"); } return mode; }
void GLRenderer::doBucketing(std::list<GLRenderListItem*> <b) { GLRenderListBucketItem *bucket; GLRenderListItem *cur; if(ltb.empty()) return; cur = ltb.front(); Console_DPrintf("bucketing list of %d elements\n", ltb.size()); while(typeid(*cur) == typeid(GLRenderListMeshItem)) { ltb.pop_front(); bucket = addListItemToBucket(cur, ltb); for(std::list<GLRenderListItem*>::iterator itr = ltb.begin(); itr != ltb.end(); itr++) { if(typeid(GLRenderListMeshItem) == typeid(**itr) && bucket->equals(*itr)) { bucket->add(*itr); itr = ltb.erase(itr); } } cur = ltb.front(); Console_DPrintf("bucketing...\n"); } Console_DPrintf("bucketed list of %d elements\n", ltb.size()); /*int bucketSize=0; std::list<GLRenderListItem*>::iterator itr = ltb.begin(); std::list<GLRenderListItem*>::iterator start = itr; for(itr++; itr != ltb.end(); itr++) { if(typeid(GLRenderListMeshItem) == typeid(**start) && typeid(**start) == typeid(**itr)) { GLRenderListMeshItem *s = dynamic_cast<GLRenderListMeshItem*>(*start); GLRenderListMeshItem *e = dynamic_cast<GLRenderListMeshItem*>(*itr); if(s->getMesh() != e->getMesh()) { if(bucketSize>1) { std::list<GLRenderListItem*> l; l.splice(l.begin(), ltb, start, itr); //Console_DPrintf("Created bucket[%d] of size %d\n", bucketCount, bucketSize); //bucket it! buckets[bucketCount].setMaterial(s->getMaterial()); buckets[bucketCount].add(l); if(s->getSceneObject()->model>0) buckets[bucketCount].setModel(s->getSceneObject()->model); ltb.insert(itr, &buckets[bucketCount]); bucketCount++; } bucketSize=0; start = itr; } else { bucketSize++; } } else { bucketSize=0; start = itr; } }*/ }