unsigned Ndb_cluster_connection::max_nodegroup() { TransporterFacade *tp = m_impl.m_transporter_facade; if (tp == 0 || tp->ownId() == 0) return 0; Bitmask<MAX_NDB_NODES> ng; tp->lock_mutex(); for(unsigned i= 0; i < no_db_nodes(); i++) { //************************************************ // If any node is answering, ndb is answering //************************************************ trp_node n = tp->theClusterMgr->getNodeInfo(m_impl.m_all_nodes[i].id); if (n.is_confirmed() && n.m_state.nodeGroup <= MAX_NDB_NODES) ng.set(n.m_state.nodeGroup); } tp->unlock_mutex(); if (ng.isclear()) return 0; Uint32 n = ng.find_first(); Uint32 m; do { m = n; } while ((n = ng.find(n+1)) != ng.NotFound); return m; }
Vector<Vector<int> > NdbRestarter::splitNodes()
{
  // Split the data nodes into two halves such that the first node seen from
  // each node group goes into partA and any further node from an
  // already-seen group goes into partB.
  Vector<int> partA;
  Vector<int> partB;
  Bitmask<255> seenGroups;
  for (int i = 0; i < getNumDbNodes(); i++)
  {
    const int nodeId = getDbNodeId(i);
    const int group = getNodeGroup(nodeId);
    if (!seenGroups.get(group))
    {
      seenGroups.set(group);
      partA.push_back(nodeId);
    }
    else
    {
      partB.push_back(nodeId);
    }
  }

  // Randomize which half comes first in the returned pair.
  Vector<Vector<int> > result;
  if ((rand() % 100) > 50)
  {
    result.push_back(partA);
    result.push_back(partB);
  }
  else
  {
    result.push_back(partB);
    result.push_back(partA);
  }
  return result;
}
S_SheetAnimation::S_SheetAnimation(SystemManager* l_systemMgr)
  : S_Base(System::SheetAnimation, l_systemMgr)
{
  // This system processes entities that carry both a sprite sheet and a state.
  Bitmask requirements;
  requirements.TurnOnBit((unsigned int)Component::SpriteSheet);
  requirements.TurnOnBit((unsigned int)Component::State);
  m_requiredComponents.push_back(requirements);

  // Listen for state changes so the animation can be switched accordingly.
  m_systemManager->GetMessageHandler()->Subscribe(EntityMessage::State_Changed, this);
}
// Blit `bitmask` onto the display with its top-left corner at (x, y): every
// set bit paints one pixel with `foreground`; clear bits leave the pixel
// untouched. Rows/columns that fall outside the display are clipped.
//
// BUG FIX: the original advanced a running bit counter only for pixels that
// were actually inside the clipped region, so whenever the bitmask was
// clipped at the right edge the counter lost sync with the bitmask's row
// stride and every following row read the wrong bits. The bit index is now
// derived from the pixel's position inside the bitmask, which is correct
// regardless of clipping.
void fill(uint16_t x, uint16_t y, RgbColor foreground/*, RgbColor background*/, const Bitmask& bitmask)
{
  const auto maxH = min(y + bitmask.height(), height());
  const auto maxW = min(x + bitmask.width(), width());
  for (auto h = y; h < maxH; ++h)
    for (auto w = x; w < maxW; ++w)
    {
      // Bit position inside the bitmask: row offset plus column offset.
      const uint16_t bitIndex = (h - y) * bitmask.width() + (w - x);
      if (bitmask.test(bitIndex))
        setPixel(w, h, foreground);
      // else
      //   setPixel(h, w, background);
    }
}
// Build one AttributeHeader in inBuffer for every attribute id whose bit is
// set in attributeMask, scanning ids [0, noOfAttributes). Returns the number
// of headers written.
//
// FIX: the second parameter was named `m_no_of_attributesibutes` — a botched
// search-and-replace rename (member-style `m_` prefix plus a mangled word) on
// a plain parameter. Renamed to `noOfAttributes`; parameter names are not
// part of the call interface, so callers are unaffected.
Uint32
Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
                  Uint32 noOfAttributes,
                  Uint32* inBuffer)
{
  Uint32 bufIndx = 0;
  for (Uint32 i = 0; i < noOfAttributes; i++)
  {
    jam();
    if (attributeMask.get(i))
    {
      jam();
      // Data size 0: header only — the attribute value is read later.
      AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
    }
  }
  return bufIndx;
}
// Regression test for bug #18612 (system-restart variant): partition the
// cluster into two halves that each contain one replica of every node group,
// tell each half (via DUMP 9000) that the other half is "dead", then restart
// and verify the cluster detects the potential split-brain instead of
// starting two independent halves.
int runBug18612SR(NDBT_Context* ctx, NDBT_Step* step){
  // Assume two replicas
  NdbRestarter restarter;
  if (restarter.getNumDbNodes() < 2)
  {
    ctx->stopTest();
    return NDBT_OK;
  }

  Uint32 cnt = restarter.getNumDbNodes();
  for(int loop = 0; loop < ctx->getNumLoops(); loop++)
  {
    int partition0[256];
    int partition1[256];
    bzero(partition0, sizeof(partition0));
    bzero(partition1, sizeof(partition1));
    Bitmask<4> nodesmask;  // tracks node ids already placed in a partition

    // Pick one node per node group for partition0 and its same-group
    // replica for partition1, so each half could form a viable cluster.
    Uint32 node1 = restarter.getDbNodeId(rand()%cnt);
    for (Uint32 i = 0; i<cnt/2; i++)
    {
      do {
        // Hop to another node group until we find an unused node;
        // tmp == -1 means no other node group exists — keep current node.
        int tmp = restarter.getRandomNodeOtherNodeGroup(node1, rand());
        if (tmp == -1)
          break;
        node1 = tmp;
      } while(nodesmask.get(node1));

      partition0[i] = node1;
      partition1[i] = restarter.getRandomNodeSameNodeGroup(node1, rand());

      ndbout_c("nodes %d %d", node1, partition1[i]);

      assert(!nodesmask.get(node1));
      assert(!nodesmask.get(partition1[i]));
      nodesmask.set(node1);
      nodesmask.set(partition1[i]);
    }

    ndbout_c("done");

    if (restarter.restartAll(false, true, false))
      return NDBT_FAILED;

    int dump[255];
    // DUMP 9000 <node list>: pretend the listed nodes are unreachable.
    // Each half is told the *other* half is gone.
    dump[0] = 9000;
    memcpy(dump + 1, partition0, sizeof(int)*cnt/2);
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateOneNode(partition1[i], dump, 1+cnt/2))
        return NDBT_FAILED;

    dump[0] = 9000;
    memcpy(dump + 1, partition1, sizeof(int)*cnt/2);
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateOneNode(partition0[i], dump, 1+cnt/2))
        return NDBT_FAILED;

    // Make nodes halt (instead of restarting) on the injected error.
    int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
    if (restarter.dumpStateAllNodes(val2, 2))
      return NDBT_FAILED;

    if (restarter.insertErrorInAllNodes(932))
      return NDBT_FAILED;

    if (restarter.startAll())
      return NDBT_FAILED;

    if (restarter.waitClusterStartPhase(2))
      return NDBT_FAILED;

    // DUMP 9001: clear the simulated connectivity blocking again.
    dump[0] = 9001;
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateAllNodes(dump, 2))
        return NDBT_FAILED;

    // Expect the cluster (or at least each half) to stay in "no start"
    // rather than forming two independent clusters.
    if (restarter.waitClusterNoStart(30))
      if (restarter.waitNodesNoStart(partition0, cnt/2, 10))
        if (restarter.waitNodesNoStart(partition1, cnt/2, 10))
          return NDBT_FAILED;

    if (restarter.startAll())
      return NDBT_FAILED;

    if (restarter.waitClusterStarted())
      return NDBT_FAILED;
  }
  return NDBT_OK;
}
bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, Operationrec* const regOperPtr, KeyReqStruct *req_struct, Fragrecord* const regFragPtr, Uint32* const keyBuffer, Uint32& noPrimKey, Uint32* const afterBuffer, Uint32& noAfterWords, Uint32* const beforeBuffer, Uint32& noBeforeWords, bool disk) { noAfterWords = 0; noBeforeWords = 0; Uint32 readBuffer[MAX_ATTRIBUTES_IN_TABLE]; //--------------------------------------------------------------------------- // Set-up variables needed by readAttributes operPtr.p, tabptr.p //--------------------------------------------------------------------------- operPtr.p = regOperPtr; tabptr.i = regFragPtr->fragTableId; ptrCheckGuard(tabptr, cnoOfTablerec, tablerec); Tablerec* const regTabPtr = tabptr.p; Uint32 num_attr= regTabPtr->m_no_of_attributes; Uint32 descr_start= regTabPtr->tabDescriptor; ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); req_struct->check_offset[MM]= regTabPtr->get_check_offset(MM); req_struct->check_offset[DD]= regTabPtr->get_check_offset(DD); req_struct->attr_descr= &tableDescriptor[descr_start]; //-------------------------------------------------------------------- // Read Primary Key Values //-------------------------------------------------------------------- Tuple_header *save0= req_struct->m_tuple_ptr; if (regOperPtr->op_struct.op_type == ZDELETE && !regOperPtr->is_first_operation()) { jam(); req_struct->m_tuple_ptr= (Tuple_header*) c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location); } if (regTabPtr->need_expand(disk)) prepare_read(req_struct, regTabPtr, disk); int ret = readAttributes(req_struct, &tableDescriptor[regTabPtr->readKeyArray].tabDescr, regTabPtr->noOfKeyAttr, keyBuffer, ZATTR_BUFFER_SIZE, false); ndbrequire(ret != -1); noPrimKey= ret; req_struct->m_tuple_ptr = save0; Uint32 numAttrsToRead; if ((regOperPtr->op_struct.op_type == ZUPDATE) && (trigPtr->sendOnlyChangedAttributes)) { jam(); 
//-------------------------------------------------------------------- // Update that sends only changed information //-------------------------------------------------------------------- Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask; attributeMask = trigPtr->attributeMask; attributeMask.bitAND(req_struct->changeMask); numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes, &readBuffer[0]); } else if ((regOperPtr->op_struct.op_type == ZDELETE) && (!trigPtr->sendBeforeValues)) { jam(); //-------------------------------------------------------------------- // Delete without sending before values only read Primary Key //-------------------------------------------------------------------- return true; } else { jam(); //-------------------------------------------------------------------- // All others send all attributes that are monitored, except: // Omit unchanged blob inlines on update i.e. // attributeMask & ~ (blobAttributeMask & ~ changeMask) //-------------------------------------------------------------------- Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask; attributeMask = trigPtr->attributeMask; if (regOperPtr->op_struct.op_type == ZUPDATE) { Bitmask<MAXNROFATTRIBUTESINWORDS> tmpMask = regTabPtr->blobAttributeMask; tmpMask.bitANDC(req_struct->changeMask); attributeMask.bitANDC(tmpMask); } numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes, &readBuffer[0]); } ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE); //-------------------------------------------------------------------- // Read Main tuple values //-------------------------------------------------------------------- if (regOperPtr->op_struct.op_type != ZDELETE) { jam(); int ret = readAttributes(req_struct, &readBuffer[0], numAttrsToRead, afterBuffer, ZATTR_BUFFER_SIZE, false); ndbrequire(ret != -1); noAfterWords= ret; } else { jam(); noAfterWords = 0; } //-------------------------------------------------------------------- // Read Copy tuple values for 
UPDATE's //-------------------------------------------------------------------- // Initialise pagep and tuple offset for read of copy tuple //-------------------------------------------------------------------- if ((regOperPtr->op_struct.op_type == ZUPDATE || regOperPtr->op_struct.op_type == ZDELETE) && (trigPtr->sendBeforeValues)) { jam(); Tuple_header *save= req_struct->m_tuple_ptr; PagePtr tmp; if(regOperPtr->is_first_operation()) { Uint32 *ptr= get_ptr(&tmp, ®OperPtr->m_tuple_location, regTabPtr); req_struct->m_tuple_ptr= (Tuple_header*)ptr; } else { Uint32 *ptr= c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location); req_struct->m_tuple_ptr= (Tuple_header*)ptr; } if (regTabPtr->need_expand(disk)) prepare_read(req_struct, regTabPtr, disk); int ret = readAttributes(req_struct, &readBuffer[0], numAttrsToRead, beforeBuffer, ZATTR_BUFFER_SIZE, false); req_struct->m_tuple_ptr= save; ndbrequire(ret != -1); noBeforeWords = ret; if (trigPtr->m_receiverBlock != SUMA && (noAfterWords == noBeforeWords) && (memcmp(afterBuffer, beforeBuffer, noAfterWords << 2) == 0)) { //-------------------------------------------------------------------- // Although a trigger was fired it was not necessary since the old // value and the new value was exactly the same //-------------------------------------------------------------------- jam(); //XXX does this work with collations? return false; } } return true; }
// Replay a single redo-log entry (insert/update/delete) against the restored
// table in its own transaction. Any definition or unexpected execution error
// aborts the restore via exitHandler(); "expected" errors (duplicate key on
// insert, no-data on update/delete) are tolerated since the log may overlap
// with already-restored data.
void
BackupRestore::logEntry(const LogEntry & tup)
{
  if (!m_restore)
    return;

  NdbTransaction * trans = m_ndb->startTransaction();
  if (trans == NULL)
  {
    // Deep shit, TODO: handle the error
    err << "Cannot start transaction" << endl;
    exitHandler();
  } // if

  const NdbDictionary::Table * table = get_table(tup.m_table->m_dictTable);
  NdbOperation * op = trans->getNdbOperation(table);
  if (op == NULL)
  {
    err << "Cannot get operation: " << trans->getNdbError() << endl;
    exitHandler();
  } // if

  // Map the log-entry type onto the corresponding NDB operation.
  int check = 0;
  switch(tup.m_type)
  {
  case LogEntry::LE_INSERT:
    check = op->insertTuple();
    break;
  case LogEntry::LE_UPDATE:
    check = op->updateTuple();
    break;
  case LogEntry::LE_DELETE:
    check = op->deleteTuple();
    break;
  default:
    err << "Log entry has wrong operation type."
        << " Exiting...";
    exitHandler();
  }

  if (check != 0)
  {
    err << "Error defining op: " << trans->getNdbError() << endl;
    exitHandler();
  } // if

  // Bit per attribute id: ensures each primary-key column is bound only once
  // even if it appears multiple times in the log entry.
  Bitmask<4096> keys;
  for (Uint32 i= 0; i < tup.size(); i++)
  {
    const AttributeS * attr = tup[i];
    int size = attr->Desc->size;           // attribute size in bits
    int arraySize = attr->Desc->arraySize; // number of array elements
    const char * dataPtr = attr->Data.string_value;

    // Keep the auto-increment high-water mark up to date while replaying.
    if (tup.m_table->have_auto_inc(attr->Desc->attrId))
      tup.m_table->update_max_auto_val(dataPtr,size*arraySize);

    const Uint32 length = (size / 8) * arraySize;  // total length in bytes
    if (attr->Desc->m_column->getPrimaryKey())
    {
      if(!keys.get(attr->Desc->attrId))
      {
        keys.set(attr->Desc->attrId);
        check= op->equal(attr->Desc->attrId, dataPtr, length);
      }
    }
    else
      check= op->setValue(attr->Desc->attrId, dataPtr, length);

    if (check != 0)
    {
      err << "Error defining op: " << trans->getNdbError() << endl;
      exitHandler();
    } // if
  }

  const int ret = trans->execute(NdbTransaction::Commit);
  if (ret != 0)
  {
    // Both insert update and delete can fail during log running
    // and it's ok
    // TODO: check that the error is either tuple exists or tuple does not exist?
    bool ok= false;
    NdbError errobj= trans->getNdbError();
    switch(tup.m_type)
    {
    case LogEntry::LE_INSERT:
      // Row already present: acceptable for a replayed insert.
      if(errobj.status == NdbError::PermanentError &&
         errobj.classification == NdbError::ConstraintViolation)
        ok= true;
      break;
    case LogEntry::LE_UPDATE:
    case LogEntry::LE_DELETE:
      // Row already gone: acceptable for a replayed update/delete.
      if(errobj.status == NdbError::PermanentError &&
         errobj.classification == NdbError::NoDataFound)
        ok= true;
      break;
    }
    if (!ok)
    {
      err << "execute failed: " << errobj << endl;
      exitHandler();
    }
  }

  m_ndb->closeTransaction(trans);
  m_logCount++;
}