/* Test for correct behaviour using unique key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 *
 * Uses error inserts (8065/8066/8068) to artificially limit the number of
 * SegmentedSections available in the data node, then issues index operations
 * sized to overflow that limit.  Each overflowing execute() is expected to
 * fail with error 218 (ZGET_DATABUF_ERR).
 * NOTE(review): the CHECK* macros appear to return from this function on
 * failure — confirm against the NDBT macro definitions.
 */
int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
  /*
   * Signal type       Exhausted @            How
   * -----------------------------------------------------
   *  Long TCINDXREQ   Initial import         Consume + send
   *  Long TCINDXREQ   Build second TCKEYREQ  Consume + send short
   *                                          w. long base key
   */
  /* We will generate :
   * 10 SS left :
   *   Long IndexReq with too long Key/AttrInfo
   * 1 SS left :
   *   Long IndexReq read with short Key + Attrinfo to long
   *     base table Key
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
    return NDBT_OK;

  const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
  /* We want to use 6 Segmented Sections, each of 60 32-bit words, including
   * a 2 byte length overhead
   * (We don't want to use 10 Segmented Sections as in some scenarios TUP
   *  uses Segmented Sections when sending results, and if we use TUP on
   *  the same node, the exhaustion will occur in TUP, which is not what
   *  we're testing)
   */
  const Uint32 mediumPrimaryKeyBytes= (6* 60 * 4) - 2;

  /* Stack buffers for key/row images in NdbRecord format */
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyIxBuf[maxRowBytes];
  char bigAttrIxBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char resultSpace[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  Ndb* pNdb= GETNDB(step);

  const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
  const NdbRecord* ixRecord= pNdb->
    getDictionary()->getIndex(indexName,
                              ctx->getTab()->getName())->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with short key and data in base table record format */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);

  /* Big index key buffer
   * Big index key (normal row attribute) in index record format
   * Index's key is attrid 1 from the base table
   * This could get confusing !
   */
  setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
                                            bigKeyIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);

  /* Big AttrInfo buffer
   * Small key and large attrinfo in base table record format */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            0),
                 "ShortIXKey",
                 10);
  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);

  /* Big key row buffer
   * Medium sized key and small attrinfo (index key) in
   * base table record format */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0],
                 mediumPrimaryKeyBytes);
  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            1),
                 "ShortIXKey",
                 10);
  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  /* Insert a row in the base table with a big PK, and
   * small data (Unique IX key). This is used later to lookup
   * a big PK and cause overflow when reading TRANSID_AI in TC.
   */
  CHECKNOTNULL(trans->insertTuple(baseRecord,
                                  bigKeyRowBuf));
  CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));

  NdbRestarter restarter;
  /* Start a transaction on a specific node */
  trans= pNdb->startTransaction(ctx->getTab(),
                                &smallKey[0],
                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits any
   * single append/import to 10 sections.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8065));

  /* Ok, let's try an index read with a big index key.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigRead= trans->readTuple(ixRecord,
                                                bigKeyIxBuf,
                                                baseRecord,
                                                resultSpace);
  CHECKNOTNULL(bigRead);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  /* NOTE(review): no trailing ';' after this CHECKEQUAL — presumably the
   * macro expansion tolerates it; confirm against the macro definition. */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Ok, now a long TCINDXREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now a TCINDXREQ that overflows, but is not the last in the
   * batch, what happens to the other TCINDXREQ in the batch?
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));
  /* Another read */
  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Next we read a tuple with a large primary key via the unique
   * index. The index read itself should be fine, but
   * pulling in the base table PK will cause abort due to overflow
   * handling TRANSID_AI
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   * Note that the TRANSID_AI is received by TC as a short-signal
   * train, so no single append is large, but when the first
   * segment is used and append starts on the second, it will
   * fail.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8066));
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
                                         bigAttrIxBuf,
                                         baseRecord,
                                         resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  // TODO Move short signal testing to testUpgrade
#if 0
  /*
   * Short TCINDXREQ   KeyInfo accumulate     Consume + send long
   *                     (TCINDXREQ + KEYINFO)
   * Short TCINDXREQ   AttrInfo accumulate    Consume + send short key
   *                                          + long AI
   *                     (TCINDXREQ + ATTRINFO)
   */
  /* Now try with a 'short' TCINDXREQ, generated using the old Api
   * with a big index key value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  const NdbDictionary::Index* index;
  CHECKNOTNULL(index= pNdb->getDictionary()->
               getIndex(indexName,
                        ctx->getTab()->getName()));

  NdbIndexOperation* bigReadOldApi;
  CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigReadOldApi->readTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0,
                                     NdbDictionary::getValuePtr
                                     (ixRecord,
                                      bigKeyIxBuf,
                                      1)));
  CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Now try with a 'short' TCINDXREQ, generated using the old Api
   * with a big attrinfo value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  NdbIndexOperation* bigUpdateOldApi;
  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));
  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that next TCINDXREQ will grab
   * all but one SegmentedSection
   */
  restarter.insertErrorInAllNodes(8066);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can be imported, but the ATTRINFO can't
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));
  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();

  /* Change error insert so that there are no SectionSegments */
  restarter.insertErrorInAllNodes(8067);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can't be imported
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));
  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code)

  trans->close();
#endif

  /* Finished with error insert, cleanup the error
   * insertion
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          baseRecord,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  trans->execute(NdbTransaction::Rollback);

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
static void updateIndex(Ndb &myNdb, unsigned int noOfTuples, unsigned int noOfOperations, bool includePrimary, bool oneTrans, bool longKey) { Uint64 tbefore, tafter, before, after; NdbConnection *myTrans; NdbIndexOperation *myOp; char indexName[] = "PNUMINDEX0000"; char name[] = "Kalle0000000"; tbefore = NdbTick_CurrentMillisecond(); if (oneTrans) myTrans = myNdb.startTransaction(); for (unsigned int i = 0; i<noOfTuples; i++) { if (!oneTrans) myTrans = myNdb.startTransaction(); for(unsigned int j = 1; ((j<=noOfOperations)&&(i<noOfTuples)); (++j<=noOfOperations)?i++:i) { if (myTrans == NULL) error_handler4(__LINE__, myNdb.getNdbError()); myOp = myTrans->getNdbIndexOperation(indexName, "PERSON"); if (myOp == NULL) error_handler4(__LINE__, myTrans->getNdbError()); myOp->updateTuple(); if (includePrimary) { sprintf(name, "Kalle%.7i", i); if (longKey) memcpy(longName, name, strlen(name)); if (myOp->equal("NAME", (longKey)?longName:name) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } } if (myOp->equal("PNUM1", 17) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } if (myOp->equal("PNUM3", 19) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } // Update index itself, should be possible if (myOp->setValue("PNUM1", 77) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } if (myOp->setValue("PNUM2", 88)) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } if (myOp->setValue("PNUM4", 99)) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } if (myOp->setValue("AGE", 100) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } if (myOp->setValue("STRING_AGE", hundred) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); 
myNdb.closeTransaction(myTrans); break; } } if (noOfOperations == 1) printf("Trying to update person %s\n", name); else printf("Trying to update %u persons\n", noOfOperations); before = NdbTick_CurrentMillisecond(); if (myTrans->execute( (oneTrans) ? NoCommit : Commit ) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); myNdb.closeTransaction(myTrans); break; } after = NdbTick_CurrentMillisecond(); if (noOfOperations == 1) printf("Updated person %s, %u msec\n", name, (Uint32) after - before); else printf("Updated %u persons, %u msec\n", noOfOperations, (Uint32) after - before); if (!oneTrans) myNdb.closeTransaction(myTrans); } if (oneTrans) { if (myTrans->execute( Commit ) == -1) { error_handler4(__LINE__, myTrans->getNdbError()); } myNdb.closeTransaction(myTrans); } tafter = NdbTick_CurrentMillisecond(); ndbout << "Updated "<< noOfTuples << " tuples in " << ((oneTrans) ? 1 : noOfTuples) << " transaction(s), " << tafter - tbefore << " msec" << endl; }