Example #1
File: testLcp.cpp  Project: 4T-Shirt/mysql
static int pause_lcp(int error)
{
  int nodes = g_restarter.getNumDbNodes();

  // Subscribe to cluster log events of category INFO, up to level 15
  int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_INFO, 0 };
  int fd = ndb_mgm_listen_event(g_restarter.handle, filter);
  require(fd >= 0);
  require(!g_restarter.insertErrorInAllNodes(error));
  // Trigger an immediate local checkpoint on all nodes
  int dump[] = { DumpStateOrd::DihStartLcpImmediately };
  require(!g_restarter.dumpStateAllNodes(dump, 1));
  
  char *tmp;
  char buf[1024];
  SocketInputStream in(fd, 1000);
  int count = 0;
  // Poll the event stream until every node has reported the LCP pause
  do {
    tmp = in.gets(buf, 1024);
    if(tmp)
    {
      int id;
      if(sscanf(tmp, "%*[^:]: LCP: %d ", &id) == 1 && id == error &&
	 --nodes == 0){
	close(fd);
	return 0;
      }
    }
  } while(count++ < 30);
  
  close(fd);
  return -1;
}
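
A caller typically pairs pause_lcp() with a later step that clears the error insert once its checks are done. A minimal caller sketch, assuming the same g_restarter global used above; the helper name and the assumption that clearing the insert lets the LCP continue are illustrative, not taken from the original file:

/* Hypothetical caller: pause the LCP via an error insert, do some
 * checking, then clear all error inserts (assumed to let the LCP
 * proceed).
 */
static int run_with_paused_lcp(int pauseError)
{
  if (pause_lcp(pauseError) != 0)
    return -1;                 // nodes never confirmed the pause

  /* ... perform checks while the LCP is held back ... */

  return g_restarter.insertErrorInAllNodes(0) == 0 ? 0 : -1;
}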
Example #2
int runBug15685(NDBT_Context* ctx, NDBT_Step* step){

  Ndb* pNdb = GETNDB(step);
  HugoOperations hugoOps(*ctx->getTab());
  NdbRestarter restarter;

  HugoTransactions hugoTrans(*ctx->getTab());
  if (hugoTrans.loadTable(pNdb, 10) != 0){
    return NDBT_FAILED;
  }

  if(hugoOps.startTransaction(pNdb) != 0)
    goto err;
  
  if(hugoOps.pkUpdateRecord(pNdb, 0, 1, rand()) != 0)
    goto err;

  if(hugoOps.execute_NoCommit(pNdb) != 0)
    goto err;

  if (restarter.insertErrorInAllNodes(5100))
    return NDBT_FAILED;
  
  // Error insert 5100 should fire during this rollback; result ignored
  hugoOps.execute_Rollback(pNdb);

  if (restarter.waitClusterStarted() != 0)
    goto err;

  if (restarter.insertErrorInAllNodes(0))
    return NDBT_FAILED;
  
  ctx->stopTest();
  return NDBT_OK;
  
err:
  ctx->stopTest();
  return NDBT_FAILED;
}
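
Steps such as runBug15685 are wired into an NDBT test binary with the test-suite macros; a minimal registration sketch follows (the suite name, description string, and the runLoadTable/runClearTable helpers are assumptions):

NDBT_TESTSUITE(testNodeRestart);
TESTCASE("Bug15685",
         "Rollback with node failure during abort (assumed description)"){
  INITIALIZER(runLoadTable);   // assumed helper: populate the table
  STEP(runBug15685);
  FINALIZER(runClearTable);    // assumed helper: clean up
}
NDBT_TESTSUITE_END(testNodeRestart);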
Example #3
int runBug20185(NDBT_Context* ctx, NDBT_Step* step){
  NdbRestarter restarter;
  HugoOperations hugoOps(*ctx->getTab());
  Ndb* pNdb = GETNDB(step);
  
  int dump[] = { 7090, 20 } ;
  if (restarter.dumpStateAllNodes(dump, 2))
    return NDBT_FAILED;
  
  NdbSleep_MilliSleep(3000);

  if(hugoOps.startTransaction(pNdb) != 0)
    return NDBT_FAILED;
  
  if(hugoOps.pkUpdateRecord(pNdb, 1, 1) != 0)
    return NDBT_FAILED;
  
  if (hugoOps.execute_NoCommit(pNdb) != 0)
    return NDBT_FAILED;
  
  int nodeId;
  const int node = hugoOps.getTransaction()->getConnectedNodeId();
  do {
    nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
  } while (nodeId == node);
  
  if (restarter.insertErrorInAllNodes(7030))
    return NDBT_FAILED;
  
  if (restarter.insertErrorInNode(nodeId, 7031))
    return NDBT_FAILED;
  
  NdbSleep_MilliSleep(500);
  
  if (hugoOps.execute_Commit(pNdb) == 0)
    return NDBT_FAILED;

  NdbSleep_MilliSleep(3000);

  restarter.waitClusterStarted();
  
  if (restarter.dumpStateAllNodes(dump, 1))
    return NDBT_FAILED;
  
  return NDBT_OK;
}
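
The do/while loop above that avoids the transaction coordinator's node is a recurring pattern in these tests; factored into a helper it might look like this (the helper name is hypothetical):

/* Sketch: pick a random data node id different from 'avoid'.
 * Assumes at least two data nodes, as these tests do.
 */
static int pickNodeOtherThan(NdbRestarter& restarter, int avoid)
{
  int nodeId;
  do {
    nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
  } while (nodeId == avoid);
  return nodeId;
}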
Example #4
int runDirtyRead(NDBT_Context* ctx, NDBT_Step* step){
  int result = NDBT_OK;
  int loops = ctx->getNumLoops();
  int records = ctx->getNumRecords();
  NdbRestarter restarter;
  HugoOperations hugoOps(*ctx->getTab());
  Ndb* pNdb = GETNDB(step);
    
  int i = 0;
  while(i<loops && result != NDBT_FAILED && !ctx->isTestStopped()){
    g_info << i << ": ";

    int id = i % restarter.getNumDbNodes();
    int nodeId = restarter.getDbNodeId(id);
    ndbout << "Restart node " << nodeId << endl; 
    restarter.insertErrorInNode(nodeId, 5041);
    restarter.insertErrorInAllNodes(8048 + (i & 1));
    
    for(int j = 0; j<records; j++){
      if(hugoOps.startTransaction(pNdb) != 0)
	return NDBT_FAILED;
      
      if(hugoOps.pkReadRecord(pNdb, j, 1, NdbOperation::LM_CommittedRead) != 0)
	goto err;
      
      int res;
      if((res = hugoOps.execute_Commit(pNdb)) == 4119)
	goto done;
      
      if(res != 0)
	goto err;
      
      if(hugoOps.closeTransaction(pNdb) != 0)
	return NDBT_FAILED;
    }
done:
    if(hugoOps.closeTransaction(pNdb) != 0)
      return NDBT_FAILED;
    
    i++;
    restarter.waitClusterStarted(60);
  }
  return result;
err:
  hugoOps.closeTransaction(pNdb);
  return NDBT_FAILED;
}
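
HugoOperations::pkReadRecord with NdbOperation::LM_CommittedRead wraps what a committed ("dirty") read looks like in the plain NDB API; a rough equivalent sketch, with placeholder column names and minimal error handling:

/* Sketch of a committed read via the raw NDB API. "PK" and "VAL" are
 * placeholder column names. On failure the NDB error code is returned,
 * e.g. 4119 on node failure, which the test above tolerates.
 */
static int committedRead(Ndb* pNdb, const NdbDictionary::Table* tab, int pk)
{
  NdbTransaction* trans = pNdb->startTransaction();
  if (trans == NULL)
    return -1;
  NdbOperation* op = trans->getNdbOperation(tab);
  if (op == NULL ||
      op->readTuple(NdbOperation::LM_CommittedRead) != 0 ||
      op->equal("PK", pk) != 0 ||
      op->getValue("VAL") == NULL)
  {
    trans->close();
    return -1;
  }
  int res = trans->execute(NdbTransaction::Commit);
  int err = (res == 0) ? 0 : trans->getNdbError().code;
  trans->close();
  return err;
}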
Example #5
int runBackupUndoWaitStarted(NDBT_Context* ctx, NDBT_Step* step){
  NdbBackup backup(GETNDB(step)->getNodeId()+1);
  unsigned backupId = 0;
  int undoError = 10041;
  NdbRestarter restarter;

  if(restarter.waitClusterStarted(60)){
    g_err << "waitClusterStarted failed"<< endl;
    return NDBT_FAILED;
  }

  if (restarter.insertErrorInAllNodes(undoError) != 0) {
    g_err << "Error insert failed" << endl;
    return NDBT_FAILED;
  } 
  // Start a backup, waiting only until it has started
  if (backup.start(backupId, 1, 0, 1) == -1){
    return NDBT_FAILED;
  }
  ndbout << "Started backup " << backupId << endl;
  ctx->setProperty("BackupId", backupId);

  return NDBT_OK;
}
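
A companion step can later pick up the "BackupId" property stored above; a sketch of such a step (the step name is hypothetical, and verifying backup completion is left out as test-specific):

/* Hypothetical follow-up step: fetch the backup id recorded by
 * runBackupUndoWaitStarted and clear the undo-delay error insert so
 * the backup can proceed.
 */
int runBackupUndoFinish(NDBT_Context* ctx, NDBT_Step* step){
  NdbRestarter restarter;
  Uint32 backupId = ctx->getProperty("BackupId", (Uint32)0);
  ndbout << "Releasing backup " << backupId << endl;

  if (restarter.insertErrorInAllNodes(0) != 0){
    g_err << "Clearing error insert failed" << endl;
    return NDBT_FAILED;
  }
  return NDBT_OK;
}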
Example #6
int 
runBug18612SR(NDBT_Context* ctx, NDBT_Step* step){

  // Assume two replicas
  NdbRestarter restarter;
  if (restarter.getNumDbNodes() < 2)
  {
    ctx->stopTest();
    return NDBT_OK;
  }

  Uint32 cnt = restarter.getNumDbNodes();

  for(int loop = 0; loop < ctx->getNumLoops(); loop++)
  {
    int partition0[256];
    int partition1[256];
    bzero(partition0, sizeof(partition0));
    bzero(partition1, sizeof(partition1));
    Bitmask<4> nodesmask;
    
    Uint32 node1 = restarter.getDbNodeId(rand()%cnt);
    for (Uint32 i = 0; i<cnt/2; i++)
    {
      do { 
	int tmp = restarter.getRandomNodeOtherNodeGroup(node1, rand());
	if (tmp == -1)
	  break;
	node1 = tmp;
      } while(nodesmask.get(node1));
      
      partition0[i] = node1;
      partition1[i] = restarter.getRandomNodeSameNodeGroup(node1, rand());
      
      ndbout_c("nodes %d %d", node1, partition1[i]);
      
      assert(!nodesmask.get(node1));
      assert(!nodesmask.get(partition1[i]));
      nodesmask.set(node1);
      nodesmask.set(partition1[i]);
    } 
    
    ndbout_c("done");

    if (restarter.restartAll(false, true, false))
      return NDBT_FAILED;

    int dump[255];
    dump[0] = 9000;
    memcpy(dump + 1, partition0, sizeof(int)*cnt/2);    
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateOneNode(partition1[i], dump, 1+cnt/2))
	return NDBT_FAILED;

    dump[0] = 9000;
    memcpy(dump + 1, partition1, sizeof(int)*cnt/2);    
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateOneNode(partition0[i], dump, 1+cnt/2))
	return NDBT_FAILED;

    int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
    
    if (restarter.dumpStateAllNodes(val2, 2))
      return NDBT_FAILED;
    
    if (restarter.insertErrorInAllNodes(932))
      return NDBT_FAILED;
    
    if (restarter.startAll())
      return NDBT_FAILED;
    
    if (restarter.waitClusterStartPhase(2))
      return NDBT_FAILED;
    
    dump[0] = 9001;
    for (Uint32 i = 0; i<cnt/2; i++)
      if (restarter.dumpStateAllNodes(dump, 2))
	return NDBT_FAILED;

    if (restarter.waitClusterNoStart(30))
      if (restarter.waitNodesNoStart(partition0, cnt/2, 10))
	if (restarter.waitNodesNoStart(partition1, cnt/2, 10))
	  return NDBT_FAILED;
    
    if (restarter.startAll())
      return NDBT_FAILED;
    
    if (restarter.waitClusterStarted())
      return NDBT_FAILED;
  }
  return NDBT_OK;
}
Example #7
int 
runBug18414(NDBT_Context* ctx, NDBT_Step* step){

  NdbRestarter restarter;
  if (restarter.getNumDbNodes() < 2)
  {
    ctx->stopTest();
    return NDBT_OK;
  }

  Ndb* pNdb = GETNDB(step);
  HugoOperations hugoOps(*ctx->getTab());
  HugoTransactions hugoTrans(*ctx->getTab());
  int loop = 0;
  do 
  {
    if(hugoOps.startTransaction(pNdb) != 0)
      goto err;
    
    if(hugoOps.pkUpdateRecord(pNdb, 0, 128, rand()) != 0)
      goto err;
    
    if(hugoOps.execute_NoCommit(pNdb) != 0)
      goto err;

    int node1 = hugoOps.getTransaction()->getConnectedNodeId();
    int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());
    
    if (node1 == -1 || node2 == -1)
      break;
    
    if (loop & 1)
    {
      if (restarter.insertErrorInNode(node1, 8050))
	goto err;
    }
    
    int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
    if (restarter.dumpStateOneNode(node2, val2, 2))
      goto err;
    
    if (restarter.insertErrorInNode(node2, 5003))
      goto err;
    
    // Result intentionally ignored: node2 is expected to die here
    hugoOps.execute_Rollback(pNdb);
  
    if (restarter.waitNodesNoStart(&node2, 1) != 0)
      goto err;
    
    if (restarter.insertErrorInAllNodes(0))
      goto err;
    
    if (restarter.startNodes(&node2, 1) != 0)
      goto err;
    
    if (restarter.waitClusterStarted() != 0)
      goto err;
    
    if (hugoTrans.scanUpdateRecords(pNdb, 128) != 0)
      goto err;

    hugoOps.closeTransaction(pNdb);
    
  } while(++loop < 5);
  
  return NDBT_OK;
  
err:
  hugoOps.closeTransaction(pNdb);
  return NDBT_FAILED;    
}
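
The two-word dump with DumpStateOrd::CmvmiSetRestartOnErrorInsert appears in several tests above; wrapped as a helper it is simply (the helper name is hypothetical):

/* Sketch: arm a node so that a later error insert makes it restart
 * rather than just report; mirrors the val2[] idiom used above.
 */
static int setRestartOnErrorInsert(NdbRestarter& restarter, int nodeId)
{
  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  return restarter.dumpStateOneNode(nodeId, val2, 2);
}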
Example #8
/* Test for correct behaviour using primary key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 */
int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
  /*
   * Signal type       Exhausted @              How
   * -----------------------------------------------------
   * Long TCKEYREQ     Initial import           Consume + send
   * Long TCKEYREQ     Initial import, not first
   *                     TCKEYREQ in batch      Consume + send
   * Long TCKEYREQ     Initial import, not last
   *                     TCKEYREQ in batch      Consume + send
   * No testing of short TCKEYREQ variants as they cannot be
   * generated in mysql-5.1-telco-6.4+
   * TODO : Add short variant testing to testUpgrade.
   */

  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxAttrBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytes;
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char bigAttrRowBuf[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */ 
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  /* Big key buffer with big key and small data*/
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0],
                 srcBuffBytes);
  NdbDictionary::setNull(record, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigKeyRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, bigKeyRowBuf, 1, false);

  /* Big AttrInfo buffer with small key and big data */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            0),
                 "ShortKey", 
                 8);
  NdbDictionary::setNull(record, bigAttrRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            bigAttrRowBuf,
                                            1),
                 &srcBuff[0],
                 maxAttrBytes);
  NdbDictionary::setNull(record, bigAttrRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits
   * any single import/append to 1 section
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                          record, 
                                          ctx->getTab(),
                                          smallRowBuf, 
                                          &restarter, 
                                          8065));

  /* Ok, let's try an insert with a key bigger than 1 section.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigInsert = trans->insertTuple(record, bigKeyRowBuf);

  CHECKNOTNULL(bigInsert);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();
  
  /* Ok, now a long TCKEYREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   * We also change it so that import overflow occurs 
   * on the AttrInfo section
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));


  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));

  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Ok, now a long TCKEYREQ where we run out of SegmentedSections
   * on the first TCKEYREQ, but there are other TCKEYREQs following
   * in the same batch.  Check that abort handling is correct
   */
    /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  /* First op in batch, will cause overflow */
  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));
  
  /* Second op in batch, what happens to it? */
  const NdbOperation* secondOp;
  CHECKNOTNULL(secondOp = trans->insertTuple(record, bigAttrRowBuf));


  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api 
   * with a big key value
   */
  /* Start transaction on the same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  NdbOperation* bigInsertOldApi;
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigKeyRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, 
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigKeyRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now try with a 'short' TCKEYREQ, generated using the old Api 
   * with a big data value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));

  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (record,
                                        bigAttrRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, 
                                          NdbDictionary::getValuePtr
                                          (record,
                                           bigAttrRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  // TODO : Add code to testUpgrade
#if 0
  /*
   * Short TCKEYREQ    KeyInfo accumulate       Consume + send long
   *                     (TCKEYREQ + KEYINFO)
   * Short TCKEYREQ    AttrInfo accumulate      Consume + send short key
   *                                             + long AI
   *                      (TCKEYREQ + ATTRINFO)
   */
  /* Change error insert so that next TCKEYREQ will grab
   * all but one SegmentedSection so that we can then test SegmentedSection
   * exhaustion when importing the Key/AttrInfo words from the
   * TCKEYREQ signal itself.
   */
  restarter.insertErrorInAllNodes(8066);


  /* Now a 'short' TCKEYREQ, there will be space to import the
   * short key, but not the AttrInfo
   */
  /* Start transaction on same node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
  
  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Change error insert so that there are no SectionSegments 
   * This will cause failure when attempting to import the
   * KeyInfo from the TCKEYREQ
   */
  restarter.insertErrorInAllNodes(8067);

  /* Now a 'short' TCKEYREQ - there will be no space to import the key */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
  
  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (record,
                                        smallRowBuf,
                                        0)));
  CHECKEQUAL(0, bigInsertOldApi->setValue(1, 
                                          NdbDictionary::getValuePtr
                                          (record,
                                           smallRowBuf,
                                           1)));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();  
#endif

  /* Finished with error insert, cleanup the error insertion
   * Error insert 8068 will free the hoarded segments
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                          record, 
                                          ctx->getTab(),
                                          smallRowBuf, 
                                          &restarter, 
                                          8068));

  trans->execute(NdbTransaction::Rollback);
  
  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
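
activateErrorInsert() is defined elsewhere in this test file; based purely on how it is called above, a plausible sketch looks like this (the body is an assumption, not the original implementation):

/* Assumed sketch of activateErrorInsert(): define an operation inside
 * the given transaction, so the error insert is exercised via this
 * transaction's TC, then activate the error code on all nodes.
 */
static int activateErrorInsert(NdbTransaction* trans,
                               const NdbRecord* record,
                               const NdbDictionary::Table* tab,
                               const char* rowBuf,
                               NdbRestarter* restarter,
                               Uint32 errorCode)
{
  (void)tab;  // the real helper may use the table; unused in this sketch
  if (trans->updateTuple(record, rowBuf, record, rowBuf) == NULL)
    return NDBT_FAILED;
  if (restarter->insertErrorInAllNodes(errorCode) != 0)
    return NDBT_FAILED;
  return NDBT_OK;
}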
Example #9
int testDropSignalFragments(NDBT_Context* ctx, NDBT_Step* step){
  /* Segmented section exhaustion results in dropped signals
   * Fragmented signals split one logical signal over multiple
   * physical signals (to cope with the MAX_SIGNAL_LENGTH=32kB
   * limitation).
   * This testcase checks that when individual signals comprising
   * a fragmented signal (in this case SCANTABREQ) are dropped, the
   * system behaves correctly.
   * Correct behaviour is to behave in the same way as if the signal
   * was not fragmented, and for SCANTABREQ, to return a temporary
   * resource error.
   */
  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Send more than (2 * MAX_SEND_MESSAGE_BYTESIZE) plus some extra,
   * so that we get at least 3 fragments.
   * However, as this is generally > 64kB, it's too much AttrInfo for
   * a ScanTabReq, so the 'success' case returns error 874.
   */
  const Uint32 PROG_WORDS= 16500; 

  struct SubCase
  {
    Uint32 errorInsertCode;
    int expectedRc;
  };
  const Uint32 numSubCases= 5;
  const SubCase cases[numSubCases]= 
  /* Error insert   Scanrc */
    {{          0,     874},  // Normal, success which gives too much AI error
     {       8074,     217},  // Drop first fragment -> error 217
     {       8075,     217},  // Drop middle fragment(s) -> error 217
     {       8076,     217},  // Drop last fragment -> error 217
     {       8077,     217}}; // Drop all fragments -> error 217
  const Uint32 numIterations= 50;
  
  Uint32 buff[ PROG_WORDS + 10 ]; // 10 extra for final 'return' etc.

  for (Uint32 iteration=0; iteration < (numIterations * numSubCases); iteration++)
  {
    /* Start a transaction */
    NdbTransaction* trans= pNdb->startTransaction();
    CHECKNOTNULL(trans);

    SubCase subcase= cases[iteration % numSubCases];

    Uint32 errorInsertVal= subcase.errorInsertCode;
    // printf("Inserting error : %u\n", errorInsertVal);
    /* We insert the error twice, to bias races between
     * error-insert propagation and the succeeding scan
     * in favour of error insert winning!
     * This problem needs a more general fix
     */
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));

    NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());
    
    CHECKNOTNULL(scan);
    
    CHECKEQUAL(0, scan->readTuples());
    
    /* Create a large program, to give a large SCANTABREQ */
    NdbInterpretedCode prog(ctx->getTab(), buff, PROG_WORDS + 10);
    
    for (Uint32 w=0; w < PROG_WORDS; w++)
      CHECKEQUAL(0, prog.load_const_null(1));
    
    CHECKEQUAL(0, prog.interpret_exit_ok());
    CHECKEQUAL(0, prog.finalise());
    
    CHECKEQUAL(0, scan->setInterpretedCode(&prog));
    
    /* Api doesn't seem to wait for result of scan request */
    CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
    
    CHECKEQUAL(0, trans->getNdbError().code);

    CHECKEQUAL(-1, scan->nextResult());
    
    int expectedResult= subcase.expectedRc;
    CHECKEQUAL(expectedResult, scan->getNdbError().code);

    scan->close();
    
    trans->close();
  }

  restarter.insertErrorInAllNodes(0);

  return NDBT_OK;
}
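
CHECKNOTNULL and CHECKEQUAL are harness macros defined elsewhere in this file; minimal equivalent definitions might look like this (a sketch, not the originals):

/* Sketch of the check macros used throughout: report the failing line
 * and bail out of the test step with NDBT_FAILED.
 */
#define CHECKNOTNULL(p) if ((p) == NULL) {              \
    ndbout << "Error at line " << __LINE__ << endl;     \
    return NDBT_FAILED; }

#define CHECKEQUAL(v, e) if ((e) != (v)) {              \
    ndbout << "Error at line " << __LINE__              \
           << ", expected " << (v) << endl;             \
    return NDBT_FAILED; }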
Example #10
/* Test for correct behaviour using unique key operations
 * when an NDBD node's SegmentedSection pool is exhausted.
 */
int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
  /* 
   * Signal type       Exhausted @              How
   * -----------------------------------------------------
   * Long TCINDXREQ    Initial import           Consume + send 
   * Long TCINDXREQ    Build second TCKEYREQ    Consume + send short
   *                                             w. long base key
   */
  /* We will generate:
   *   10 SS left:
   *     Long IndexReq with too-long Key/AttrInfo
   *   1 SS left:
   *     Long IndexReq read with short Key + AttrInfo to long
   *       base table Key
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
    return NDBT_OK;

  const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
  const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
  /* We want to use 6 Segmented Sections, each of 60 32-bit words, including
   * a 2 byte length overhead
   * (We don't want to use 10 Segmented Sections as in some scenarios TUP 
   *  uses Segmented Sections when sending results, and if we use TUP on
   *  the same node, the exhaustion will occur in TUP, which is not what
   *  we're testing)
   */
  const Uint32 mediumPrimaryKeyBytes= (6* 60 * 4) - 2;
  char smallKey[50];
  char srcBuff[srcBuffBytes];
  char smallRowBuf[maxRowBytes];
  char bigKeyIxBuf[maxRowBytes];
  char bigAttrIxBuf[maxRowBytes];
  char bigKeyRowBuf[maxRowBytes];
  char resultSpace[maxRowBytes];

  /* Small key for hinting to same TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  /* Large value source */
  memset(srcBuff, 'B', srcBuffBytes);

  Ndb* pNdb= GETNDB(step);

  const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
  const NdbRecord* ixRecord= pNdb->
    getDictionary()->getIndex(indexName,
                              ctx->getTab()->getName())->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with short key and data in base table record format
   */ 
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);

  /* Big index key buffer
   * Big index key (normal row attribute) in index record format
   * Index's key is attrid 1 from the base table
   * This could get confusing !
   */
  
  setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
                                            bigKeyIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);

  /* Big AttrInfo buffer
   * Small key and large attrinfo in base table record format */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            0),
                 "ShortIXKey", 
                 10);

  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigAttrIxBuf,
                                            1),
                 &srcBuff[0],
                 maxIndexKeyBytes);
  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);

  /* Big key row buffer 
   * Medium sized key and small attrinfo (index key) in
   * base table record format
   */
  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            0),
                 &srcBuff[0], 
                 mediumPrimaryKeyBytes);

  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
                                            bigKeyRowBuf,
                                            1),
                 "ShortIXKey",
                 10);
  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);


  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);
  /* Insert a row in the base table with a big PK, and
   * small data (Unique IX key).  This is used later to lookup
   * a big PK and cause overflow when reading TRANSID_AI in TC.
   */
  CHECKNOTNULL(trans->insertTuple(baseRecord,
                                  bigKeyRowBuf));

  CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));

  NdbRestarter restarter;
  /* Start a transaction on a specific node */
  trans= pNdb->startTransaction(ctx->getTab(),
                                &smallKey[0],
                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8065 in this transaction, limits any
   * single append/import to 10 sections.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                          baseRecord, 
                                          ctx->getTab(),
                                          smallRowBuf, 
                                          &restarter, 
                                          8065));

  /* Ok, let's try an index read with a big index key.
   * Since it's part of the same transaction, it'll go via
   * the same TC.
   */
  const NdbOperation* bigRead= trans->readTuple(ixRecord,
                                                bigKeyIxBuf,
                                                baseRecord,
                                                resultSpace);

  CHECKNOTNULL(bigRead);

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  
  /* Ok, now a long TCINDXREQ to the same TC - this
   * has slightly different abort handling since no other
   * operations exist in this new transaction.
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));
  
  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);
  
  trans->close();

  /* Now a TCINDXREQ that overflows, but is not the last in the
   * batch, what happens to the other TCINDXREQ in the batch?
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));
  /* Another read */
  CHECKNOTNULL(trans->readTuple(ixRecord,
                                bigKeyIxBuf,
                                baseRecord,
                                resultSpace));
  
  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);
  
  trans->close();


  /* Next we read a tuple with a large primary key via the unique
   * index.  The index read itself should be fine, but
   * pulling in the base table PK will cause abort due to overflow
   * handling TRANSID_AI
   */
  /* Start a transaction on a specific node */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   * Note that the TRANSID_AI is received by TC as a short-signal
   * train, so no single append is large, but when the first
   * segment is used and append starts on the second, it will
   * fail.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                          baseRecord, 
                                          ctx->getTab(),
                                          smallRowBuf, 
                                          &restarter, 
                                          8066));
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
  
  CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
                                         bigAttrIxBuf,
                                         baseRecord,
                                         resultSpace));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  // TODO Move short signal testing to testUpgrade
#if 0
  /*
   * Short TCINDXREQ   KeyInfo accumulate       Consume + send long
   *                     (TCINDXREQ + KEYINFO)
   * Short TCINDXREQ   AttrInfo accumulate      Consume + send short key
   *                                             + long AI
   *                     (TCINDXREQ + ATTRINFO)
   */
  /* Now try with a 'short' TCINDXREQ, generated using the old Api 
   * with a big index key value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  const NdbDictionary::Index* index;
  CHECKNOTNULL(index= pNdb->getDictionary()->
               getIndex(indexName,
                        ctx->getTab()->getName()));

  NdbIndexOperation* bigReadOldApi;
  CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigReadOldApi->readTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0, 
                                     NdbDictionary::getValuePtr
                                     (ixRecord,
                                      bigKeyIxBuf,
                                      1)));

  CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));

  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Now try with a 'short' TCINDXREQ, generated using the old Api 
   * with a big attrinfo value
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  NdbIndexOperation* bigUpdateOldApi;
  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));
  
  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Change error insert so that next TCINDXREQ will grab
   * all but one SegmentedSection
   */
  restarter.insertErrorInAllNodes(8066);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can be imported, but the ATTRINFO can't
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));
  
  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));
  
  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

  /* Change error insert so that there are no SectionSegments */
  restarter.insertErrorInAllNodes(8067);

  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
   * can't be imported
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));

  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
  /* We use the attribute id of the index, not the base table here */
  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
                                       NdbDictionary::getValuePtr
                                       (baseRecord,
                                        smallRowBuf,
                                        1)));

  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
                                          NdbDictionary::getValuePtr
                                          (baseRecord,
                                           bigAttrIxBuf,
                                           1)));
  
  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));

  /* ZGET_DATABUF_ERR expected */
  CHECKEQUAL(218, trans->getNdbError().code);

  trans->close();

#endif  

  /* Finished with error insert, cleanup the error insertion */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                          baseRecord, 
                                          ctx->getTab(),
                                          smallRowBuf, 
                                          &restarter, 
                                          8068));

  trans->execute(NdbTransaction::Rollback);
  
  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
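
setLongVarchar() fills a buffer in NDB's long-varchar format: a two-byte little-endian length prefix followed by the data, which is why mediumPrimaryKeyBytes above subtracts 2. A sketch consistent with its uses here (the actual helper lives elsewhere in the file):

/* Sketch: write 'sz' bytes of 'what' into 'where' as a long varchar
 * (2-byte little-endian length prefix), returning total bytes used.
 */
static Uint32 setLongVarchar(char* where, const char* what, Uint32 sz)
{
  where[0] = (char)(sz & 0xff);         // low byte of length
  where[1] = (char)((sz >> 8) & 0xff);  // high byte of length
  memcpy(&where[2], what, sz);
  return sz + 2;
}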
Example #11
int testSlowDihFileWrites(NDBT_Context* ctx, NDBT_Step* step)
{
  /* Testcase checks behaviour with slow flushing of DIH table definitions
   * This caused problems in the past by exhausting the DIH page pool
   * Now there's a concurrent operations limit.
   * Check that it behaves with many queued ops, parallel drop/node restarts
   */
  
  /* Run as a 'T1' testcase - do nothing for other tables */
  if (strcmp(ctx->getTab()->getName(), "T1") != 0)
    return NDBT_OK;

  /* 1. Activate slow write error insert
   * 2. Trigger LCP
   * 3. Wait some time, periodically producing info on 
   *    the internal state
   * 4. Perform some parallel action (drop table/node restarts)
   * 5. Wait some time, periodically producing info on 
   *    the internal state
   * 6. Clear the error insert
   * 7. Wait a little longer
   * 8. Done.
   */
  NdbRestarter restarter;

  for (Uint32 scenario = 0; scenario < NUM_SCENARIOS; scenario++)
  {
    ndbout_c("Inserting error 7235");
    restarter.insertErrorInAllNodes(7235);
    
    ndbout_c("Triggering LCP");
    int dumpArg = 7099;
    restarter.dumpStateAllNodes(&dumpArg, 1);
    
    const Uint32 periodSeconds = 10;
    Uint32 waitPeriods = 6;
    dumpArg = 7032;
    
    for (Uint32 p=0; p<waitPeriods; p++)
    {
      if (p == 3)
      {
        switch ((Scenarios) scenario)
        {
        case DROP_TABLE:
        {
          /* Drop one of the early-created tables */
          ndbout_c("Requesting DROP TABLE");
          ctx->setProperty("DIHWritesRequestType", (Uint32) DROP_TABLE_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);
          break;
        }
        case RESTART_MASTER:
        {
          ndbout_c("Requesting Master restart");
          ctx->setProperty("DIHWritesRequestType", (Uint32) MASTER_RESTART_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);

          break;
        }
        case RESTART_SLAVE:
        {
          ndbout_c("Requesting Slave restart");
          ctx->setProperty("DIHWritesRequestType", (Uint32) SLAVE_RESTART_REQ);
          ctx->setProperty("DIHWritesRequest", (Uint32) 1);

          break;
        }
        default:
          break;
        }
      }

      ndbout_c("Dumping DIH page info to ndbd stdout");
      restarter.dumpStateAllNodes(&dumpArg, 1);
      NdbSleep_MilliSleep(periodSeconds * 1000);
    }
    
    ndbout_c("Clearing error insert...");
    restarter.insertErrorInAllNodes(0);
    
    waitPeriods = 2;
    for (Uint32 p=0; p<waitPeriods; p++)
    {
      ndbout_c("Dumping DIH page info to ndbd stdout");
      restarter.dumpStateAllNodes(&dumpArg, 1);
      NdbSleep_MilliSleep(periodSeconds * 1000);
    }
    
    ndbout_c("Waiting for worker to finish task...");
    ctx->getPropertyWait("DIHWritesRequest", 2);
    
    if (ctx->isTestStopped())
      return NDBT_OK;

    ndbout_c("Done.");
  }  

  /* Finish up */
  ctx->stopTest();

  return NDBT_OK;
}
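
The Scenarios values and request types consumed above are declared elsewhere in the file, together with a worker step that watches the "DIHWritesRequest" property, performs the requested action, and sets the property to 2, which is what getPropertyWait() blocks on. Declarations consistent with the switch statement might be (a sketch; enumerator order and values are assumptions):

/* Sketch of the declarations the test above relies on. */
enum Scenarios
{
  DROP_TABLE,
  RESTART_MASTER,
  RESTART_SLAVE,
  NUM_SCENARIOS
};

enum DihWritesRequestType
{
  DROP_TABLE_REQ,
  MASTER_RESTART_REQ,
  SLAVE_RESTART_REQ
};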