int deep_vector_copy_ext_test() { vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP(); int retval; int num_test; int total_errors = 0; const int NUM_TESTS = TEST_DEEP_MM_NUM_TESTS; int NB = this_mxp->scratchpad_size * 10; int NT = NB / sizeof(vbx_mm_t); vbx_mm_t *v = vbx_shared_malloc( NB ); srand( 0x1a84c92a ); int i; for( num_test=0; num_test < NUM_TESTS ; num_test++ ) { // initialize the whole working space for( i=0; i<NT; i++ ) { v[i] = i & MSK; } // choose random src/dest/length: // -- randomly pick the dest // -- set a window size of 2*K around the dest // -- randomly pick the src within the window // -- randomly pick the length, subject to end-of-scratchpad // -- this 'window' rule increases probability of overlaps // -- rough distribution: 30% short (pipeline) overlaps, 20% long overlaps, 50% no overlap int K, N1, N2, NN; N1 = rand() % NT; K = 1 + rand() % ((N1 > 0)? min(min(N1, NT-N1), 1024): min(NT, 1024)); N2 = N1 - K + rand() % (2*K); NN = rand() % (NT - max(N1,N2)); vbx_mm_t *dst = v + N1; vbx_mm_t *src = v + N2; printf("test:%d src:0x%08x dst:0x%08x len:%08d", num_test, N1, N2, NN ); // do the copy retval = VBX_T(vbw_vec_copy_ext)( dst, src, NN ); vbx_sync(); printf(" retval:0x%04x\n",retval); // ensure the copy was done properly int errors = verify_copy(v, 0, N1, 0, "head") + verify_copy(v, N1, NN+N1, (N2-N1), "copy") + verify_copy(v, NN+N1, NT, 0, "tail"); total_errors += errors; if( errors ) { //break; } } return total_errors; }
int deep_vector_copy_test() { int retval; int num_test; int total_errors = 0; const int NUM_TESTS = TEST_DEEP_SP_NUM_TESTS; const int NB = vbx_sp_getfree(); int NT = NB / sizeof(vbx_sp_t); vbx_sp_push(); vbx_sp_t *v = vbx_sp_malloc( NB ); srand( 0x1a84c92a ); for( num_test=0; num_test < NUM_TESTS ; num_test++ ) { // initialize entire available scratchpad vbx_set_vl( NT ); vbx( SE(T), VAND, v, MSK, 0 ); // choose random src/dest/length: // -- randomly pick the dest // -- set a window size of 2*K around the dest // -- randomly pick the src within the window // -- randomly pick the length, subject to end-of-scratchpad // -- this 'window' rule increases probability of overlaps // -- rough distribution: 30% short (pipeline) overlaps, 20% long overlaps, 50% no overlap int K, N1, N2, NN; N1 = rand() % NT; K = 1 + rand() % ((N1 > 0)? min(min(N1, NT-N1), 1024): min(NT, 1024)); N2 = N1 - K + rand() % (2*K); NN = rand() % (NT - max(N1,N2)); vbx_sp_t *dst = v + N1; vbx_sp_t *src = v + N2; printf("test:%d src:0x%08x dst:0x%08x len:%08d", num_test, N1, N2, NN ); // do the copy retval = VBX_T(vbw_vec_copy)( dst, src, NN ); vbx_sync(); printf(" retval:0x%04x\n",retval); // ensure the copy was done properly int errors = verify_copy((vbx_mm_t *)v, 0, N1, 0, "head") + verify_copy((vbx_mm_t *)v, N1, NN+N1, (N2-N1), "copy") + verify_copy((vbx_mm_t *)v, NN+N1, NT, 0, "tail"); total_errors += errors; if( errors ) { //break; } } vbx_sp_pop(); return total_errors; }
// Copy file_a to file_b via the shell `cp` command, then verify the two
// files are identical.  Returns verify_copy()'s result (0 on a match),
// or -1 if the copy command could not be run or reported failure.
int copy_file_verify(char *file_a, char *file_b )
{
	char cp_cmd[MAXPATH*2 + 16];
	int ival;
	// BUGFIX: use snprintf so oversized paths cannot overflow cp_cmd.
	// NOTE(review): embedded quotes/backslashes in the names would still
	// break the shell quoting -- assumed not to occur in test filenames.
	snprintf(cp_cmd, sizeof(cp_cmd), "cp \"%s\" \"%s\"", file_a, file_b );
	// BUGFIX: the original ignored system()'s status, so a copy that failed
	// outright was only (maybe) caught indirectly by the verify step.
	if( system( cp_cmd ) != 0 ) {
		return( -1 );
	}
	ival = verify_copy( file_a, file_b );
	return( ival );
}
// Entry point for an NDB cluster event-API stress test:
//  1. (re)creates the test tables (all NDBT tables, or those named in argv),
//  2. creates an event + event operation per table and subscribes to every
//     column's pre- and post-image,
//  3. mutates rows (inserts, updates, and updates while randomly restarting
//     cluster nodes),
//  4. after each mutation batch, applies the received events to per-table
//     "shadow" tables and verifies they match the originals.
// Exits via NDBT_ProgramExit with NDBT_OK only if every step succeeded.
int main(int argc, char** argv)
{
	NDB_INIT(argv[0]);
	const char *load_default_groups[]= { "mysql_cluster",0 };
	load_defaults("my",load_default_groups,&argc,&argv);
	int ho_error;
#ifndef DBUG_OFF
	opt_debug= "d:t:F:L";
#endif
	if ((ho_error=handle_options(&argc, &argv, my_long_options,
				     ndb_std_get_one_option)))
		return NDBT_ProgramExit(NDBT_WRONGARGS);
	DBUG_ENTER("main");
	Ndb_cluster_connection con(opt_connect_str);
	if(con.connect(12, 5, 1))
	{
		DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
	}
	Ndb ndb(&con,_dbname);
	ndb.init();
	// Busy-wait (no timeout) until the cluster is ready.
	while (ndb.waitUntilReady() != 0);
	NdbDictionary::Dictionary * dict = ndb.getDictionary();
	int no_error= 1;  // sticky success flag; cleared on first failure
	int i;
	// create all tables
	Vector<const NdbDictionary::Table*> pTabs;
	if (argc == 0)
	{
		// No table names on the command line: recreate the full NDBT set.
		NDBT_Tables::dropAllTables(&ndb);
		NDBT_Tables::createAllTables(&ndb);
		for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++)
		{
			const NdbDictionary::Table *pTab= dict->getTable(NDBT_Tables::getTable(i)->getName());
			if (pTab == 0)
			{
				ndbout << "Failed to create table" << endl;
				ndbout << dict->getNdbError() << endl;
				no_error= 0;
				break;
			}
			pTabs.push_back(pTab);
		}
	}
	else
	{
		// Table names supplied: drop and recreate just those.
		for (i= 0; no_error && argc; argc--, i++)
		{
			dict->dropTable(argv[i]);
			NDBT_Tables::createTable(&ndb, argv[i]);
			const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
			if (pTab == 0)
			{
				ndbout << "Failed to create table" << endl;
				ndbout << dict->getNdbError() << endl;
				no_error= 0;
				break;
			}
			pTabs.push_back(pTab);
		}
	}
	pTabs.push_back(NULL);  // NULL sentinel terminates all pTabs[i] loops below
	// create an event for each table
	for (i= 0; no_error && pTabs[i]; i++)
	{
		HugoTransactions ht(*pTabs[i]);
		if (ht.createEvent(&ndb)){
			no_error= 0;
			break;
		}
	}
	// create an event operation for each event
	Vector<NdbEventOperation *> pOps;
	for (i= 0; no_error && pTabs[i]; i++)
	{
		char buf[1024];
		// Event name convention: "<table>_EVENT" (presumably what
		// HugoTransactions::createEvent used -- TODO confirm).
		sprintf(buf, "%s_EVENT", pTabs[i]->getName());
		NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
		if ( pOp == NULL )
		{
			no_error= 0;
			break;
		}
		pOps.push_back(pOp);
	}
	// get storage for each event operation
	for (i= 0; no_error && pTabs[i]; i++)
	{
		int n_columns= pTabs[i]->getNoOfColumns();
		// Subscribe to both the post-image and pre-image of every column.
		for (int j = 0; j < n_columns; j++)
		{
			pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
			pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
		}
	}
	// start receiving events
	for (i= 0; no_error && pTabs[i]; i++)
	{
		if ( pOps[i]->execute() )
		{
			no_error= 0;
			break;
		}
	}
	// create a "shadow" table for each table: an identical copy that the
	// received events will be replayed into, named "<table>_SHADOW"
	Vector<const NdbDictionary::Table*> pShadowTabs;
	for (i= 0; no_error && pTabs[i]; i++)
	{
		char buf[1024];
		sprintf(buf, "%s_SHADOW", pTabs[i]->getName());
		dict->dropTable(buf);
		if (dict->getTable(buf))
		{
			// drop failed -- a stale shadow table is still present
			no_error= 0;
			break;
		}
		NdbDictionary::Table table_shadow(*pTabs[i]);
		table_shadow.setName(buf);
		dict->createTable(table_shadow);
		pShadowTabs.push_back(dict->getTable(buf));
		if (!pShadowTabs[i])
		{
			no_error= 0;
			break;
		}
	}
	// create a hugo operation per table
	// NOTE(review): these HugoOperations are never deleted -- leaked until
	// process exit.
	Vector<HugoOperations *> hugo_ops;
	for (i= 0; no_error && pTabs[i]; i++)
	{
		hugo_ops.push_back(new HugoOperations(*pTabs[i]));
	}
	int n_records= 3;
	// insert n_records records per table
	do {
		if (start_transaction(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
		for (i= 0; no_error && pTabs[i]; i++)
		{
			hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
		}
		if (execute_commit(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
		if(close_transaction(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
	} while(0);
	// copy events and verify
	do {
		if (copy_events(&ndb) < 0)
		{
			no_error= 0;
			break;
		}
		if (verify_copy(&ndb, pTabs, pShadowTabs))
		{
			no_error= 0;
			break;
		}
	} while (0);
	// update n_records-1 records in first table
	do {
		if (start_transaction(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
		hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);
		if (execute_commit(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
		if(close_transaction(&ndb, hugo_ops))
		{
			no_error= 0;
			break;
		}
	} while(0);
	// copy events and verify
	do {
		if (copy_events(&ndb) < 0)
		{
			no_error= 0;
			break;
		}
		if (verify_copy(&ndb, pTabs, pShadowTabs))
		{
			no_error= 0;
			break;
		}
	} while (0);
	{
		// Ten rounds of: abort-restart a random node, update every record
		// on every table, then replay + verify the events.  This exercises
		// event delivery across node failures.
		NdbRestarts restarts;
		for (int j= 0; j < 10; j++)
		{
			// restart a node
			if (no_error)
			{
				int timeout = 240;
				if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
				{
					no_error= 0;
					break;
				}
			}
			// update all n_records records on all tables
			// NOTE(review): unlike the restart above, this block is not
			// guarded by no_error -- if an earlier step already failed,
			// the updates still run once before the next check breaks out.
			if (start_transaction(&ndb, hugo_ops))
			{
				no_error= 0;
				break;
			}
			for (int r= 0; r < n_records; r++)
			{
				for (i= 0; pTabs[i]; i++)
				{
					hugo_ops[i]->pkUpdateRecord(&ndb, r);
				}
			}
			if (execute_commit(&ndb, hugo_ops))
			{
				no_error= 0;
				break;
			}
			if(close_transaction(&ndb, hugo_ops))
			{
				no_error= 0;
				break;
			}
			// copy events and verify
			if (copy_events(&ndb) < 0)
			{
				no_error= 0;
				break;
			}
			if (verify_copy(&ndb, pTabs, pShadowTabs))
			{
				no_error= 0;
				break;
			}
		}
	}
	// drop the event operations (runs even after failures, so subscriptions
	// are not left dangling on the cluster)
	for (i= 0; i < (int)pOps.size(); i++)
	{
		if (ndb.dropEventOperation(pOps[i]))
		{
			no_error= 0;
		}
	}
	if (no_error)
		DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
	DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
}