bool BlockStreamPerformanceTest::next(BlockStreamBase*) {
  block_->setEmpty();
  if (state_.child1_->next(block_)) {
    BlockStreamBase::BlockStreamTraverseIterator* it = block_->createIterator();
    void* data;
    // Count every tuple from the first child; tuples whose leading int is the
    // -1 sentinel are counted a second time into tuplecount1_.
    while ((data = it->nextTuple()) != NULL) {
      tuplecount1_++;
      tuplecount_++;
      if (*(int*)data == -1) {
        tuplecount1_++;
      }
    }
    delete it;
    return true;
  } else if (state_.child2_->next(block_)) {
    BlockStreamBase::BlockStreamTraverseIterator* it = block_->createIterator();
    while (it->nextTuple()) {
      tuplecount2_++;
      tuplecount_++;
    }
    delete it;
    return true;
  }
  cout << "performance test return false!\n";
  return false;
}
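// A minimal standalone sketch of the traverse-iterator counting pattern used
// above. MockIterator is a hypothetical stand-in for
// BlockStreamBase::BlockStreamTraverseIterator (not the real API) so the
// example compiles and runs on its own; the -1 sentinel check mirrors the
// test operator's double-count of flagged tuples.
#include <cstdio>
#include <vector>

class MockIterator {
 public:
  explicit MockIterator(const std::vector<int>& tuples)
      : tuples_(tuples), cursor_(0) {}
  // Returns the next tuple, or NULL when the block is exhausted,
  // mirroring nextTuple() in the operator above.
  void* nextTuple() {
    if (cursor_ >= tuples_.size()) return NULL;
    return (void*)&tuples_[cursor_++];
  }

 private:
  const std::vector<int>& tuples_;
  size_t cursor_;
};

int main() {
  std::vector<int> block;
  int values[] = {1, -1, 7, -1, 3};
  block.assign(values, values + 5);
  MockIterator it(block);
  unsigned long tuple_count = 0, flagged_count = 0;
  void* data;
  // Parenthesize the assignment so the loop condition is unambiguous.
  while ((data = it.nextTuple()) != NULL) {
    ++tuple_count;
    if (*(int*)data == -1) ++flagged_count;  // same -1 sentinel check
  }
  printf("tuples=%lu flagged=%lu\n", tuple_count, flagged_count);
  return 0;
}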
bool InOperator::Open(const PartitionOffset& partition_offset) {
  state_.child_set_->Open(partition_offset);
  state_.child_in_->Open(partition_offset);
  AtomicPushFreeHtBlockStream(BlockStreamBase::createBlock(
      state_.schema_child_set_, state_.block_size_));
  AtomicPushFreeBlockStream(BlockStreamBase::createBlock(
      state_.schema_child_in_, state_.block_size_));
  if (sema_open_.try_wait()) {
    // The first thread through initializes the shared hash table; it is
    // built from child_set_ and later probed with tuples from child_in_.
    hash_func_ =
        PartitionFunctionFactory::createBoostHashFunction(state_.ht_nbuckets_);
    vector<unsigned> ht_index;
    ht_index.push_back(state_.index_child_set_);
    hash_table_ = new BasicHashTable(
        state_.ht_nbuckets_, state_.ht_bucket_size_,
        (state_.schema_child_set_->getSubSchema(ht_index))->getTupleMaxSize());
    ht_index.clear();
    open_finished_ = true;
  } else {
    // All other threads spin until the initializer has finished.
    while (!open_finished_) usleep(1);
  }
  void* cur_tuple = NULL;
  void* tuple_in_hashtable = NULL;
  unsigned bn = 0;
  BlockStreamBase* bsb = AtomicPopFreeHtBlockStream();
  // Build phase: hash each child_set_ tuple's key column to a bucket and
  // copy the key into the hash table.
  while (state_.child_set_->Next(bsb)) {
    BlockStreamBase::BlockStreamTraverseIterator* bsti = bsb->createIterator();
    bsti->reset();
    while ((cur_tuple = bsti->nextTuple()) != NULL) {
      bn = state_.schema_child_set_->getcolumn(state_.index_child_set_)
               .operate->GetPartitionValue(
                   state_.schema_child_set_->getColumnAddess(
                       state_.index_child_set_, cur_tuple),
                   state_.ht_nbuckets_);
      tuple_in_hashtable = hash_table_->atomicAllocate(bn);
      state_.schema_child_set_->getcolumn(state_.index_child_set_)
          .operate->Assign(state_.schema_child_set_->getColumnAddess(
                               state_.index_child_set_, cur_tuple),
                           tuple_in_hashtable);
    }
    delete bsti;
    bsb->setEmpty();
  }
  // Wait until every thread has finished inserting before any probe starts.
  barrier_->Arrive();
  printf("-----------In Iterator Open Successful!-----------\n");
  return true;
}
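// A minimal standalone sketch of the "first thread initializes, the rest
// spin-wait" handshake that sema_open_.try_wait() and open_finished_
// implement above. This is an assumption-laden simplification: the real
// operator uses a semaphore initialized to 1 and usleep(1), whereas this
// sketch swaps in std::atomic and yield(); init_claimed/open_finished are
// hypothetical names, not part of the InOperator API.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<bool> init_claimed(false);
static std::atomic<bool> open_finished(false);

void Open(int id) {
  // exchange() returns the previous value, so exactly one thread observes
  // 'false' and becomes the initializer, mirroring sema_open_.try_wait().
  if (!init_claimed.exchange(true)) {
    printf("thread %d builds the shared hash table\n", id);
    open_finished.store(true);
  } else {
    // Everyone else busy-waits until the hash table exists, mirroring the
    // while (!open_finished_) usleep(1) loop above.
    while (!open_finished.load()) std::this_thread::yield();
  }
  printf("thread %d proceeds to insert its partition\n", id);
}

int main() {
  std::vector<std::thread> pool;
  for (int i = 0; i < 4; ++i) pool.emplace_back(Open, i);
  for (size_t i = 0; i < pool.size(); ++i) pool[i].join();
  return 0;
}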
bool bottomLayerSorting::open(const PartitionOffset& partition_offset) {
  if (tryEntryIntoSerializedSection()) {
    computeVectorSchema();
    const bool child_open_return = state_.child_->open(partition_offset);
    setReturnStatus(child_open_return);
  }
  barrierArrive();
  // Construct the PartitionID that next() uses to make up each ChunkID.
  partition_id_.projection_id = state_.projection_id_;
  partition_id_.partition_off = partition_offset;
  // Open finished. Buffer all tuples produced by the child, grouped by the
  // ChunkID stored in column 0.
  BlockStreamBase* block_for_asking =
      BlockStreamBase::createBlock(state_.schema_, state_.block_size_);
  block_for_asking->setEmpty();
  BlockStreamBase::BlockStreamTraverseIterator* iterator = NULL;
  void* current_chunk = new ChunkOffset;
  // Share one duplicated comparison operator across all compare_nodes
  // rather than duplicating it per tuple.
  Operate* op_ = state_.schema_->getcolumn(1).operate->duplicateOperator();
  while (state_.child_->next(block_for_asking)) {
    iterator = block_for_asking->createIterator();
    void* current_tuple = NULL;
    while ((current_tuple = iterator->nextTuple()) != NULL) {
      state_.schema_->getColumnValue(0, current_tuple, current_chunk);
      compare_node* c_node = (compare_node*)malloc(sizeof(compare_node));
      c_node->vector_schema_ = vector_schema_;
      // Copy the tuple body (everything after the ChunkOffset column) so the
      // buffered copy outlives the recycled block.
      c_node->tuple_ = malloc(vector_schema_->getTupleMaxSize());
      vector_schema_->copyTuple(
          (char*)current_tuple + state_.schema_->getcolumn(0).get_length(),
          c_node->tuple_);
      c_node->op_ = op_;
      // operator[] default-constructs the vector for a first-seen ChunkID.
      tuples_in_chunk_[*(ChunkOffset*)current_chunk].push_back(c_node);
    }
    delete iterator;
    block_for_asking->setEmpty();
  }
  delete (ChunkOffset*)current_chunk;
  // Sort the buffered tuples within each chunk.
  cout << "Chunk num: " << tuples_in_chunk_.size() << endl;
  for (std::map<ChunkOffset, vector<compare_node*> >::iterator iter =
           tuples_in_chunk_.begin();
       iter != tuples_in_chunk_.end(); iter++) {
    cout << "Chunk size: " << iter->second.size() << endl;
    // stable_sort preserves the arrival order of tuples with equal keys.
    stable_sort(iter->second.begin(), iter->second.end(), compare);
  }
  return getReturnStatus();
}
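// A minimal standalone sketch of the buffer-then-sort-per-chunk strategy
// above: tuples are grouped by chunk id in a std::map, then each group is
// stable_sort'ed with a user comparator. Record and by_key are hypothetical
// simplifications of compare_node and compare(); they are not part of the
// bottomLayerSorting API.
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

struct Record {
  unsigned chunk_id;
  int key;  // stands in for the column the real compare() inspects
};

static bool by_key(const Record& a, const Record& b) { return a.key < b.key; }

int main() {
  Record input[] = {{0, 5}, {1, 2}, {0, 1}, {1, 9}, {0, 3}};
  std::map<unsigned, std::vector<Record> > tuples_in_chunk;

  // Group phase: route every incoming tuple to its chunk's bucket;
  // operator[] default-constructs the vector for a first-seen chunk id.
  for (size_t i = 0; i < sizeof(input) / sizeof(input[0]); ++i)
    tuples_in_chunk[input[i].chunk_id].push_back(input[i]);

  // Sort phase: stable_sort keeps the arrival order of equal keys, which
  // matters when buffered tuples carry positions that must stay monotone
  // within a chunk.
  for (std::map<unsigned, std::vector<Record> >::iterator it =
           tuples_in_chunk.begin();
       it != tuples_in_chunk.end(); ++it) {
    std::stable_sort(it->second.begin(), it->second.end(), by_key);
    printf("chunk %u:", it->first);
    for (size_t i = 0; i < it->second.size(); ++i)
      printf(" %d", it->second[i].key);
    printf("\n");
  }
  return 0;
}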