bool BlockStreamJoinIterator::next(BlockStreamBase *block){
	unsigned bn;
	void *result_tuple;
	void *tuple_from_right_child;
	void *tuple_in_hashtable;
	void *key_in_input;
	void *key_in_hashtable;
	void *joinedTuple=memalign(cacheline_size,state_.output_schema->getTupleMaxSize());
	bool key_exists;

	remaining_block rb;

	while(true){
		/* first resume any partially processed right-hand block left over from a previous call */
		if(atomicPopRemainingBlock(rb)){
			/* probe every remaining tuple of the right-hand block against the hash table */
			while((tuple_from_right_child=rb.blockstream_iterator->currentTuple())>0){
				/* the right tuple joins a bucket entry only if every join key column matches */
				while((tuple_in_hashtable=rb.hashtable_iterator_.readCurrent())>0){
					key_exists=true;
					for(unsigned i=0;i<state_.joinIndex_right.size();i++){
						key_in_input=state_.input_schema_right->getColumnAddess(state_.joinIndex_right[i],tuple_from_right_child);
						key_in_hashtable=state_.ht_schema->getColumnAddess(state_.joinIndex_left[i],tuple_in_hashtable);
						if(!state_.input_schema_right->getcolumn(state_.joinIndex_right[i]).operate->equal(key_in_input,key_in_hashtable)){
							key_exists=false;
							break;
						}
					}
					if(key_exists){
						if((result_tuple=block->allocateTuple(state_.output_schema->getTupleMaxSize()))>0){
							produced_tuples++;
							/* concatenate the matching left and right tuples into the newly allocated output slot */
							const unsigned copied_bytes=state_.input_schema_left->copyTuple(tuple_in_hashtable,result_tuple);
							state_.input_schema_right->copyTuple(tuple_from_right_child,(char*)result_tuple+copied_bytes);
						}
						else{
							/* the output block is full: save the probe position and hand the block back */
							atomicPushRemainingBlock(rb);
							free(joinedTuple);
							return true;
						}
					}
					/* advance to the next entry in this hash bucket */
					rb.hashtable_iterator_.increase_cur_();
				}
				rb.blockstream_iterator->increase_cur_();
				consumed_tuples_from_right++;

				/* position the hash-table iterator at the bucket of the next right-hand tuple */
				if((tuple_from_right_child=rb.blockstream_iterator->currentTuple())){
					bn=state_.input_schema_right->getcolumn(state_.joinIndex_right[0]).operate->getPartitionValue(state_.input_schema_right->getColumnAddess(state_.joinIndex_right[0],tuple_from_right_child),hash);
					hashtable->placeIterator(rb.hashtable_iterator_,bn);
				}
			}
			AtomicPushFreeBlockStream(rb.bsb_right_);
		}
		rb.bsb_right_=AtomicPopFreeBlockStream();
		rb.bsb_right_->setEmpty();
		rb.hashtable_iterator_=hashtable->CreateIterator();
		/* the right child is exhausted: emit whatever has been collected, or report completion */
		if(state_.child_right->next(rb.bsb_right_)==false){
			if(block->Empty()==true){
				AtomicPushFreeBlockStream(rb.bsb_right_);
				free(joinedTuple);
				printf("****join next produces %d tuples while consumed %d tuples from right child and %d tuples from left, hash table has %d tuples\n",produced_tuples,consumed_tuples_from_right,consumed_tuples_from_left,tuples_in_hashtable);
				return false;
			}
			else{
				AtomicPushFreeBlockStream(rb.bsb_right_);
				free(joinedTuple);
				return true;
			}
		}
		/* a fresh right-hand block has been read: place the hash-table iterator for its first tuple */
		rb.blockstream_iterator=rb.bsb_right_->createIterator();
		if((tuple_from_right_child=rb.blockstream_iterator->currentTuple())){
			bn=state_.input_schema_right->getcolumn(state_.joinIndex_right[0]).operate->getPartitionValue(state_.input_schema_right->getColumnAddess(state_.joinIndex_right[0],tuple_from_right_child),hash);
			hashtable->placeIterator(rb.hashtable_iterator_,bn);
		}
		atomicPushRemainingBlock(rb);
	}
	return next(block);
}
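/* ---------------------------------------------------------------------------
 * A minimal, self-contained sketch of the "remaining block" bookkeeping the
 * join iterator above appears to rely on: when the output block fills
 * mid-probe, the partially consumed input is pushed onto a lock-protected
 * list and resumed on the next call.  RemainingWork, RemainingWorkPool and
 * their members are illustrative stand-ins (assumptions), not the project's
 * remaining_block / atomicPushRemainingBlock / atomicPopRemainingBlock API.
 * ------------------------------------------------------------------------- */
#include <cstdio>
#include <list>
#include <mutex>

struct RemainingWork {              /* stand-in for remaining_block */
	int block_id;                   /* which input block was being scanned */
	int next_tuple_offset;          /* where to resume inside that block */
};

class RemainingWorkPool {
public:
	void push(const RemainingWork &rw) {        /* cf. atomicPushRemainingBlock */
		std::lock_guard<std::mutex> guard(lock_);
		remaining_.push_back(rw);
	}
	bool pop(RemainingWork &rw) {               /* cf. atomicPopRemainingBlock */
		std::lock_guard<std::mutex> guard(lock_);
		if (remaining_.empty())
			return false;                       /* nothing left half-processed */
		rw = remaining_.front();
		remaining_.pop_front();
		return true;
	}
private:
	std::mutex lock_;
	std::list<RemainingWork> remaining_;
};

int main() {
	RemainingWorkPool pool;
	pool.push(RemainingWork{3, 17});            /* output block filled at tuple 17 of block 3 */
	RemainingWork rw;
	while (pool.pop(rw))                        /* the next call resumes exactly there */
		std::printf("resume block %d at tuple %d\n", rw.block_id, rw.next_tuple_offset);
	return 0;
}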
bool IndexScanIterator::Next(BlockStreamBase* block)
{
	remaining_block rb;
	void* tuple_from_index_search;

	// There is a block left over from a previous call that hasn't been completely processed
	if (atomicPopRemainingBlock(rb))
	{
		while (rb.block_off == rb.iter_result_map->first)
		{
			const unsigned bytes = state_.schema_->getTupleMaxSize();
			if ((tuple_from_index_search = block->allocateTuple(bytes)) > 0)
			{
				state_.schema_->copyTuple(rb.iterator->getTuple(*rb.iter_result_vector), tuple_from_index_search);
				rb.iter_result_vector++;
				if (rb.iter_result_vector == rb.iter_result_map->second->end())
				{
					rb.iter_result_map++;
					if (rb.iter_result_map == rb.result_set->end())
						break;
					rb.iter_result_vector = rb.iter_result_map->second->begin();
				}
			}
			else
			{
				atomicPushRemainingBlock(rb);
				return true;
			}
		}
		AtomicPushBlockStream(rb.block);
	}
	// When the program arrives here, it means that there is no remaining block or the remaining block is
	// exhausted. What we should do is ask for a new block from the chunk_reader_iterator (or partition_reader_iterator).
	BlockStreamBase* block_for_asking = AtomicPopBlockStream();
	block_for_asking->setEmpty();
	rb.block = block_for_asking;
	while (askForNextBlock(rb))
	{
		rb.iterator = rb.block->createIterator();
		while (rb.block_off == rb.iter_result_map->first)
		{
			const unsigned bytes = state_.schema_->getTupleMaxSize();
			if ((tuple_from_index_search = block->allocateTuple(bytes)) > 0)
			{
				state_.schema_->copyTuple(rb.iterator->getTuple(*rb.iter_result_vector), tuple_from_index_search);
				rb.iter_result_vector++;
				if (rb.iter_result_vector == rb.iter_result_map->second->end())
				{
					rb.iter_result_map++;
					if (rb.iter_result_map == rb.result_set->end())
						break;
					rb.iter_result_vector = rb.iter_result_map->second->begin();
				}
			}
			else
			{
				atomicPushRemainingBlock(rb);
				return true;
			}
		}
		block_for_asking->setEmpty();
	}
	AtomicPushBlockStream(block_for_asking);
	if (!block->Empty())
		return true;
	return false;
}
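/* ---------------------------------------------------------------------------
 * Sketch of the caller-side protocol that IndexScanIterator::Next above seems
 * to assume: the caller hands in an empty output block, consumes it when
 * Next() returns true, and stops when it returns false.  ToyBlock and
 * ToyIterator are illustrative stand-ins, not the project's BlockStreamBase
 * or physical-operator API.
 * ------------------------------------------------------------------------- */
#include <cstdio>
#include <vector>

struct ToyBlock {                       /* stand-in for BlockStreamBase */
	std::vector<int> tuples;
	void setEmpty() { tuples.clear(); }
};

struct ToyIterator {                    /* stand-in for a block-at-a-time operator */
	int remaining = 10;
	bool next(ToyBlock *block) {        /* fills the block; false once drained */
		if (remaining == 0) return false;
		for (int i = 0; i < 4 && remaining > 0; ++i, --remaining)
			block->tuples.push_back(remaining);
		return true;
	}
};

int main() {
	ToyIterator it;
	ToyBlock block;
	for (;;) {
		block.setEmpty();               /* the callee expects an empty block */
		if (!it.next(&block)) break;    /* false => the operator is exhausted */
		std::printf("got a block with %zu tuples\n", block.tuples.size());
	}
	return 0;
}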
bool ExpandableBlockStreamRandomMemAccess::next(BlockStreamBase* block) {

	remaining_block rb;
	void* tuple_from_child;
	void* tuple_in_block;

	if (atomicPopRemainingBlock(rb))
	{
		while ((tuple_from_child = rb.iterator->currentTuple()) > 0)
		{
			const unsigned bytes = state_.f_schema_->getTupleActualSize(tuple_from_child);
			if ((tuple_in_block = block->allocateTuple(bytes)) > 0)
			{
				/* the block has enough space to hold this tuple */
				state_.f_schema_->copyTuple((void*)(base_+(*(int*)tuple_from_child)*bytes), tuple_in_block);
				rb.iterator->increase_cur_();
			}
			else
			{
				/* the block is full; before we return, we push the remaining block back. */
				atomicPushRemainingBlock(rb);
				return true;
			}
		}
		AtomicPushFreeBlockStream(rb.block);
	}

	/* When the program arrives here, it means that there is no remaining block or the remaining block
	 * is exhausted, so we read a new block from the child.
	 */
	BlockStreamBase* block_for_asking = AtomicPopFreeBlockStream();
	block_for_asking->setEmpty();
	while (state_.child_->next(block_for_asking))
	{
		BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator = block_for_asking->createIterator();
		while((tuple_from_child = traverse_iterator->currentTuple()) > 0)
		{
			const unsigned bytes = state_.f_schema_->getTupleActualSize(tuple_from_child);
			if ((tuple_in_block = block->allocateTuple(bytes)) > 0)
			{
				/* the block has enough space to hold this tuple */
				state_.f_schema_->copyTuple((void*)(base_+(*(int*)tuple_from_child)*bytes), tuple_in_block);
				traverse_iterator->increase_cur_();
			}
			else
			{
				/* the block is full; before we return, we push the remaining block back. */
				atomicPushRemainingBlock(remaining_block(block_for_asking, traverse_iterator));
				return true;
			}
		}
		/* the block_for_asking is exhausted, but the block is not full */
		delete traverse_iterator;
		block_for_asking->setEmpty();
	}
	/* the child iterator is exhausted, but the block is not full */
	AtomicPushFreeBlockStream(block_for_asking);
	if (!block->Empty())
		return true;
	else
		return false;
}
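/* ---------------------------------------------------------------------------
 * Self-contained sketch of the addressing scheme the operator above appears
 * to use: each tuple coming from the child carries a row id, and the actual
 * tuple is fetched from a contiguous in-memory array at
 * base + row_id * tuple_size.  The toy buffer and sizes below are
 * assumptions for illustration, not the operator's real base_ / f_schema_.
 * ------------------------------------------------------------------------- */
#include <cstdio>
#include <cstring>

int main() {
	const unsigned kTupleSize = 8;              /* assume fixed-length tuples */
	char base[10 * kTupleSize];                 /* toy stand-in for base_ */
	for (int row = 0; row < 10; ++row)          /* store each row id in its slot */
		std::memcpy(base + row * kTupleSize, &row, sizeof(row));

	int rid = 7;                                /* the child tuple carries a row id */
	const void *src = base + rid * kTupleSize;  /* cf. base_+(*(int*)tuple)*bytes */
	int fetched;
	std::memcpy(&fetched, src, sizeof(fetched));
	std::printf("row id %d maps to value %d\n", rid, fetched);
	return 0;
}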
bool BlockStreamCombinedIterator::next(BlockStreamBase *block){
	unsigned total_length_=0;
	for(unsigned i=0;i<state_.inputs_.size();i++){
			total_length_+=state_.inputs_[i]->getTupleMaxSize();
	}
	void *tuple=0;
	void *column_in_combinedTuple=0;
	/* scratch buffer for assembling one combined output tuple */
	void *combinedTuple_=memalign(cacheline_size,state_.output_->getTupleMaxSize());
	void *cur=0;

	remaining_block rb;
	if(atomicPopRemainingBlock(rb)){
		while(1){
			for(unsigned j=0;j<state_.children_.size();j++){
				if((cur=rb.bsti_list_[j]->currentTuple())==0){
					rb.buffer_[j]->setEmpty();
					if(state_.children_[j]->next(rb.buffer_[j])==false){
					if(!block->Empty()){
						atomicPushRemainingBlock(rb);
						free(combinedTuple_);
						return true;
					}
					free(combinedTuple_);
					return false;
				}
					rb.bsti_list_[j]->reset();
					cur=rb.bsti_list_[j]->currentTuple();
				}
				column_in_combinedTuple=state_.output_->getColumnAddess(j,combinedTuple_);
				state_.output_->columns[j].operate->assign(cur,column_in_combinedTuple);
			}
			if((tuple=block->allocateTuple(total_length_))>0){
				memcpy(tuple,combinedTuple_,total_length_);
				for(unsigned k=0;k<state_.children_.size();k++){
					rb.bsti_list_[k]->increase_cur_();
				}
			}
			else{
				/* the output block is full: save the current position and hand the block back */
				atomicPushRemainingBlock(rb);
				free(combinedTuple_);
				return true;
			}
		}
	}


	lock_.acquire();
	std::vector<BlockStreamBase *> v_bsb;
	if(!free_block_stream_list_.empty()){
		v_bsb=free_block_stream_list_.front();
		free_block_stream_list_.pop_front();
	}
	else{
		/* no free input buffers are available */
		lock_.release();
		free(combinedTuple_);
		return false;
	}
	lock_.release();

	for(unsigned i=0;i<v_bsb.size();i++){
		v_bsb[i]->setEmpty();
		BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator=v_bsb[i]->createIterator();
		rb.bsti_list_.push_back(traverse_iterator);
	}

	atomicPushRemainingBlock(remaining_block(v_bsb,rb.bsti_list_));

	free(combinedTuple_);
	return next(block);
}
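/* ---------------------------------------------------------------------------
 * Minimal sketch of what the combined iterator above does for each output
 * tuple: take the current value from every child stream and lay the pieces
 * out side by side in one combined buffer before copying it into the output
 * block.  The three fixed int "children" are illustrative assumptions, not
 * the real state_.children_ / operate->assign machinery.
 * ------------------------------------------------------------------------- */
#include <cstdio>
#include <vector>

int main() {
	std::vector<std::vector<int>> children = {  /* one single-column stream per child */
		{1, 2, 3}, {10, 20, 30}, {100, 200, 300}
	};
	const unsigned columns = children.size();
	std::vector<int> combined(columns);         /* stand-in for combinedTuple_ */

	for (unsigned row = 0; row < 3; ++row) {
		for (unsigned j = 0; j < columns; ++j)  /* cf. operate->assign(cur, column) */
			combined[j] = children[j][row];
		std::printf("combined tuple: %d %d %d\n",
		            combined[0], combined[1], combined[2]);
	}
	return 0;
}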