bool BlockStreamJoinIterator::next(BlockStreamBase *block){
	// Produce joined tuples into *block: consume tuples from the right
	// child, probe the hash table built over the left child, and copy the
	// left tuple followed by the right tuple into the output block.
	// Returns true when *block holds output (a partially consumed right
	// block may be pushed back for the next call to resume); returns false
	// only when the right child is exhausted and *block is empty.
	//
	// Fixes vs. previous version: removed the leaked hash_tmp partition
	// function (created every call, never freed, only used in a dead store),
	// the never-used memalign'd joinedTuple buffer, the unused locals
	// column_in_joinedTuple / tmp iterator copy, the unreachable trailing
	// recursive return, and non-standard void* arithmetic.
	unsigned bn;
	void *result_tuple;
	void *tuple_from_right_child;
	void *tuple_in_hashtable;
	void *key_in_input;
	void *key_in_hashtable;
	bool key_exit;

	remaining_block rb;

	while(true){
		// First drain any block a previous call left half-consumed.
		if(atomicPopRemainingBlock(rb)){
			while((tuple_from_right_child=rb.blockstream_iterator->currentTuple())>0){
				// Scan the bucket the hash-table iterator is currently placed on.
				while((tuple_in_hashtable=rb.hashtable_iterator_.readCurrent())>0){
					// Compare every join-key column; key_exit stays true on a full match.
					key_exit=true;
					for(unsigned i=0;i<state_.joinIndex_right.size();i++){
						key_in_input=state_.input_schema_right->getColumnAddess(state_.joinIndex_right[i],tuple_from_right_child);
						key_in_hashtable=state_.ht_schema->getColumnAddess(state_.joinIndex_left[i],tuple_in_hashtable);
						if(!state_.input_schema_right->getcolumn(state_.joinIndex_right[i]).operate->equal(key_in_input,key_in_hashtable)){
							key_exit=false;
							break;
						}
					}
					if(key_exit){
						if((result_tuple=block->allocateTuple(state_.output_schema->getTupleMaxSize()))>0){
							produced_tuples++;
							// Output layout: left (hash-table) tuple first, right tuple appended.
							const unsigned copyed_bytes=state_.input_schema_left->copyTuple(tuple_in_hashtable,result_tuple);
							state_.input_schema_right->copyTuple(tuple_from_right_child,static_cast<char*>(result_tuple)+copyed_bytes);
						}
						else{
							// Output block is full: remember where we stopped so the next
							// call can resume, and hand the full block to the caller.
							atomicPushRemainingBlock(rb);
							return true;
						}
					}
					rb.hashtable_iterator_.increase_cur_();
				}
				rb.blockstream_iterator->increase_cur_();
				consumed_tuples_from_right++;

				// Re-place the hash-table iterator on the bucket of the next right tuple.
				if((tuple_from_right_child=rb.blockstream_iterator->currentTuple())){
					bn=state_.input_schema_right->getcolumn(state_.joinIndex_right[0]).operate->getPartitionValue(state_.input_schema_right->getColumnAddess(state_.joinIndex_right[0],tuple_from_right_child),hash);
					hashtable->placeIterator(rb.hashtable_iterator_,bn);
				}
			}
			// The resumed block is exhausted; recycle its buffer.
			AtomicPushFreeBlockStream(rb.bsb_right_);
		}
		// Fetch a fresh block from the right child.
		rb.bsb_right_=AtomicPopFreeBlockStream();
		rb.bsb_right_->setEmpty();
		rb.hashtable_iterator_=hashtable->CreateIterator();
		if(state_.child_right->next(rb.bsb_right_)==false){
			// Right child exhausted: return whatever was produced so far.
			if(block->Empty()==true){
				AtomicPushFreeBlockStream(rb.bsb_right_);
				printf("****join next produces %d tuples while consumed %d tuples from right child and %d tuples from left, hash table has %d tuples\n",produced_tuples,consumed_tuples_from_right,consumed_tuples_from_left,tuples_in_hashtable);
				return false;
			}
			else{
				AtomicPushFreeBlockStream(rb.bsb_right_);
				return true;
			}
		}
		rb.blockstream_iterator=rb.bsb_right_->createIterator();
		// Place the hash-table iterator on the bucket of the first tuple, then
		// queue the block so the loop above consumes it on the next iteration.
		if((tuple_from_right_child=rb.blockstream_iterator->currentTuple())){
			bn=state_.input_schema_right->getcolumn(state_.joinIndex_right[0]).operate->getPartitionValue(state_.input_schema_right->getColumnAddess(state_.joinIndex_right[0],tuple_from_right_child),hash);
			hashtable->placeIterator(rb.hashtable_iterator_,bn);
		}
		atomicPushRemainingBlock(rb);
	}
	// Not reached: the while(true) loop only exits via the returns above.
}
// Esempio n. 2 (Example no. 2) — aggregator-site residue; kept as a comment so the file compiles.
// Fills *block with tuples from the IN-side child whose key is present in
// hash_table_. Returns true while output was produced (possibly leaving a
// partially consumed input block on the remaining-block stack for the next
// call); returns false only when the child is exhausted and *block is empty.
bool InOperator::Next(BlockStreamBase* block) {
  unsigned bn;
  RemainingBlock rb;
  void* tuple_from_child_in = NULL;
  void* tuple_in_output_block = NULL;
  void* tuple_in_hashtable;
  void* key_in_input;
  bool passIn = false;
  BasicHashTable::Iterator hashtable_iterator = hash_table_->CreateIterator();

  // Phase 1: resume a block that a previous call left half-consumed.
  if (AtomicPopRemainingBlock(rb)) {
    while ((tuple_from_child_in = rb.blockstream_iterator_->currentTuple()) >
           0) {
      passIn = false;
      // Hash the key column to find the bucket to probe.
      bn = state_.schema_child_in_->getcolumn(state_.index_child_in_)
               .operate->GetPartitionValue(
                   state_.schema_child_in_->getColumnAddess(
                       state_.index_child_in_, tuple_from_child_in),
                   state_.ht_nbuckets_);
      hash_table_->placeIterator(hashtable_iterator, bn);
      // NOTE(review): this loop advances via readnext(), while the loop in
      // phase 2 below uses readCurrent() + increase_cur_() — confirm both
      // idioms traverse the full bucket identically.
      while ((tuple_in_hashtable = hashtable_iterator.readnext()) > 0) {
        key_in_input = state_.schema_child_in_->getColumnAddess(
            state_.index_child_in_, tuple_from_child_in);
        if (state_.schema_child_in_->getcolumn(state_.index_child_in_)
                .operate->Equal(tuple_in_hashtable, key_in_input)) {
          passIn = true;
          break;
        }
      }
      if (passIn) {
        const unsigned bytes = state_.schema_child_in_->getTupleMaxSize();
        if ((tuple_in_output_block = block->allocateTuple(bytes)) > 0) {
          // Key matched: copy the child tuple through to the output block.
          state_.schema_child_in_->copyTuple(tuple_from_child_in,
                                             tuple_in_output_block);
          rb.blockstream_iterator_->increase_cur_();
        } else {
          // Output block full: save position for the next call and return.
          AtomicPushRemainingBlock(rb);
          return true;
        }
      } else
        rb.blockstream_iterator_->increase_cur_();
    }
    // Resumed block exhausted: recycle its buffer.
    AtomicPushFreeBlockStream(rb.bsb_in_);
  }

  // Phase 2: pull fresh blocks from the child until the output block fills
  // up or the child is exhausted.
  BlockStreamBase* block_for_asking = AtomicPopFreeBlockStream();
  block_for_asking->setEmpty();
  while (state_.child_in_->Next(block_for_asking)) {
    BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator =
        block_for_asking->createIterator();
    while ((tuple_from_child_in = traverse_iterator->currentTuple()) > 0) {
      passIn = false;
      bn = state_.schema_child_in_->getcolumn(state_.index_child_in_)
               .operate->GetPartitionValue(
                   state_.schema_child_in_->getColumnAddess(
                       state_.index_child_in_, tuple_from_child_in),
                   state_.ht_nbuckets_);
      hash_table_->placeIterator(hashtable_iterator, bn);
      // Probe the bucket tuple by tuple until a key match is found.
      while ((tuple_in_hashtable = hashtable_iterator.readCurrent()) != 0) {
        key_in_input = state_.schema_child_in_->getColumnAddess(
            state_.index_child_in_, tuple_from_child_in);
        if (state_.schema_child_in_->getcolumn(state_.index_child_in_)
                .operate->Equal(tuple_in_hashtable, key_in_input)) {
          passIn = true;
          break;
        }
        hashtable_iterator.increase_cur_();
      }
      if (passIn) {
        const unsigned bytes = state_.schema_child_in_->getTupleMaxSize();
        if ((tuple_in_output_block = block->allocateTuple(bytes)) > 0) {
          state_.schema_child_in_->copyTuple(tuple_from_child_in,
                                             tuple_in_output_block);
          traverse_iterator->increase_cur_();
        } else {
          // Output block full mid-input-block: stash the input block plus
          // iterator so the next call resumes exactly here.
          AtomicPushRemainingBlock(
              RemainingBlock(block_for_asking, traverse_iterator));
          return true;
        }
      } else
        traverse_iterator->increase_cur_();
    }
    // NOTE(review): explicit destructor call without deallocation — assumes
    // the iterator is placement-constructed or otherwise owned elsewhere;
    // verify against createIterator()'s allocation scheme.
    traverse_iterator->~BlockStreamTraverseIterator();
    block_for_asking->setEmpty();
  }
  // Child exhausted: recycle the asking buffer and report whether any
  // output was produced.
  AtomicPushFreeBlockStream(block_for_asking);
  if (!block->Empty()) return true;
  return false;
}
/* Materializes tuples by random memory access: each child tuple holds an int
 * row index; the actual tuple is read from base_ at index*tuple_size and
 * copied into *block. Returns true while *block holds output (a partially
 * consumed child block may be pushed back for the next call); returns false
 * only when the child is exhausted and *block is empty.
 *
 * Bug fix: the remaining-block path computed the tuple size from the
 * UNINITIALIZED pointer tuple_in_block (undefined behavior); it now uses
 * tuple_from_child, matching the fresh-block path below.
 */
bool ExpandableBlockStreamRandomMemAccess::next(BlockStreamBase* block) {

	remaining_block rb;
	void* tuple_from_child;
	void* tuple_in_block;

	/* Resume a block that a previous call left half-consumed. */
	if (atomicPopRemainingBlock(rb))
	{
		while ((tuple_from_child = rb.iterator->currentTuple()) > 0)
		{
			/* was getTupleActualSize(tuple_in_block): read of an uninitialized pointer */
			const unsigned bytes = state_.f_schema_->getTupleActualSize(tuple_from_child);
			if ((tuple_in_block = block->allocateTuple(bytes)) > 0)
			{
				/* the block has enough space to hold this tuple */
				state_.f_schema_->copyTuple((void*)(base_+(*(int*)tuple_from_child)*bytes), tuple_in_block);
				rb.iterator->increase_cur_();
			}
			else
			{
				/* the block is full, before we return, we push the remaining block back. */
				atomicPushRemainingBlock(rb);
				return true;
			}
		}
		AtomicPushFreeBlockStream(rb.block);
	}

	/* When the program arrives here, it means that there is no remaining block or the
	 * remaining block is exhausted, so we read a new block from the child.
	 */
	BlockStreamBase* block_for_asking = AtomicPopFreeBlockStream();
	block_for_asking->setEmpty();
	while (state_.child_->next(block_for_asking))
	{
		BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator = block_for_asking->createIterator();
		while((tuple_from_child = traverse_iterator->currentTuple()) > 0)
		{
			const unsigned bytes = state_.f_schema_->getTupleActualSize(tuple_from_child);
			if ((tuple_in_block = block->allocateTuple(bytes)) > 0)
			{
				/* the block has enough space to hold this tuple */
				state_.f_schema_->copyTuple((void*)(base_+(*(int*)tuple_from_child)*bytes), tuple_in_block);
				traverse_iterator->increase_cur_();
			}
			else
			{
				/* the block is full, before we return, we push the remaining block back. */
				atomicPushRemainingBlock(remaining_block(block_for_asking, traverse_iterator));
				return true;
			}
		}
		/* the block_for_asking is exhausted, but the block is not full */
		traverse_iterator->~BlockStreamTraverseIterator();
		block_for_asking->setEmpty();
	}
	/* the child iterator is exhausted, but the block is not full */
	AtomicPushFreeBlockStream(block_for_asking);
	if (!block->Empty())
		return true;
	else
		return false;
}