void WorkerPersistentArrayManager::save_marked_arrays(Interpreter* runner) {
		ArrayIdLabelMap::iterator it;
		for (it = persistent_array_map_.begin();
				it != persistent_array_map_.end(); ++it) {
			int array_id = it->first;
			int string_slot = it->second;
			//DEBUG
			SIP_LOG(std::cout << "\nsave marked: array= " << runner->array_name(array_id) << ", label=" << runner->string_literal(string_slot) << std::endl);
			const std::string label = runner->sip_tables().string_literal(string_slot);
			if (runner->sip_tables().is_scalar(array_id)) {
				double value = runner->scalar_value(array_id);
				SIP_LOG(std::cout << "saving scalar with label " << label << " value is " << value << std::endl);
				save_scalar(label, value);
			} else if (runner->sip_tables().is_contiguous(array_id)) {
				Block* contiguous_array = runner->get_and_remove_contiguous_array(array_id);
				SIP_LOG(std::cout << "saving contiguous array with label  "<<  label << " with contents "<< std::endl << *contiguous_array << std::endl);
				save_contiguous(label, contiguous_array);
			} else {
				//in parallel implementation, there won't be any of these on worker.
				IdBlockMap<Block>::PerArrayMap* per_array_map = runner->get_and_remove_per_array_map(array_id);
				SIP_LOG(std::cout << " saving distributed array  with label " << label << " and map with " << per_array_map->size() << " blocks" << std::endl);
				save_distributed(label, per_array_map);
			}
		}
		persistent_array_map_.clear();
	}
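Below is a minimal, self-contained sketch of the same idiom (hypothetical names, not the SIP API): a std::map from array id to label slot is walked once, each entry is dispatched on the array's kind, and the map is cleared so the marks are consumed exactly once per program.

#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-ins for the runtime queries used above.
enum ArrayKind { SCALAR, CONTIGUOUS, DISTRIBUTED };
ArrayKind kind_of(int array_id) { return array_id % 2 == 0 ? SCALAR : CONTIGUOUS; }
std::string label_of(int string_slot) { return string_slot == 7 ? "density" : "overlap"; }

int main() {
	std::map<int, int> persistent_array_map; // array id -> label string slot
	persistent_array_map[2] = 7;
	persistent_array_map[3] = 8;

	for (std::map<int, int>::iterator it = persistent_array_map.begin();
			it != persistent_array_map.end(); ++it) {
		const std::string label = label_of(it->second);
		switch (kind_of(it->first)) {
		case SCALAR:      std::cout << "save scalar "      << label << std::endl; break;
		case CONTIGUOUS:  std::cout << "save contiguous "  << label << std::endl; break;
		case DISTRIBUTED: std::cout << "save distributed " << label << std::endl; break;
		}
	}
	persistent_array_map.clear(); // marks are consumed once, as above
	return 0;
}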
Example #2
void SialOpsParallel::restore_persistent(Interpreter* worker, int array_slot,
		int string_slot) {
	SIP_LOG(std::cout << "restore_persistent with array " << sip_tables_.array_name(array_slot) << " in slot " << array_slot << " and string \"" << sip_tables_.string_literal(string_slot) << "\"" << std::endl);

	if (sip_tables_.is_distributed(array_slot)
			|| sip_tables_.is_served(array_slot)) {
		int my_server = sip_mpi_attr_.my_server();
		if (my_server > 0) {
			int restore_persistent_tag;
			restore_persistent_tag =
					barrier_support_.make_mpi_tag_for_RESTORE_PERSISTENT();
			int line_number = current_line();
			int buffer[4] = { array_slot, string_slot, line_number, barrier_support_.section_number()};
			SIPMPIUtils::check_err(
					MPI_Send(buffer, 4, MPI_INT, my_server,
							restore_persistent_tag, MPI_COMM_WORLD));
			//expect ack
			ack_handler_.expect_ack_from(my_server, restore_persistent_tag);
		}
	} else {
		persistent_array_manager_->restore_persistent(worker, array_slot,
				string_slot);
		SIP_LOG(std::cout << "returned from restore_persistent" << std::endl << std::flush);
	}

}
Example #3
ContiguousArrayManager::ContiguousArrayManager(const sip::SipTables& sip_tables,
		setup::SetupReader& setup_reader) :
		sip_tables_(sip_tables), setup_reader_(setup_reader) {
	//create static arrays in the sial program. All static arrays are allocated at startup
	int num_arrays = sip_tables_.num_arrays();
	for (int i = 0; i < num_arrays; ++i) {
		if (sip_tables_.is_contiguous(i) && sip_tables_.array_rank(i) > 0) {
			//check whether array has been predefined
			// FIXME TODO HACK - Check current program number.
			// The assumption is that only the first program reads predefined arrays from the
			// initialization data. The following ones don't.

			if (sip_tables_.is_predefined(i)) {

				SIP_LOG(
						std::cout << "Array " << sip_tables_.array_name(i)<< " is predefined..." << std::endl);

				Block::BlockPtr block = NULL;
				std::string name = sip_tables_.array_name(i);
				setup::SetupReader::NamePredefinedContiguousArrayMapIterator b =
						setup_reader_.name_to_predefined_contiguous_array_map_.find(
								name);
				if (b != setup_reader_.name_to_predefined_contiguous_array_map_.end()) {
					//array is predefined and in setup reader
					block = b->second.second;
					insert_contiguous_array(i, block);
				} else { //is predefined, but not in the setup reader. Set to zero and insert into the setup_reader map so it "owns" it and deletes it.

				    SIP_MASTER(check_and_warn(false, "No data for predefined static array " + name);)
					block = create_contiguous_array(i);
					int block_rank = sip_tables_.array_rank(i);
					std::pair<int, Block::BlockPtr> zeroed_block_pair = std::make_pair(i, block);
					setup_reader_.name_to_predefined_contiguous_array_map_.insert(make_pair(name, zeroed_block_pair));
				}
			} else { //not predefined, just create it
Example #4
ServerBlock* DiskBackedBlockMap::get_block_for_writing(const BlockId& block_id){
	/** If block is not in block map, allocate space for it
	 * Otherwise, if the block is not in memory, allocate space for it.
	 * Set in_memory and dirty_flag
	 */
	ServerBlock* block = block_map_.block(block_id);
	size_t block_size = sip_tables_.block_size(block_id);
	if (block == NULL) {
		std::stringstream msg;
		msg << "S " << sip_mpi_attr_.global_rank();
		msg << " : getting uninitialized block " << block_id << ".  Creating zero block for writing"<< std::endl;
		SIP_LOG(std::cout << msg.str() << std::flush);
		block = allocate_block(NULL, block_size);
	    block_map_.insert_block(block_id, block);
	} else {
		if (!block->is_in_memory())
			block->allocate_in_memory_data();
	}

	block->set_in_memory();
	block->set_dirty();

	policy_.touch(block_id);

	return block;
}
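A standalone sketch of the get-or-create pattern above, under the assumption of a hypothetical CacheBlock type rather than the real ServerBlock: look the block up, allocate zeroed storage if it is missing or was evicted, then mark it resident and dirty before handing it to the writer.

#include <cstddef>
#include <iostream>
#include <map>

// Hypothetical block type; only the fields needed to show the pattern.
struct CacheBlock {
	double* data;
	bool in_memory;
	bool dirty;
	explicit CacheBlock(std::size_t n) : data(new double[n]()), in_memory(true), dirty(false) {}
};

CacheBlock* get_block_for_writing(std::map<int, CacheBlock*>& blocks, int id, std::size_t size) {
	std::map<int, CacheBlock*>::iterator it = blocks.find(id);
	CacheBlock* block;
	if (it == blocks.end()) {            // unknown block: create zeroed storage and register it
		block = new CacheBlock(size);
		blocks[id] = block;
	} else {
		block = it->second;
		if (!block->in_memory) {         // known block whose data was evicted
			block->data = new double[size]();
			block->in_memory = true;
		}
	}
	block->dirty = true;                 // the caller is about to write into it
	return block;
}

int main() {
	std::map<int, CacheBlock*> blocks;
	CacheBlock* b = get_block_for_writing(blocks, 42, 16);
	std::cout << "dirty=" << b->dirty << " in_memory=" << b->in_memory << std::endl;
	return 0;
}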
Example #5
//returns the number of doubles actually freed
size_t DiskBackedBlockMap::backup_and_free_doubles(size_t requested_doubles_to_free) {
	size_t freed_count = 0;
	try {
		while (freed_count < requested_doubles_to_free) { //if requested_doubles_to_free <= 0, no iterations performed.
			ServerBlock* block;
			BlockId bid = policy_.get_next_block_for_removal(block);
			block->wait();
			ServerBlock* blk = block_map_.block(bid);
			check(blk == block, "bug in free_doubles");
			SIP_LOG(
					std::cout << "S " << sip_mpi_attr_.company_rank() << " : Freeing block " << bid << " and writing to disk to make space for new block" << std::endl);
			if (!blk->disk_state_.is_valid_on_disk()) {
				write_block_to_disk(bid, blk);
				blocks_to_disk_.inc();
				blk->disk_state_.set_valid_on_disk();
			}
			double* data_to_free = blk->get_data();
			free_data(data_to_free, blk->size()); //this method updates remaining_doubles_
			blk->disk_state_.unset_in_memory();
			blk->data_ = NULL;
			freed_count += blk->size();
		}
	} catch (const std::out_of_range& oor) {
		//ran out of blocks, just return what was freed.
	}
	return freed_count;
}
Example #6
/**
 * A put appears in a SIAL program as
 * put target(i,j,k,l) += source(i,j,k,l)
 * So we need the target block's id, but the source block's data.
 * Accumulation is done by the server.
 *
 * The implementation will be more complicated if asynchronous send is
 * used
 *
 * @param target
 * @param source_ptr
 */
void SialOpsParallel::put_accumulate(BlockId& target_id,
		const Block::BlockPtr source_block) {

	//partial check for data races
	check_and_set_mode(target_id, WRITE);

	//send message with target block's id to server
	int my_rank = sip_mpi_attr_.global_rank();
	int server_rank = data_distribution_.get_server_rank(target_id);
	int put_accumulate_tag, put_accumulate_data_tag;
	put_accumulate_tag = barrier_support_.make_mpi_tags_for_PUT_ACCUMULATE(
			put_accumulate_data_tag);

    sip::check(server_rank >= 0 && server_rank < sip_mpi_attr_.global_size(), "invalid server rank", current_line());

    SIP_LOG(std::cout<<"W " << sip_mpi_attr_.global_rank()
       		<< " : sending PUT_ACCUMULATE for block " << target_id
       		<< " to server "<< server_rank << std::endl);


    // Construct int array to send to server.
    const int to_send_size = BlockId::MPI_BLOCK_ID_COUNT + 2;
    const int line_num_offset = BlockId::MPI_BLOCK_ID_COUNT;
    const int section_num_offset = line_num_offset + 1;
    int to_send[to_send_size]; // BlockId, line number, and section number
    int *serialized_block_id = target_id.to_mpi_array();
    std::copy(serialized_block_id + 0, serialized_block_id + BlockId::MPI_BLOCK_ID_COUNT, to_send);
    to_send[line_num_offset] = current_line();
    to_send[section_num_offset] = barrier_support_.section_number();

	//send block id
	SIPMPIUtils::check_err(
			MPI_Send(to_send, to_send_size, MPI_INT,
					server_rank, put_accumulate_tag, MPI_COMM_WORLD));
	//immediately follow with the data
	SIPMPIUtils::check_err(
			MPI_Send(source_block->get_data(), source_block->size(), MPI_DOUBLE,
					server_rank, put_accumulate_data_tag, MPI_COMM_WORLD));

	//ack
	ack_handler_.expect_ack_from(server_rank, put_accumulate_data_tag);

	SIP_LOG(
			std::cout<< "W " << sip_mpi_attr_.global_rank() << " : Done with PUT_ACCUMULATE for block " << target_id << " to server rank " << server_rank << std::endl);

}
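The header that precedes the data message can be illustrated in isolation. A minimal sketch follows, in which the sizes and values are hypothetical stand-ins for BlockId::MPI_BLOCK_ID_COUNT and a real serialized BlockId: the serialized id is copied into the front of the buffer with std::copy, and the line and section numbers are appended at fixed offsets.

#include <algorithm>
#include <iostream>

int main() {
	const int ID_COUNT = 8;                       // stand-in for BlockId::MPI_BLOCK_ID_COUNT
	const int SEND_SIZE = ID_COUNT + 2;           // id ints + line number + section number
	int serialized_id[ID_COUNT] = {42, 1, 2, 3, 4, 0, 0, 0}; // hypothetical serialized BlockId
	const int line_number = 17;
	const int section_number = 3;

	int to_send[SEND_SIZE];
	std::copy(serialized_id, serialized_id + ID_COUNT, to_send);
	to_send[ID_COUNT]     = line_number;          // line_num_offset
	to_send[ID_COUNT + 1] = section_number;       // section_num_offset

	// In the code above this buffer is the first MPI_Send to the server,
	// immediately followed by a second MPI_Send carrying the block's doubles.
	for (int i = 0; i < SEND_SIZE; ++i) std::cout << to_send[i] << ' ';
	std::cout << std::endl;
	return 0;
}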
	void WorkerPersistentArrayManager::set_persistent(Interpreter* runner, int array_id, int string_slot) {
		SIP_LOG(std::cout << "set_persistent: array= " << runner->sip_tables().array_name(array_id) << ", label=" << runner->sip_tables().string_literal(string_slot) << std::endl);
		std::pair<ArrayIdLabelMap::iterator, bool> ret = persistent_array_map_.insert(std::pair<int, int>(array_id, string_slot));
		check(ret.second, "duplicate save of array in same sial program ");
		//check(ret.second, "duplicate save of array in same sial program " + SipTables::instance().array_name(array_id));
		//note that duplicate label for same type of object will
		//be detected during the save process so we don't
		//check for unique labels here.
	}
Example #8
void SialOpsParallel::end_program() {
	//implicit sip_barrier
	//this is required to ensure that there are no pending messages
	//at the server when the end_program message arrives.
	sip_barrier();
	int my_server = sip_mpi_attr_.my_server();
	SIP_LOG(std::cout << "I'm a worker and my server is " << my_server << std::endl << std::flush);
	//send end_program message to server, if designated worker and wait for ack.
	if (my_server > 0) {
		int end_program_tag;
		end_program_tag = barrier_support_.make_mpi_tag_for_END_PROGRAM();
		SIPMPIUtils::check_err(
				MPI_Send(0, 0, MPI_INT, my_server, end_program_tag,
						MPI_COMM_WORLD));
		ack_handler_.expect_sync_ack_from(my_server, end_program_tag);
	}
	//the program is done and the servers know it.
	SIP_LOG(std::cout << "leaving end_program" << std::endl << std::flush);
}
Example #9
ServerBlock* DiskBackedBlockMap::allocate_block(ServerBlock* block, size_t block_size, bool initialize){
    /** If enough memory remains, allocates block and returns.
     * Otherwise, frees up memory by writing out dirty blocks
     * till enough memory has been obtained, then allocates
     * and returns block.
     */
	std::size_t remaining_mem = max_allocatable_bytes_ - ServerBlock::allocated_bytes();

    while (block_size * sizeof(double) > remaining_mem){
        try{
			BlockId bid = policy_.get_next_block_for_removal();
			ServerBlock* blk = block_map_.block(bid);
			SIP_LOG(std::cout << "S " << sip_mpi_attr_.company_rank()
					<< " : Freeing block " << bid
					<< " and writing to disk to make space for new block"
					<< std::endl);
			if(blk->is_dirty()){
				write_block_to_disk(bid, blk);
			}
			blk->free_in_memory_data();

			// Junmin's fix :
			// As a result of freeing up block memory, the remaining memory should
			// have increased. Otherwise it will go into an infinite loop.
			if (!(remaining_mem < max_allocatable_bytes_ - ServerBlock::allocated_bytes())) {
				throw std::out_of_range("Break now.");
			}
            remaining_mem = max_allocatable_bytes_ - ServerBlock::allocated_bytes();

        } catch (const std::out_of_range& oor){
            std::cerr << " In DiskBackedBlockMap::allocate_block" << std::endl;
            std::cerr << oor.what() << std::endl;
            std::cerr << *this << std::endl;
            fail(" Something got messed up in the internal data structures of the Server", current_line());
        } catch(const std::bad_alloc& ba){
            std::cerr << " In DiskBackedBlockMap::allocate_block" << std::endl;
            std::cerr << ba.what() << std::endl;
            std::cerr << *this << std::endl;
			fail(" Could not allocate ServerBlock, out of memory", current_line());
		}
	}

	std::stringstream ss;
	ss << "S " << sip_mpi_attr_.company_rank() << " : Could not allocate memory for block of size "
			<< block_size << ", memory in use: " << ServerBlock::allocated_bytes() << std::endl;
	sip::check(block_size <= max_allocatable_bytes_ - ServerBlock::allocated_bytes(), ss.str());
   
    if (block == NULL) {
	    block = new ServerBlock(block_size, initialize);
    } else {
        block->allocate_in_memory_data();
    }

	return block;
}
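The eviction loop can be shown with a small standalone sketch, assuming a hypothetical EvictableBlock and an LRU list and omitting real disk I/O: victims are popped until the request fits, dirty victims would be written back first, and the loop throws if nothing is left to evict so it cannot spin forever.

#include <cstddef>
#include <iostream>
#include <list>
#include <stdexcept>

// Hypothetical in-memory block used only to illustrate the eviction loop.
struct EvictableBlock { std::size_t bytes; bool dirty; };

std::size_t make_room(std::list<EvictableBlock>& lru, std::size_t used,
                      std::size_t capacity, std::size_t needed) {
	while (needed > capacity - used) {
		if (lru.empty())                          // nothing left to evict: give up
			throw std::out_of_range("cannot satisfy allocation request");
		EvictableBlock victim = lru.front();      // least recently used block
		lru.pop_front();
		if (victim.dirty) {
			// write_block_to_disk(victim) would go here in the real server
		}
		used -= victim.bytes;                     // memory must shrink, or we would loop forever
	}
	return used;                                  // caller may now allocate `needed` bytes
}

int main() {
	std::list<EvictableBlock> lru;
	EvictableBlock a = {512, true};
	EvictableBlock b = {256, false};
	lru.push_back(a); lru.push_back(b);
	std::size_t used = make_room(lru, 768, 1024, 600);
	std::cout << "bytes in use after eviction: " << used << std::endl;
	return 0;
}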
Example #10
SetupReader::~SetupReader() {
	for(PredefIntArrayIterator iter = predef_int_arr_.begin(); iter != predef_int_arr_.end(); ++iter){
		SIP_LOG(std::cout<<"From SetupReader, freeing "<<iter->first<<std::endl);
		delete [] iter->second.dims;  //dims
		delete [] iter->second.data; //data
	}
//    for (PredefArrayIterator iter = predef_arr_.begin(); iter != predef_arr_.end(); ++iter){
//    	SIP_LOG(std::cout<<"From SetupReader, freeing "<<iter->first<<std::endl);
//    	delete [] iter->second.second.first;
//    	delete [] iter->second.second.second;
//    }
    for (NamePredefinedContiguousArrayMapIterator iter = name_to_predefined_contiguous_array_map_.begin();
    		iter != name_to_predefined_contiguous_array_map_.end(); ++iter){
    	SIP_LOG(std::cout<<"From SetupReader, freeing "<<iter->first<<std::endl);
    	CHECK(iter->second.second, "attempting to delete NULL block in ~SetupReader");
		delete iter->second.second;
		iter->second.second = NULL;
    }
    name_to_predefined_contiguous_array_map_.clear();
}
Block::BlockPtr ContiguousArrayManager::create_contiguous_array(int array_id) {
	BlockShape shape = sip_tables_.contiguous_array_shape(array_id);
	SIP_LOG(
			std::cout<< "creating contiguous array " << sip_tables_.array_name(array_id) << " with shape " << shape << " and array id :" << array_id << std::endl);
	Block::BlockPtr block_ptr = new Block(shape);
	const std::pair<ContiguousArrayMap::iterator, bool> &ret =
			contiguous_array_map_.insert(
					std::pair<int, Block::BlockPtr>(array_id, block_ptr));
	sip::check(ret.second,
			std::string(
					"attempting to create contiguous array that already exists"));
	return block_ptr;
}
/** delete all contiguous arrays except the predefined ones. */
ContiguousArrayManager::~ContiguousArrayManager() {
	ContiguousArrayMap::iterator it;
	for (it = contiguous_array_map_.begin(); it != contiguous_array_map_.end();
			++it) {
		int i = it->first;
		if (it->second != NULL && !sip_tables_.is_predefined(i)) {
			SIP_LOG(
					std::cout<<"Deleting contiguous array : "<<sip_tables_.array_name(i)<<std::endl);
			delete it->second;
			it->second = NULL;
		}
	}
	contiguous_array_map_.clear();
}
Example #13
//TODO optimize this.  Can reduce searches in block map.
void SialOpsParallel::get(BlockId& block_id) {

	//check for "data race"
	check_and_set_mode(block_id, READ);

	//if block already exists, or has pending request, just return
	Block::BlockPtr block = block_manager_.block(block_id);
	if (block != NULL)
		return;

	//send get message to block's server, and post receive
	int server_rank = data_distribution_.get_server_rank(block_id);
	int get_tag;
	get_tag = barrier_support_.make_mpi_tag_for_GET();

    sip::check(server_rank >= 0 && server_rank < sip_mpi_attr_.global_size(), "invalid server rank", current_line());

    SIP_LOG(std::cout<<"W " << sip_mpi_attr_.global_rank()
    		<< " : sending GET for block " << block_id
    		<< " to server "<< server_rank << std::endl);

    // Construct int array to send to server.
    const int to_send_size = BlockId::MPI_BLOCK_ID_COUNT + 2;
    const int line_num_offset = BlockId::MPI_BLOCK_ID_COUNT;
    const int section_num_offset = line_num_offset + 1;
    int to_send[to_send_size]; // BlockId, line number, and section number
    int *serialized_block_id = block_id.to_mpi_array();
    std::copy(serialized_block_id + 0, serialized_block_id + BlockId::MPI_BLOCK_ID_COUNT, to_send);
    to_send[line_num_offset] = current_line();
    to_send[section_num_offset] = barrier_support_.section_number();

	SIPMPIUtils::check_err(
			MPI_Send(to_send, to_send_size, MPI_INT,
					server_rank, get_tag, MPI_COMM_WORLD));

	//allocate block, and insert in block map, using block data as buffer
	block = block_manager_.get_block_for_writing(block_id, true);

	//post an asynchronous receive and store the request in the
	//block's state
	MPI_Request request;
	SIPMPIUtils::check_err(
			MPI_Irecv(block->get_data(), block->size(), MPI_DOUBLE, server_rank,
					get_tag, MPI_COMM_WORLD, &request));
	block->state().mpi_request_ = request;
}
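A minimal two-rank MPI program sketching the same request/receive pattern (the tag and payload are hypothetical; the real worker stores the MPI_Request in the block's state instead of waiting right away):

#include <mpi.h>
#include <iostream>

// Rank 0 asks rank 1 for data, posts a non-blocking receive into its own
// buffer, and waits on the request only when it needs the data.
int main(int argc, char** argv) {
	MPI_Init(&argc, &argv);
	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	const int TAG = 99, N = 4;
	double buf[N];
	if (rank == 0) {
		int dummy = 0;
		MPI_Send(&dummy, 1, MPI_INT, 1, TAG, MPI_COMM_WORLD);   // "GET" message
		MPI_Request req;
		MPI_Irecv(buf, N, MPI_DOUBLE, 1, TAG, MPI_COMM_WORLD, &req);
		// ... other work could overlap with the transfer here ...
		MPI_Wait(&req, MPI_STATUS_IGNORE);
		std::cout << "received " << buf[0] << std::endl;
	} else if (rank == 1) {
		int dummy;
		MPI_Recv(&dummy, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		double data[N] = {3.14, 0, 0, 0};
		MPI_Send(data, N, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD);
	}
	MPI_Finalize();
	return 0;
}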
Example #14
/** Removes all the blocks associated with the given array from the block map.
 * Removing the array from the map will cause its destructor to
 * be called, which will delete the data. Because of this, we must be very
 * careful to remove blocks that should not be deleted from the block_map_.
 */
void SialOpsParallel::delete_distributed(int array_id) {

	//delete any blocks stored locally along with the map
	block_manager_.block_map_.delete_per_array_map_and_blocks(array_id);

	//send delete message to server if responsible worker
	int server_rank = sip_mpi_attr_.my_server();
	if (server_rank > 0) {
		int line_number = current_line();
		int to_send[3] = { array_id, line_number, barrier_support_.section_number() };
		SIP_LOG(std::cout<<"W " << sip_mpi_attr_.global_rank() << " : sending DELETE to server "<< server_rank << std::endl);
		int delete_tag = barrier_support_.make_mpi_tag_for_DELETE();
		SIPMPIUtils::check_err(
				MPI_Send(to_send, 3, MPI_INT, server_rank, delete_tag,
						MPI_COMM_WORLD));
		ack_handler_.expect_ack_from(server_rank, delete_tag);
	}
}
Block::BlockPtr ContiguousArrayManager::insert_contiguous_array(int array_id,
		Block::BlockPtr block_ptr) {
	sip::check(block_ptr != NULL && block_ptr->get_data() != NULL,
			"Trying to insert null block_ptr or null block into contiguous array manager\n");
	ContiguousArrayMap::iterator it = contiguous_array_map_.find(array_id);
	if (it != contiguous_array_map_.end()){
		delete it->second;
		it->second = NULL;
		contiguous_array_map_.erase(it);
	}
	contiguous_array_map_[array_id] = block_ptr;

	SIP_LOG(
			std::cout<<"Contiguous Block of array "<<sip_tables_.array_name(array_id)<<std::endl);
	sip::check(
			block_ptr->shape() == sip_tables_.contiguous_array_shape(array_id),
			std::string("array ") + sip_tables_.array_name(array_id)
					+ std::string(
							" shape inconsistent in Sial program and inserted array"));
	return block_ptr;
}
Example #16
void SioxReader::read_special_instruction_table(){
	SIP_LOG(std::cout<< " in void SioxReader::read_special_instruction_table()" << std::endl);
	sip::SpecialInstructionManager::read(tables.special_instruction_manager_, file);
}
bool DistributedBlockConsistency::update_and_check_consistency(SIPMPIConstants::MessageType_t operation, int worker, int section){
	if (section > last_section_){
		//a barrier occurred since last access of block
		last_section_ = section;
		reset_consistency_status();
	}


	/**
	 * Block Consistency Rules
	 *
	 *     		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
	 *		NO      Rw      Ww         Aw         Rw1        Ww1     Aw1
	 *		Rw      Rw      Sw         Sw         RM          X       X
	 *		RM      RM       X          X          RM         X       X
	 *		Ww      Sw      Sw         Sw          X          X       X
	 *		Aw      Sw      Sw         Aw          X          X       AM
	 *		AM       X       X         AM          X          X       AM
	 *		Sw      Sw      Sw         Sw          X          X       X
	 */

	ServerBlockMode mode = mode_;
	int prev_worker = worker_;

	// Check if block already in inconsistent state.
	if (mode == INVALID_MODE || prev_worker == INVALID_WORKER)
		return false;


	ServerBlockMode new_mode = INVALID_MODE;
	int new_worker = INVALID_WORKER;

	//PUT_INCREMENT, PUT_SCALE, PUT_INITIALIZE should be checked as accumulate, put, and put, respectively.
	switch (mode){
	case NONE: {
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		NO      Rw      Ww         Aw         Rw1        Ww1     Aw1
		 */
		if (OPEN == prev_worker){
			switch(operation){
			case SIPMPIConstants::GET : 			new_mode = READ; 		new_worker = worker; break;
			case SIPMPIConstants::PUT : 			new_mode = WRITE; 		new_worker = worker; break;
			case SIPMPIConstants::PUT_ACCUMULATE :	new_mode = ACCUMULATE; 	new_worker = worker; break;
			default : goto consistency_error;
			}
		} else {
			goto consistency_error;
		}
	}
		break;
	case READ: {
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Rw      Rw      Sw         Sw         RM          X       X
		 *		RM      RM       X          X          RM         X       X
		 */
		if (OPEN == prev_worker){
			goto consistency_error;
		} else if (MULTIPLE_WORKER == prev_worker){
			switch(operation){
			case SIPMPIConstants::GET :				new_mode = READ; new_worker = MULTIPLE_WORKER; break;
			case SIPMPIConstants::PUT : case SIPMPIConstants::PUT_ACCUMULATE : {	goto consistency_error; } break;
			default : goto consistency_error;
			}
		} else { 	// Single worker
			switch(operation){
			case SIPMPIConstants::GET :	new_mode = READ; new_worker = (worker == prev_worker ? worker : MULTIPLE_WORKER); break;
			case SIPMPIConstants::PUT : case SIPMPIConstants::PUT_ACCUMULATE :{
				if (worker != prev_worker)
					goto consistency_error;
				new_mode = SINGLE_WORKER;
				new_worker = worker;
			}
			break;
			default : goto consistency_error;
			}
		}
	}
		break;
	case WRITE:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Ww      Sw      Sw         Sw          X          X       X
		 */
		if (prev_worker == worker){
			new_mode = SINGLE_WORKER; new_worker = worker;
		} else {
			goto consistency_error;
		}
	}
	break;
	case ACCUMULATE:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Aw      Sw      Sw         Aw          X          X       AM
		 *		AM       X       X         AM          X          X       AM
		 */
		if (OPEN == prev_worker){
			goto consistency_error;
		} else if (MULTIPLE_WORKER == prev_worker){
			if (SIPMPIConstants::PUT_ACCUMULATE == operation){
				new_mode = ACCUMULATE; new_worker = MULTIPLE_WORKER;
			} else {
				goto consistency_error;
			}
		} else { // Single worker
			switch(operation){
			case SIPMPIConstants::GET : case SIPMPIConstants::PUT : {
				if (prev_worker != worker)
					goto consistency_error;
				new_worker = worker;
				new_mode = SINGLE_WORKER;
			}
			break;
			case SIPMPIConstants::PUT_ACCUMULATE :	{
				new_mode = ACCUMULATE;
				new_worker = (worker == prev_worker ? worker : MULTIPLE_WORKER);
			}
			break;
			default : goto consistency_error;
			}
		}
	}
	break;
	case SINGLE_WORKER:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Sw      Sw      Sw         Sw          X          X       X
		 */
		if (worker == prev_worker){
			new_mode = SINGLE_WORKER; new_worker = worker;
		} else {
			goto consistency_error;
		}
	}
	break;
	default:
		goto consistency_error;
	}

	mode_ = new_mode;
	worker_ = new_worker;
	return true;

consistency_error:
	SIP_LOG(std::cout << "Inconsistent block at server ")
	mode_ = INVALID_MODE;
	worker_ = INVALID_WORKER;

	return false;

}
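A cut-down, hypothetical sketch of the table above, modeling only the GET column as an enum-based state machine, shows how a block read by one worker (Rw) degrades to "read by multiple workers" (RM) and how any unexpected transition invalidates the block:

#include <iostream>

// Hypothetical, simplified version of the consistency rules; not the SIP types.
enum Mode { M_NONE, M_READ, M_INVALID };
const int NO_WORKER = -1;
const int MANY_WORKERS = -2;

struct State { Mode mode; int worker; };

bool apply_get(State& s, int worker) {
	if (s.mode == M_NONE) { s.mode = M_READ; s.worker = worker; return true; }
	if (s.mode == M_READ) {
		s.worker = (s.worker == worker) ? worker : MANY_WORKERS;  // Rw stays Rw or becomes RM
		return true;
	}
	s.mode = M_INVALID; s.worker = NO_WORKER;                     // anything else is an error
	return false;
}

int main() {
	State s = {M_NONE, NO_WORKER};
	bool ok1 = apply_get(s, 3);   // NONE, GET by worker 3 -> Rw
	bool ok2 = apply_get(s, 5);   // Rw,   GET by worker 5 -> RM (multiple readers)
	std::cout << ok1 << ' ' << ok2 << ' ' << (s.worker == MANY_WORKERS) << std::endl;
	return 0;
}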
Example #18
bool ServerBlock::update_and_check_consistency(SIPMPIConstants::MessageType_t operation, int worker){
	/**
	 * Block Consistency Rules
	 *
	 *     		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
	 *		NO      Rw      Ww         Aw         Rw1        Ww1     Aw1
	 *		Rw      Rw      Sw         Sw         RM          X       X
	 *		RM      RM       X          X          RM         X       X
	 *		Ww      Sw      Sw         Sw          X          X       X
	 *		Aw      Sw      Sw         Aw          X          X       AM
	 *		AM       X       X         AM          X          X       AM
	 *		Sw      Sw      Sw         Sw          X          X       X
	 */

	ServerBlockMode mode = consistency_status_.first;
	int prev_worker = consistency_status_.second;

	// Check if block already in inconsistent state.
	if (mode == INVALID_MODE || prev_worker == INVALID_WORKER)
		return false;


	ServerBlockMode new_mode = INVALID_MODE;
	int new_worker = INVALID_WORKER;

	switch (mode){
	case NONE: {
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		NO      Rw      Ww         Aw         Rw1        Ww1     Aw1
		 */
		if (OPEN == prev_worker){
			switch(operation){
			case SIPMPIConstants::GET : 			new_mode = READ; 		new_worker = worker; break;
			case SIPMPIConstants::PUT : 			new_mode = WRITE; 		new_worker = worker; break;
			case SIPMPIConstants::PUT_ACCUMULATE :	new_mode = ACCUMULATE; 	new_worker = worker; break;
			default : goto consistency_error;
			}
		} else {
			goto consistency_error;
		}
	}
		break;
	case READ: {
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Rw      Rw      Sw         Sw         RM          X       X
		 *		RM      RM       X          X          RM         X       X
		 */
		if (OPEN == prev_worker){
			goto consistency_error;
		} else if (MULTIPLE_WORKER == prev_worker){
			switch(operation){
			case SIPMPIConstants::GET :				new_mode = READ; new_worker = MULTIPLE_WORKER; break;
			case SIPMPIConstants::PUT : case SIPMPIConstants::PUT_ACCUMULATE : {	goto consistency_error; } break;
			default : goto consistency_error;
			}
		} else { 	// Single worker
			switch(operation){
			case SIPMPIConstants::GET :	new_mode = READ; new_worker = (worker == prev_worker ? worker : MULTIPLE_WORKER); break;
			case SIPMPIConstants::PUT : case SIPMPIConstants::PUT_ACCUMULATE :{
				if (worker != prev_worker)
					goto consistency_error;
				new_mode = SINGLE_WORKER;
				new_worker = worker;
			}
			break;
			default : goto consistency_error;
			}
		}
	}
		break;
	case WRITE:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Ww      Sw      Sw         Sw          X          X       X
		 */
		if (prev_worker == worker){
			new_mode = SINGLE_WORKER; new_worker = worker;
		} else {
			goto consistency_error;
		}
	}
	break;
	case ACCUMULATE:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Aw      Sw      Sw         Aw          X          X       AM
		 *		AM       X       X         AM          X          X       AM
		 */
		if (OPEN == prev_worker){
			goto consistency_error;
		} else if (MULTIPLE_WORKER == prev_worker){
			if (SIPMPIConstants::PUT_ACCUMULATE == operation){
				new_mode = ACCUMULATE; new_worker = MULTIPLE_WORKER;
			} else {
				goto consistency_error;
			}
		} else { // Single worker
			switch(operation){
			case SIPMPIConstants::GET : case SIPMPIConstants::PUT : {
				if (prev_worker != worker)
					goto consistency_error;
				new_worker = worker;
				new_mode = SINGLE_WORKER;
			}
			break;
			case SIPMPIConstants::PUT_ACCUMULATE :	{
				new_mode = ACCUMULATE;
				new_worker = (worker == prev_worker ? worker : MULTIPLE_WORKER);
			}
			break;
			default : goto consistency_error;
			}
		}
	}
	break;
	case SINGLE_WORKER:{
		/*  		  GET,w    PUT,w    PUT_ACC,w   GET,w1     PUT,w1  PUT_ACC,w1
		 *		Sw      Sw      Sw         Sw          X          X       X
		 */
		if (worker == prev_worker){
			new_mode = SINGLE_WORKER; new_worker = worker;
		} else {
			goto consistency_error;
		}
	}
	break;
	default:
		goto consistency_error;
	}

	consistency_status_.first = new_mode;
	consistency_status_.second = new_worker;
	return true;

consistency_error:
	SIP_LOG(std::cout << "Inconsistent block at server ")
	consistency_status_.first = INVALID_MODE;
	consistency_status_.second = INVALID_WORKER;

	return false;

}
Example #19
void SialOpsParallel::log_statement(opcode_t type, int line){
	SIP_LOG(
			std::cout<< "W " << sip_mpi_attr_.global_rank()
					 << " : Line "<<line << ", type: " << opcodeToName(type)<<std::endl);
}