Example 1
int SRLockClient::start_client () {	
	SRClientContext ctx;
	char temp_char;
	
	ctx.ib_port = SERVER_IB_PORT;
	
	srand (generate_random_seed());		// initialize random seed
		
	TEST_NZ (establish_tcp_connection(SERVER_ADDR.c_str(), SERVER_TCP_PORT, &(ctx.sockfd)));
	
	DEBUG_COUT("[Comm] Client connected to LM on sock " << ctx.sockfd);

	TEST_NZ (ctx.create_context());
	DEBUG_COUT("[Info] Context Created " << ctx.sockfd);
	
	
	TEST_NZ (RDMACommon::connect_qp (&(ctx.qp), ctx.ib_port, ctx.port_attr.lid, ctx.sockfd));	
	DEBUG_COUT("[Conn] QP connected!");
	
	start_operation(ctx);
	
	// Sync so server will know that client is done mucking with its memory
	DEBUG_COUT("[Info] Client is done, and is ready to destroy its resources!");
	TEST_NZ (sock_sync_data (ctx.sockfd, 1, "W", &temp_char));	/* just send a dummy char back and forth */
	TEST_NZ(ctx.destroy_context());

	return 0;
}
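A note on the error-handling convention used throughout these examples: TEST_NZ treats any nonzero return value as fatal. A minimal sketch of the macro, assuming the usual verbs-example definition (the actual definition lives elsewhere in this codebase):

#include <cstdio>
#include <cstdlib>

// Hypothetical sketch: abort with a diagnostic if the wrapped call returns nonzero.
#define TEST_NZ(x)														\
	do {																\
		if ((x)) {														\
			std::fprintf(stderr, "error: %s failed at %s:%d\n", #x, __FILE__, __LINE__);	\
			std::exit(EXIT_FAILURE);									\
		}																\
	} while (0)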
int RDMAServer::initialize_data_structures(){
	global_items_region			= new ItemVersion[ITEM_CNT * MAX_ITEM_VERSIONS];
	
	//OrdersVersion*		RDMAServer::orders_region		= new OrdersVersion[MAX_ORDERS_CNT * MAX_ORDERS_VERSIONS];
	global_orders_region		= new OrdersVersion[MAX_BUFFER_SIZE];	// TODO

	//OrderLineVersion*	RDMAServer::order_line_region	= new OrderLineVersion[ORDERLINE_PER_ORDER * MAX_ORDERS_CNT * MAX_ORDERLINE_VERSIONS];
	global_order_line_region	= new OrderLineVersion[MAX_BUFFER_SIZE];			// TODO
	
	//CCXactsVersion*		RDMAServer::cc_xacts_region		= new CCXactsVersion[MAX_CCXACTS_CNT * MAX_CCXACTS_VERSIONS];
	global_cc_xacts_region		= new CCXactsVersion[MAX_BUFFER_SIZE];	// TODO
	
	global_timestamp_region		= new Timestamp[1];
	global_lock_items_region	= new uint64_t[ITEM_CNT];

	RDMAServer::global_timestamp_region[0].value = 0ULL;	// the timestamp counter is initially set to 0
	DEBUG_COUT("[Info] Timestamp set to " << RDMAServer::global_timestamp_region[0].value);

	TEST_NZ(load_tables_from_files(global_items_region));
	DEBUG_COUT("[Info] Tables loaded successfully");
	
	for (int i = 0; i < ITEM_CNT; i++) {
		uint32_t stat = 0;		// 0 for free, 1 for locked
		uint32_t version = 0;	// the first version of each item is 0
		global_lock_items_region[i] = Lock::set_lock(stat, version);
	}
	DEBUG_COUT("[Info] All locks set free");
		
	return 0;
}
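The loop above relies on Lock::set_lock() to pack a status word and a version into a single uint64_t that can later be manipulated with one-sided RDMA atomics. A hypothetical sketch of that packing (the real bit layout may differ):

#include <cstdint>

// Hypothetical layout: high 32 bits hold the status (0 = free, 1 = locked),
// low 32 bits hold the version of the protected item.
namespace Lock {
	inline uint64_t set_lock(uint32_t status, uint32_t version) {
		return (static_cast<uint64_t>(status) << 32) | version;
	}
	inline uint32_t get_status(uint64_t lock)  { return static_cast<uint32_t>(lock >> 32); }
	inline uint32_t get_version(uint64_t lock) { return static_cast<uint32_t>(lock); }
}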
ClientGroup::~ClientGroup() {
	DEBUG_COUT(CLASS_NAME, __func__, "[Info] Destructor called ");
	for (auto& c : clients_)
		delete c;
	if (config::SNAPSHOT_CLUSTER_MODE == true)
		delete oracleReader_;
}
int Coordinator::start () {	
	CoordinatorContext ctx[SERVER_CNT];
	
	struct sockaddr_in returned_addr;
	socklen_t len = sizeof(returned_addr);
	char temp_char;
	
	srand (generate_random_seed());		// initialize random seed
	
	
	// Call socket(), bind() and listen()
	TEST_NZ (server_socket_setup(&server_sockfd, SERVER_CNT));
	
	// accept connections from Cohorts
	std::cout << "[Info] Waiting for " << SERVER_CNT << " cohort(s) on port " << TRX_MANAGER_TCP_PORT << std::endl;
	for (int s = 0; s < SERVER_CNT; s++) {
		ctx[s].sockfd  = accept (server_sockfd, (struct sockaddr *) &returned_addr, &len);
		if (ctx[s].sockfd < 0){ 
			std::cerr << "ERROR on accept() for RM #" << s << std::endl;
			return -1;
		}
		
		std::cout << "[Conn] Received Cohort #" << s << " on sock " << ctx[s].sockfd << std::endl;
		// create all resources
	
		ctx[s].ib_port = TRX_MANAGER_IB_PORT;
		TEST_NZ (ctx[s].create_context());
		DEBUG_COUT("[Info] Context for cohort " << s << " created");
		
		
		// connect the QPs
		TEST_NZ (RDMACommon::connect_qp (&(ctx[s].qp), ctx[s].ib_port, ctx[s].port_attr.lid, ctx[s].sockfd));	
		DEBUG_COUT("[Conn] QPed to cohort " << s);
	}
	std::cout << "[Info] Established connection to all " << SERVER_CNT << " resource-manager(s)." << std::endl; 
	
	
	start_benchmark(ctx);
	
	DEBUG_COUT("[Info] Coordinator is done, and is ready to destroy its resources!");
	for (int i = 0; i < SERVER_CNT; i++) {
		TEST_NZ (sock_sync_data (ctx[i].sockfd, 1, "W", &temp_char));	/* just send a dummy char back and forth */
		DEBUG_COUT("[Conn] Notified cohort " << i << " it's done");
		TEST_NZ ( ctx[i].destroy_context());
	}

	return 0;
}
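Both the coordinator and its cohorts end with sock_sync_data(), which acts as a TCP rendezvous before either side tears down its RDMA resources. A minimal sketch, assuming the usual verbs-example semantics (send the local bytes, then block until the peer's bytes arrive):

#include <unistd.h>

// Minimal sketch: write xfer_size bytes, then read xfer_size bytes back.
// Because both peers call it symmetrically, neither returns until the other has arrived.
static int sock_sync_data(int sockfd, int xfer_size, const char *local_data, char *remote_data) {
	if (write(sockfd, local_data, xfer_size) != xfer_size)
		return -1;
	int total = 0;
	while (total < xfer_size) {
		ssize_t n = read(sockfd, remote_data + total, xfer_size - total);
		if (n <= 0)
			return -1;	// peer closed the connection or an error occurred
		total += static_cast<int>(n);
	}
	return 0;
}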
ErrorType MemoryRequestDispatcher::handleCASRequest(std::shared_ptr<CASRequest> &casReqPtr) {
	ErrorType eType;
	size_t msNum = casReqPtr->getMemoryServerNumber();
	std::shared_ptr<CASRequest::CASParameters> paramPtr = casReqPtr->getParameters();
	primitive::pointer_size_t	expected	= paramPtr->expectedHead_.toULL();
	primitive::pointer_size_t	desired		= paramPtr->desiredHead_.toULL();
	size_t bucketID = paramPtr->offset_;

	eType = replicas_.at(msNum).getHashRegion()->CAS(&expected, desired, paramPtr->offset_);

	if (eType == error::SUCCESS)
		DEBUG_COUT(CLASS_NAME, __func__, "CASing bucket " << bucketID << " on MS " << msNum << " succeeded (expected: "
				<< paramPtr->expectedHead_.toHexString() << ", changed to: " << paramPtr->desiredHead_.toHexString() << ")");
	else {
		const Pointer& actualHead = Pointer::makePointer(expected);
		DEBUG_COUT(CLASS_NAME, __func__, "CASing bucket " << bucketID << " on MS " << msNum << " failed (expected: "
				<< paramPtr->expectedHead_.toHexString() << ", real head: " << actualHead.toHexString() << ")");
	}
	return eType;
}
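Note that the failure branch above prints `expected` as the real head; that only works if the region's CAS follows compare-exchange semantics, where a failed CAS overwrites the expected value with the value actually found. A hypothetical local sketch of that contract using std::atomic:

#include <atomic>
#include <cstdint>

// Hypothetical sketch of the CAS contract: swap in `desired` iff the word still
// equals `expected`; on failure, `expected` is updated to the value actually read,
// which is exactly what the failure branch above reports as the real head.
static bool cas_word(std::atomic<uint64_t> &word, uint64_t &expected, uint64_t desired) {
	return word.compare_exchange_strong(expected, desired);
}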
ErrorType MemoryRequestDispatcher::handleReadEntryRequest(std::shared_ptr<EntryReadRequest> &entryReadReqPtr){
	ErrorType eType;
	size_t msNum = entryReadReqPtr->getMemoryServerNumber();
	primitive::coordinator_num_t cID = entryReadReqPtr->getCoordinatorID();
	std::shared_ptr<EntryReadRequest::EntryReadParameters> paramPtr = entryReadReqPtr->getParameters();
	LogEntry &entry = paramPtr->localEntry_;
	std::vector<char> readBuffer(paramPtr->length_);	// std::vector instead of a VLA (variable-length arrays are non-standard C++)

	eType = replicas_.at(msNum).getLogRegion(cID)->read(readBuffer.data(), paramPtr->remoteBufferOffset_, paramPtr->length_);

	if (eType == error::SUCCESS) {
		std::string tempStr(readBuffer.data(), paramPtr->length_);
		std::istringstream is(tempStr);
		LogEntry::deserialize(is, entry);
		DEBUG_COUT(CLASS_NAME, __func__, "Log entry " << entry.getCurrentP().toHexString() << " read from log journal[" << (int)cID << "] from MS " << msNum);
	}
	else
		DEBUG_COUT(CLASS_NAME, __func__, "Failure in reading Log entry " << entry.getCurrentP().toHexString() << " from log journal[" << (int)cID << "] from MS " << msNum);

	return eType;
}
void MemoryRequestDispatcher::run() {
	ErrorType eType;
	while (true) {
		//std::this_thread::sleep_for(std::chrono::seconds(1));

		std::shared_ptr<Request> req = reqBufferPtr_->remove();

		if (req->getRequestType() == Request::RequestType::CAS) {
			DEBUG_COUT(CLASS_NAME, __func__, "Received CAS request");

			std::shared_ptr<CASRequest> casReqPtr = std::dynamic_pointer_cast<CASRequest> (req);
			eType = handleCASRequest(casReqPtr);
		}
		else if (req->getRegionType() == Request::RegionType::LOG) {
			if (req->getRequestType() == Request::RequestType::READ) {
				DEBUG_COUT(CLASS_NAME, __func__, "Received Log Read request");

				std::shared_ptr<EntryReadRequest> entryReadReqPtr = std::dynamic_pointer_cast<EntryReadRequest> (req);
				eType = handleReadEntryRequest(entryReadReqPtr);
			}
			else {	// entryReqPtr->getRequestType() == Request::RequestType::WRITE
				DEBUG_COUT(CLASS_NAME, __func__, "Received Log Write request");

				std::shared_ptr<EntryWriteRequest> entryWriteReqPtr = std::dynamic_pointer_cast<EntryWriteRequest> (req);
				eType = handleWriteEntryRequest(entryWriteReqPtr);
			}
		}
		else if (req->getRegionType() == Request::RegionType::HASH) {
			if (req->getRequestType() == Request::RequestType::READ) {
				DEBUG_COUT(CLASS_NAME, __func__, "Received Bucket Read request");

				std::shared_ptr<BucketReadRequest> bucketReadReqPtr = std::dynamic_pointer_cast<BucketReadRequest> (req);
				eType = handleReadBucketRequest(bucketReadReqPtr);
			}
			else  	// bucketReqPtr->getRequestType() == Request::RequestType::WRITE, which is an error
				eType = error::UNKNOWN_REQUEST_TYPE;
		}
		else if (req->getRegionType() == Request::RegionType::ENTRY_STATE) {
			if (req->getRequestType() == Request::RequestType::READ) {
				DEBUG_COUT(CLASS_NAME, __func__, "Received State Read request");

				std::shared_ptr<StateReadRequest> stateReadReqPtr = std::dynamic_pointer_cast<StateReadRequest> (req);
				eType = handleReadStateRequest(stateReadReqPtr);
			}
			else {
				DEBUG_COUT(CLASS_NAME, __func__, "Received State Write request");

				std::shared_ptr<StateWriteRequest> stateWriteReqPtr = std::dynamic_pointer_cast<StateWriteRequest> (req);
				eType = handleWriteStateRequest(stateWriteReqPtr);
			}
		}
		else {
			DEBUG_CERR(CLASS_NAME, __func__, "Received request with unknown type");
			eType = error::UNKNOWN_REQUEST_TYPE;
		}

		// We finally set the std::promise variable
		req->setProm(eType);
	}
}
ErrorType MemoryRequestDispatcher::handleWriteEntryRequest(std::shared_ptr<EntryWriteRequest> &entryReqPtr){
	ErrorType eType;
	size_t msNum = entryReqPtr->getMemoryServerNumber();
	primitive::coordinator_num_t cID = entryReqPtr->getCoordinatorID();
	std::shared_ptr<EntryWriteRequest::EntryWriteParameters> paramPtr = entryReqPtr->getParameters();
	const LogEntry &entry = paramPtr->localEntry_;

	std::ostringstream os;
	entry.serialize(os);

	const std::string& tmp = os.str();
	const char* cstr = tmp.c_str();

	eType = replicas_.at(msNum).getLogRegion(cID)->write(cstr, paramPtr->remoteBufferOffset_, paramPtr->length_);

	if (eType == error::SUCCESS)
		DEBUG_COUT(CLASS_NAME, __func__, "Log Entry " << entry.getCurrentP().toHexString() << " written on log journal[" << (int)cID << "] on MS " << msNum);
	else
		DEBUG_COUT(CLASS_NAME, __func__, "Failure in writing log Entry " << entry.getCurrentP().toHexString() << " on log journal[" << (int)cID << "] on MS " << msNum);

	return eType;
}
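The write handler serializes the entry into a string stream and ships the raw bytes; the read handler above reverses the process through an istringstream. A hypothetical sketch of the round-trip contract the two handlers assume (the real LogEntry carries more fields):

#include <cstdint>
#include <istream>
#include <ostream>

// Hypothetical minimal entry: serialize() and deserialize() must be exact inverses
// so that the bytes written by handleWriteEntryRequest can be parsed back by
// handleReadEntryRequest.
struct MiniEntry {
	uint64_t currentP;

	void serialize(std::ostream &os) const { os << currentP << ' '; }
	static void deserialize(std::istream &is, MiniEntry &e) { is >> e.currentP; }
};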
Example 9
int SRLockClient::release_lock (SRClientContext &ctx, struct LockRequest &req, struct LockResponse &res) {
	if (req.request_type == LockRequest::RELEASE){
		TEST_NZ (RDMACommon::post_RECEIVE(ctx.qp, ctx.lock_res_mr, (uintptr_t)&res, sizeof(struct LockResponse))); 
		DEBUG_COUT("[Info] receive posted to the queue");
		
		//TEST_NZ (sock_write(ctx.sockfd, (char *)&req, sizeof(struct LockRequest)));
		TEST_NZ (RDMACommon::post_SEND(ctx.qp, ctx.lock_req_mr, (uintptr_t)&req, sizeof(struct LockRequest), true));
		TEST_NZ (RDMACommon::poll_completion(ctx.cq));	// completion for the signaled SEND
			
		DEBUG_COUT("[Sent] LockRequest::Request (RELEASE) to LM.");
		
		//TEST_NZ (sock_read(ctx.sockfd, (char *)&res, sizeof(struct LockResponse)));
		TEST_NZ (RDMACommon::poll_completion(ctx.cq));	// Receive LockResponse
		
		if (res.response_type == LockResponse::RELEASED)
			DEBUG_COUT("[Recv] RELEASE LockResponse (result: released)");
		else {
			DEBUG_COUT("[Error] RELEASE LockResponse result: " << res.response_type);
		}
		return 0;
	}
	return 1;
}
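Every signaled post above is matched by a poll_completion() call. A minimal sketch of what RDMACommon::poll_completion() presumably does, busy-polling the completion queue with ibv_poll_cq (the real implementation may add timeouts or event channels):

#include <infiniband/verbs.h>

// Minimal sketch: spin until one work completion is available, then check its status.
static int poll_completion(struct ibv_cq *cq) {
	struct ibv_wc wc;
	int n;
	do {
		n = ibv_poll_cq(cq, 1, &wc);	// non-blocking; returns 0 while the CQ is empty
	} while (n == 0);
	if (n < 0)
		return -1;						// polling itself failed
	if (wc.status != IBV_WC_SUCCESS)
		return -1;						// the work request completed with an error
	return 0;
}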
void* DQClientCentricServer::handle_client(void *param) {
	DQServerContext *ctx = (DQServerContext *) param;
	char temp_char;
	
	DEBUG_COUT("[in handle client]");
	
	// TEST_NZ (RDMACommon::post_RECEIVE(ctx->qp, ctx->recv_data_mr, (uintptr_t)&ctx->recv_data_msg, sizeof(int)));
	TEST_NZ (sock_sync_data (ctx->sockfd, 1, "W", &temp_char));	// just send a dummy char back and forth
	
	DEBUG_COUT("[Synced with client]");
	
	/*
	int iteration = 0;
	while (iteration < OPERATIONS_CNT) {
		TEST_NZ (RDMACommon::poll_completion(ctx->cq));
		DEBUG_COUT("[Recv] request from client");
		
		TEST_NZ (RDMACommon::post_RECEIVE(ctx->qp, ctx->recv_data_mr, (uintptr_t)&ctx->recv_data_msg, sizeof(int)));	// for 
		DEBUG_COUT("[Info] receive posted to the queue");
		
		if (iteration % 1000 == 0) {
			TEST_NZ (RDMACommon::post_SEND(ctx->qp, ctx->send_data_mr, (uintptr_t)ctx->send_data_msg, 10 * sizeof(char), true));
			DEBUG_COUT("[Sent] response to client");
		
			TEST_NZ (RDMACommon::poll_completion(ctx->cq));	// for SEND
			DEBUG_COUT("[Info] completion received");
		
		}
		else {
			TEST_NZ (RDMACommon::post_SEND(ctx->qp, ctx->send_data_mr, (uintptr_t)ctx->send_data_msg, 10 * sizeof(char), false));
			DEBUG_COUT("[Sent] response to client (without completion)");
		}		
		iteration++;
	}
	*/
	DEBUG_COUT("[Sent] buffer info to client");

	return NULL;
}
ErrorType MemoryRequestDispatcher::handleReadBucketRequest(std::shared_ptr<BucketReadRequest> &bucketReadReqPtr) {
	ErrorType eType;
	size_t msNum = bucketReadReqPtr->getMemoryServerNumber();
	std::shared_ptr<BucketReadRequest::BucketReadParameters> paramPtr = bucketReadReqPtr->getParameters();
	size_t bucketID = paramPtr->remoteBufferOffset_;
	Pointer &pointer = paramPtr->localPointer_;
	primitive::pointer_size_t	readBuffer[1];

	eType = replicas_.at(msNum).getHashRegion()->read(readBuffer, paramPtr->remoteBufferOffset_, sizeof(primitive::pointer_size_t));
	if (eType != error::SUCCESS) {
		DEBUG_COUT(CLASS_NAME, __func__, "Failure in reading bucket " << bucketID << " from MS " << msNum);
		return eType;
	}

	pointer = Pointer::makePointer(readBuffer[0]);

	DEBUG_COUT(CLASS_NAME, __func__, "Bucket " << bucketID << " read from MS " << msNum <<  " and is pointing to " << pointer.toHexString());

	// TODO: what if the bucket is empty
	// if (pointer.isNull())
	//	eType = error::KEY_NOT_FOUND;

	return eType;
}
int IPCohort::start_server () {	
	IPCohortContext ctx;
	char temp_char;
		
	TEST_NZ (establish_tcp_connection(TRX_MANAGER_ADDR.c_str(), TRX_MANAGER_TCP_PORT, &ctx.sockfd));
	
	TEST_NZ (ctx.create_context());
	
	srand (time(NULL));		// initialize random seed
	
	start_benchmark(ctx);
	
	/* Sync so server will know that client is done mucking with its memory */
	DEBUG_COUT("[Info] Cohort client is done, and is ready to destroy its resources!");
	TEST_NZ (sock_sync_data (ctx.sockfd, 1, "W", &temp_char));	/* just send a dummy char back and forth */
	TEST_NZ(ctx.destroy_context());

	return 0;
}
ClientGroup::ClientGroup(unsigned instanceID, uint32_t clientsCnt, uint32_t homeServerID, size_t ibPortsCnt)
: clientsCnt_(clientsCnt){

	DEBUG_COUT(CLASS_NAME, __func__, "[Info] Constructor called ");

	srand ((unsigned int)utils::generate_random_seed());		// initialize random seed

	if (config::SNAPSHOT_CLUSTER_MODE == true){
		oracleReader_ = new OracleReader(instanceID, clientsCnt, 1);
	}

	for (uint32_t i = 0; i < clientsCnt; i++) {
		uint8_t ibPort = (uint8_t)(i % ibPortsCnt + 1);
		uint16_t homeWarehouseID = (uint16_t)(homeServerID * config::tpcc_settings::WAREHOUSE_PER_SERVER + rand() % config::tpcc_settings::WAREHOUSE_PER_SERVER);
		uint8_t homeDistrictID = (uint8_t)(rand() % config::tpcc_settings::DISTRICT_PER_WAREHOUSE);
		clients_.push_back(new TPCC::TPCCClient(instanceID, homeWarehouseID, homeDistrictID, ibPort, oracleReader_));
	}
}
Example 14
LogJournal::~LogJournal() {
	delete[] content_;
	delete region_;
	DEBUG_COUT (CLASS_NAME, __func__, "LogJournal destroyed!");
}
int IPCohort::start_benchmark(IPCohortContext &ctx) {
	char temp_char;
	unsigned long long cpu_checkpoint_start, cpu_checkpoint_finish;
	int iteration = 0;
	struct rusage usage;
    struct timeval start_user_usage, start_kernel_usage, end_user_usage, end_kernel_usage;
	
	
	DEBUG_COUT("[in handle client]");
	
	TEST_NZ (sock_sync_data (ctx.sockfd, 1, "W", &temp_char));	// just send a dummy char back and forth
	
	DEBUG_COUT("[Synced with client]");
	
    // For CPU usage in terms of time
	getrusage(RUSAGE_SELF, &usage);
    start_kernel_usage = usage.ru_stime;
    start_user_usage = usage.ru_utime;
	
	
	// For CPU usage in terms of clocks (ticks)
	cpu_checkpoint_start = rdtsc();
	
	while (iteration < OPERATIONS_CNT) {
		//TEST_NZ (RDMACommon::poll_completion(ctx.cq));	// for Receive
		TEST_NZ (sock_read(ctx.sockfd, (char *)&(ctx.recv_data_msg), sizeof(int)));	
		DEBUG_COUT("[READ] --READY-- request from coordinator");
		
		
		//TEST_NZ (RDMACommon::post_SEND(ctx.qp, ctx.send_data_mr, (uintptr_t)&ctx.send_data_msg, sizeof(int), false));
		TEST_NZ (sock_write(ctx.sockfd, (char *)&(ctx.send_data_msg), sizeof(int)));			
		DEBUG_COUT("[WRIT] --YES-- response to coordinator (without completion)");
		
		
		//TEST_NZ (RDMACommon::poll_completion(ctx.cq));	// for Receive
		TEST_NZ (sock_read(ctx.sockfd, (char *)&(ctx.recv_data_msg), sizeof(int)));	
		DEBUG_COUT("[READ] --COMMIT-- request from coordinator");
		
		//TEST_NZ (RDMACommon::post_SEND(ctx.qp, ctx.send_data_mr, (uintptr_t)&ctx.send_data_msg, sizeof(int), true));
		TEST_NZ (sock_write(ctx.sockfd, (char *)&(ctx.send_data_msg), sizeof(int)));			
		DEBUG_COUT("[WRIT] --DONE-- response to coordinator");
		
		iteration++;
	}
	cpu_checkpoint_finish = rdtsc();
	
    getrusage(RUSAGE_SELF, &usage);
    end_user_usage = usage.ru_utime;
    end_kernel_usage = usage.ru_stime;
	
	
	double user_cpu_microtime = ( end_user_usage.tv_sec - start_user_usage.tv_sec ) * 1E6 + ( end_user_usage.tv_usec - start_user_usage.tv_usec );
	double kernel_cpu_microtime = ( end_kernel_usage.tv_sec - start_kernel_usage.tv_sec ) * 1E6 + ( end_kernel_usage.tv_usec - start_kernel_usage.tv_usec );
	
	
	DEBUG_COUT("[Info] Cohort Client Done");
	
	unsigned long long average_cpu_clocks = (cpu_checkpoint_finish - cpu_checkpoint_start) / OPERATIONS_CNT;
	
	std::cout << "[Stat] AVG total CPU elapsed time (u sec):    	" << (user_cpu_microtime + kernel_cpu_microtime) / OPERATIONS_CNT << std::endl;
	std::cout << "[Stat] Average CPU clocks:    	" << average_cpu_clocks << std::endl;
	return 0;
}
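The cycle counts above come from rdtsc(). A plausible sketch of that helper on x86, reading the time-stamp counter via inline assembly; TSC frequency and cross-core consistency vary by CPU, so raw tick counts should be treated as approximate:

#include <cstdint>

// Read the x86 time-stamp counter: EDX:EAX holds the 64-bit tick count.
static inline unsigned long long rdtsc() {
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return (static_cast<unsigned long long>(hi) << 32) | lo;
}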
int DQClientCentricServer::start_server () {	
	tcp_port	= SERVER_TCP_PORT;
	ib_port		= SERVER_IB_PORT;
	DQServerContext ctx[CLIENTS_CNT];
	struct sockaddr_in serv_addr, cli_addr;
	socklen_t clilen = sizeof(cli_addr);
	char temp_char;
	std::string clients_addresses;	// separated by the delimiter '|'
	std::string clients_tcp_port;	// separated by the delimiter '|'
	
	
	TEST_NZ(initialize_data_structures());

	std::cout << "[Info] Server is waiting for " << CLIENTS_CNT
		<< " client(s) on tcp port: " << tcp_port << ", ib port: " << ib_port << std::endl;
	
	// Open Socket
	server_sockfd = socket (AF_INET, SOCK_STREAM, 0);
	if (server_sockfd < 0) {
		std::cerr << "Error opening socket" << std::endl;
		return -1;
	}
	
	// Bind
	memset(&serv_addr, 0, sizeof(serv_addr));
	serv_addr.sin_family = AF_INET;
	serv_addr.sin_addr.s_addr = INADDR_ANY;
	serv_addr.sin_port = htons(tcp_port);
	TEST_NZ(bind(server_sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)));
	

	// listen				  
	TEST_NZ(listen (server_sockfd, CLIENTS_CNT));
	
	
	// accept connections
	for (int i = 0; i < CLIENTS_CNT; i++){
		initialize_context(ctx[i]);
		ctx[i].sockfd  = accept (server_sockfd, (struct sockaddr *) &cli_addr, &clilen);
		if (ctx[i].sockfd < 0){
			std::cerr << "ERROR on accept" << std::endl;
			return -1;
		}
		std::cout << "[Conn] Received client #" << i << " on socket " << ctx[i].sockfd << std::endl;
		
		ctx[i].client_ip = "";
		ctx[i].client_ip	+= std::string(inet_ntoa (cli_addr.sin_addr));
		clients_addresses.append(ctx[i].client_ip);
		clients_tcp_port.append(std::to_string(8000 + i));  
		
		// if the client is not the last client, add the delimiter
		if (i != CLIENTS_CNT - 1){
			clients_addresses.append("|");
			clients_tcp_port.append("|"); 
		}
	
		// create all resources
		TEST_NZ (ctx[i].create_context());
		DEBUG_COUT("[Info] Context for client " << i << " (" << ctx[i].client_ip << ") created");
		
		// connect the QPs
		TEST_NZ (RDMACommon::connect_qp (&(ctx[i].qp), ctx[i].ib_port, ctx[i].port_attr.lid, ctx[i].sockfd));
		DEBUG_COUT("[Conn] QPed to client " << i);
	}
	
	for (int i = 0; i < CLIENTS_CNT; i++){
		// send memory locations using SEND 
		memcpy(&(ctx[i].send_message_msg.peer_mr), ctx[i].locks_mr, sizeof(struct ibv_mr));
		ctx[i].send_message_msg.client_id =  static_cast<uint32_t>(i + 1);
		strcpy(ctx[i].send_message_msg.clients_addr, clients_addresses.c_str());
		strcpy(ctx[i].send_message_msg.clients_tcp_port, clients_tcp_port.c_str());
		
		DEBUG_COUT("[Sent] Cheeck: " << ctx[i].send_message_msg.clients_addr);
		
		TEST_NZ (RDMACommon::post_SEND (ctx[i].qp, ctx[i].send_message_mr, (uintptr_t)&ctx[i].send_message_msg, sizeof (struct MemoryKeys), true));
		TEST_NZ (RDMACommon::poll_completion(ctx[i].cq));
		DEBUG_COUT("[Sent] buffer info to client " << i);
	}

	// Server waits for the client to muck with its memory

	/*************** THIS IS FOR SEND
	// accept connections
	for (int i = 0; i < CLIENTS_CNT; i++){
		initialize_context(ctx[i]);
		ctx[i].sockfd  = accept (server_sockfd, (struct sockaddr *) &cli_addr, &clilen);
		if (ctx[i].sockfd < 0){
			std::cerr << "ERROR on accept" << std::endl;
			return -1;
		}
		std::cout << "[Conn] Received client #" << i << " on socket " << ctx[i].sockfd << std::endl;
	
		// create all resources
		TEST_NZ (ctx[i].create_context());
		DEBUG_COUT("[Info] Context for client " << i << " created");
		
		// connect the QPs
		TEST_NZ (RDMACommon::connect_qp (&(ctx[i].qp), ctx[i].ib_port, ctx[i].port_attr.lid, ctx[i].sockfd));
		DEBUG_COUT("[Conn] QPed to client " << i);
	
		pthread_create(&master_threads[i], NULL, BenchmarkServer::handle_client, &ctx[i]);
	}
	std::cout << "[Info] Established connection to all " << CLIENTS_CNT << " client(s)." << std::endl; 
	
	//wait for handlers to finish
	for (int i = 0; i < CLIENTS_CNT; i++) {
		pthread_join(master_threads[i], NULL);
	}
	*/
	for (int i = 0; i < CLIENTS_CNT; i++) {
		TEST_NZ (sock_sync_data (ctx[i].sockfd, 1, "W", &temp_char));	// just send a dummy char back and forth
		DEBUG_COUT("[Conn] Client " << i << " notified it's finished");
		TEST_NZ (ctx[i].destroy_context());
		std::cout << "[Info] Destroying client " << i << " resources" << std::endl;
	}	
	std::cout << "[Info] Server's ready to gracefully get destroyed" << std::endl;	

	return 0;
}
Example 17
int SRLockClient::start_operation (SRClientContext &ctx) {
	
	struct timespec firstRequestTime, lastRequestTime, beforeAcquisition, afterAcquisition, beforeRelease, afterRelease;
	struct LockRequest req_cpy;
	
	double sumExclusiveAcqTime = 0.0, sumSharedAcqTime = 0.0, sumExclusiveRelTime = 0.0, sumSharedRelTime = 0.0;
	double acquisitionTime, releaseTime;
	int exclusiveCount= 0, sharedCount= 0;
	
	clock_gettime(CLOCK_REALTIME, &firstRequestTime); // Fire the  timer	
	
	ctx.op_num = 0;
	while (ctx.op_num  <  OPERATIONS_CNT) {
		ctx.op_num = ctx.op_num + 1;
		DEBUG_COUT (std::endl << "[Info] Submitting lock request #" << ctx.op_num);

		// ************************************************************************
		//	Client request a lock on the selected item and waits for a response.
		select_item(ctx.lock_request);
		req_cpy.request_item = ctx.lock_request.request_item;		// keep the initial request item, since ctx.lock_request is going to change
		req_cpy.request_type = ctx.lock_request.request_type;		// keep the initial request type, for the same reason
		
		clock_gettime(CLOCK_REALTIME, &beforeAcquisition); // Fire the  timer		
		acquire_lock (ctx, ctx.lock_request, ctx.lock_response);
		clock_gettime(CLOCK_REALTIME, &afterAcquisition);	// Fire the  timer
		acquisitionTime = ( afterAcquisition.tv_sec - beforeAcquisition.tv_sec ) * 1E6 + ( afterAcquisition.tv_nsec - beforeAcquisition.tv_nsec )/1E3;
		
		// hold the lock for the configured duration before releasing it
		hold_lock();
		
		// ************************************************************************
		//	Clients release the acquired lock.
		ctx.lock_request.request_type = LockRequest::RELEASE;
		
		clock_gettime(CLOCK_REALTIME, &beforeRelease); // Fire the  timer		
		release_lock (ctx, ctx.lock_request, ctx.lock_response);
		clock_gettime(CLOCK_REALTIME, &afterRelease);	// Fire the  timer
		releaseTime = ( afterRelease.tv_sec - beforeRelease.tv_sec ) * 1E6 + ( afterRelease.tv_nsec - beforeRelease.tv_nsec )/1E3;
		
		if(req_cpy.request_type == LockRequest::EXCLUSIVE){
			sumExclusiveAcqTime += acquisitionTime;
			sumExclusiveRelTime += releaseTime; 
			exclusiveCount++;
		}
		else if(req_cpy.request_type == LockRequest::SHARED){
			sumSharedAcqTime += acquisitionTime; 
			sumSharedRelTime += releaseTime;
			sharedCount++;
		}
	}
	clock_gettime(CLOCK_REALTIME, &lastRequestTime); // Fire the  timer

	double micro_elapsed_time = ( lastRequestTime.tv_sec - firstRequestTime.tv_sec ) * 1E6 + ( lastRequestTime.tv_nsec - firstRequestTime.tv_nsec )/1E3;
	double lock_per_sec = OPERATIONS_CNT / (micro_elapsed_time / 1E6);
	
	// std::cout << std::endl << "[Stat] Locks (acquire & release) per sec: 	" <<  lock_per_sec << std::endl;
	// std::cout << "[STAT] Avg time per Exclusive acquisition (us)	" << sumExclusiveAcqTime / exclusiveCount << std::endl;
	// std::cout << "[STAT] Avg time per Exclusive release (us)		" << sumExclusiveRelTime / exclusiveCount << std::endl;
	// std::cout << "[STAT] Avg time per Shared acquisition (us)		" << sumSharedAcqTime / sharedCount << std::endl;
	// std::cout << "[STAT] Avg time per Shared release (us)			" << sumSharedRelime / sharedCount << std::endl;
	std::cout << "[Summary] " << lock_per_sec << ", " << sumExclusiveAcqTime / exclusiveCount << ", " << sumExclusiveRelTime / exclusiveCount << ", " << sumSharedAcqTime / sharedCount << ", " << sumSharedRelime / sharedCount << std::endl;
	

	return 0;
}
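The timespec arithmetic above is repeated for every measured interval. A small helper (not in the original code) that computes the same microsecond difference once:

#include <ctime>

// Microseconds elapsed between two CLOCK_REALTIME samples.
static inline double elapsed_micros(const struct timespec &from, const struct timespec &to) {
	return (to.tv_sec - from.tv_sec) * 1E6 + (to.tv_nsec - from.tv_nsec) / 1E3;
}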
int BaseTradTrxManager::initialize_data_structures(){
	timestamp.value = 0ULL;	// the timestamp counter is initially set to 0
	DEBUG_COUT("[Info] Timestamp set to " << timestamp.value);
	return 0;
}
MemoryRequestDispatcher::~MemoryRequestDispatcher() {
	DEBUG_COUT (CLASS_NAME, __func__, "MemoryRequestDispatcher destroyed!");
}
int Coordinator::start_benchmark(CoordinatorContext *ctx) {
	struct timespec firstRequestTime, lastRequestTime;	// for measuring end-to-end elapsed time

	struct rusage usage;
	struct timeval start_user_usage, start_kernel_usage, end_user_usage, end_kernel_usage;
	char temp_char;

	unsigned long long cpu_checkpoint_start, cpu_checkpoint_finish;
	bool signalled_flag = false;	
	
	for (int i = 0; i < SERVER_CNT; i++) {
		TEST_NZ (sock_sync_data (ctx[i].sockfd, 1, "W", &temp_char));	// just send a dummy char back and forth
	}
	
	
	DEBUG_COUT ("[Info] Benchmark now gets started");
	
	clock_gettime(CLOCK_REALTIME, &firstRequestTime);	// Fire the  timer
    getrusage(RUSAGE_SELF, &usage);
    start_kernel_usage = usage.ru_stime;
    start_user_usage = usage.ru_utime;
	
	int iteration = 0;
	
	cpu_checkpoint_start = rdtsc();
	while (iteration < OPERATIONS_CNT) {
		DEBUG_COUT("iteration " << iteration);

		// Sent: Ready??
		for (int i = 0; i < SERVER_CNT; i++) {
			TEST_NZ (RDMACommon::post_RECEIVE(ctx[i].qp, ctx[i].recv_data_mr, (uintptr_t)&ctx[i].recv_data_msg, sizeof(int)));
			DEBUG_COUT("[Info] Receive posted");
			
			TEST_NZ (RDMACommon::post_SEND(ctx[i].qp, ctx[i].send_data_mr, (uintptr_t)&ctx[i].send_data_msg, sizeof(int), false));
			DEBUG_COUT("[Sent] --READY-- request sent to cohort " << i);
			
		}
		
		// Received: Ready!
		for (int i = 0; i < SERVER_CNT; i++) {
			TEST_NZ (RDMACommon::poll_completion(ctx[i].cq));	// for RECV
			//TEST_NZ (RDMACommon::event_based_poll_completion(ctx[i].comp_channel, ctx[i].cq)); // for RECV
			DEBUG_COUT("[Recv] --YES-- response received from cohort " << i);
		}
		
		// Sent: Commit
		for (int i = 0; i < SERVER_CNT; i++) {
			TEST_NZ (RDMACommon::post_RECEIVE(ctx[i].qp, ctx[i].recv_data_mr, (uintptr_t)&ctx[i].recv_data_msg, sizeof(int)));
			DEBUG_COUT("[Info] Receive posted");
			
			
			if (iteration % 500 == 0) {
				signalled_flag = true;
				TEST_NZ (RDMACommon::post_SEND(ctx[i].qp, ctx[i].send_data_mr, (uintptr_t)&ctx[i].send_data_msg, sizeof(int), true));
				DEBUG_COUT("[Sent] --COMMIT-- request sent to cohort " << i);
			}
			else{
				signalled_flag = false;
				TEST_NZ (RDMACommon::post_SEND(ctx[i].qp, ctx[i].send_data_mr, (uintptr_t)&ctx[i].send_data_msg, sizeof(int), false));
				DEBUG_COUT("[Sent] --COMMIT-- request sent to cohort (unsignalled) " << i);
			}
		}
		
		// collect completion events for SEND
		if (signalled_flag){
			for (int i = 0; i < SERVER_CNT; i++) {
				TEST_NZ (RDMACommon::poll_completion(ctx[i].cq));	// for SEND
				//TEST_NZ (RDMACommon::event_based_poll_completion(ctx[i].comp_channel, ctx[i].cq));	// for SEND
			}
		}
		
		// Received: Done!
		for (int i = 0; i < SERVER_CNT; i++) {
			TEST_NZ (RDMACommon::poll_completion(ctx[i].cq));	// for RECV
			//TEST_NZ (RDMACommon::event_based_poll_completion(ctx[i].comp_channel, ctx[i].cq)); // for RECV
		
			
			DEBUG_COUT("[Recv] --DONE-- response received from cohort " << i);
		}
		
		iteration++;
	}
	cpu_checkpoint_finish = rdtsc();

    getrusage(RUSAGE_SELF, &usage);
    end_user_usage = usage.ru_utime;
    end_kernel_usage = usage.ru_stime;
	
	clock_gettime(CLOCK_REALTIME, &lastRequestTime);	// Fire the  timer
	double user_cpu_microtime = ( end_user_usage.tv_sec - start_user_usage.tv_sec ) * 1E6 + ( end_user_usage.tv_usec - start_user_usage.tv_usec );
	double kernel_cpu_microtime = ( end_kernel_usage.tv_sec - start_kernel_usage.tv_sec ) * 1E6 + ( end_kernel_usage.tv_usec - start_kernel_usage.tv_usec );
	
	double micro_elapsed_time = ( ( lastRequestTime.tv_sec - firstRequestTime.tv_sec ) * 1E9 + ( lastRequestTime.tv_nsec - firstRequestTime.tv_nsec ) ) / 1000;
	
	double latency_in_micro = (double)(micro_elapsed_time / OPERATIONS_CNT);
	//double latency_in_micro = (double)(cumulative_latency / signaledPosts) / 1000;
	
	double mega_byte_per_sec = ((sizeof(int) * OPERATIONS_CNT / 1E6 ) / (micro_elapsed_time / 1E6) );
	double operations_per_sec = OPERATIONS_CNT / (micro_elapsed_time / 1E6);
	double cpu_utilization = (user_cpu_microtime + kernel_cpu_microtime) / micro_elapsed_time;
	
	unsigned long long average_cpu_clocks = (cpu_checkpoint_finish - cpu_checkpoint_start) / OPERATIONS_CNT;
	
	std::cout << "[Stat] Avg latency(u sec):   	" << latency_in_micro << std::endl; 
	std::cout << "[Stat] MegaByte per Sec:   	" << mega_byte_per_sec <<  std::endl;
	std::cout << "[Stat] Operations per Sec:   	" << operations_per_sec <<  std::endl;
	std::cout << "[Stat] CPU utilization:    	" << cpu_utilization << std::endl;
	std::cout << "[Stat] USER CPU utilization:    	" << user_cpu_microtime / micro_elapsed_time << std::endl;
	std::cout << "[Stat] KERNEL CPU utilization:    	" << kernel_cpu_microtime / micro_elapsed_time << std::endl;
	std::cout << "[Stat] AVG USER CPU elapsed time (u sec):    	" << user_cpu_microtime / OPERATIONS_CNT << std::endl;
	std::cout << "[Stat] AVG KERNEL CPU elapsed time (u sec):    	" << kernel_cpu_microtime / OPERATIONS_CNT << std::endl;
	std::cout << "[Stat] AVG total CPU elapsed time (u sec):    	" << (user_cpu_microtime + kernel_cpu_microtime) / OPERATIONS_CNT << std::endl;
	
	std::cout << "[Stat] Average CPU clocks:    	" << average_cpu_clocks << std::endl;
	
	
	std::cout  << latency_in_micro << '\t' << mega_byte_per_sec << '\t' << operations_per_sec << '\t' << cpu_utilization << std::endl;
	
	
	return 0;
}
int RDMAServer::start_server (int server_num) {	
	tcp_port	= TCP_PORT[server_num];
	ib_port		= IB_PORT[server_num];
	RDMAServerContext ctx[CLIENTS_CNT];
	
	char temp_char;
	
	
	TEST_NZ(initialize_data_structures());

	std::cout << "[Info] Server " << server_num << " is waiting for " << CLIENTS_CNT
		<< " client(s) on tcp port: " << tcp_port << ", ib port: " << ib_port << std::endl;
	
	server_sockfd = -1;
	struct sockaddr_in serv_addr, cli_addr;
	socklen_t clilen = sizeof(cli_addr);
	pthread_t master_threads[CLIENTS_CNT];	
	
	// Open Socket
	server_sockfd = socket (AF_INET, SOCK_STREAM, 0);
	if (server_sockfd < 0) {
		std::cerr << "Error opening socket" << std::endl;
		return -1;
	}
	
	// Bind
	memset(&serv_addr, 0, sizeof(serv_addr));
	serv_addr.sin_family = AF_INET;
	serv_addr.sin_addr.s_addr = INADDR_ANY;
	serv_addr.sin_port = htons(tcp_port);
	TEST_NZ(bind(server_sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)));
	
	
	// listen				  
	TEST_NZ(listen (server_sockfd, CLIENTS_CNT));
	
	// accept connections
	for (int i = 0; i < CLIENTS_CNT; i++){
		initialize_context(ctx[i]);
		ctx[i].sockfd  = accept (server_sockfd, (struct sockaddr *) &cli_addr, &clilen);
		if (ctx[i].sockfd < 0){ 
			std::cerr << "ERROR on accept" << std::endl;
			return -1;
		}
		std::cout << "[Conn] Received client #" << i << " on socket " << ctx[i].sockfd << std::endl;
		// pthread_create(&master_threads[i], NULL, RDMAServer::handle_client, &client_socks[i]);
	
		// create all resources
		TEST_NZ (ctx[i].create_context());
		DEBUG_COUT("[Info] Context for client " << i << " created");
	
		// connect the QPs
		TEST_NZ (RDMACommon::connect_qp (&(ctx[i].qp), ctx[i].ib_port, ctx[i].port_attr.lid, ctx[i].sockfd));	
		DEBUG_COUT("[Conn] QPed to client " << i);
	
		// prepare server buffer with read message
		memcpy(&(ctx[i].send_msg.mr_items),		ctx[i].mr_items,		sizeof(struct ibv_mr));
		memcpy(&(ctx[i].send_msg.mr_orders),	ctx[i].mr_orders,		sizeof(struct ibv_mr));
		memcpy(&(ctx[i].send_msg.mr_order_line),ctx[i].mr_order_line,	sizeof(struct ibv_mr));
		memcpy(&(ctx[i].send_msg.mr_cc_xacts),	ctx[i].mr_cc_xacts,		sizeof(struct ibv_mr));
		memcpy(&(ctx[i].send_msg.mr_timestamp),	ctx[i].mr_timestamp,	sizeof(struct ibv_mr));
		memcpy(&(ctx[i].send_msg.mr_lock_items),ctx[i].mr_lock_items,	sizeof(struct ibv_mr));
	}
	
	for (int i = 0; i < CLIENTS_CNT; i++){
	
		// send memory locations using SEND 
		TEST_NZ (RDMACommon::post_SEND (ctx[i].qp, ctx[i].send_mr, (uintptr_t)&ctx[i].send_msg, sizeof(struct MemoryKeys), true));
		TEST_NZ (RDMACommon::poll_completion(ctx[i].cq));
		DEBUG_COUT("[Sent] buffer info to client " << i);
	}

	/*
		Server waits for the client to muck with its memory
	*/

	for (int i = 0; i < CLIENTS_CNT; i++) {
		TEST_NZ (sock_sync_data (ctx[i].sockfd, 1, "W", &temp_char));	/* just send a dummy char back and forth */
		DEBUG_COUT("[Conn] Client " << i << " notified it's finished");
		TEST_NZ (ctx[i].destroy_context());
		std::cout << "[Info] Destroying client " << i << " resources" << std::endl;
	}
	std::cout << "[Info] Server's ready to gracefully get destroyed" << std::endl;	

	return 0;
}
MemoryRequestDispatcher::MemoryRequestDispatcher()
: replicas_(config::MEMORY_SERVER_CNT){
	DEBUG_COUT (CLASS_NAME, __func__, "MemoryRequestDispatcher created!");
	//reqBufferPtr_.reset(new LIFORequestBuffer());
	reqBufferPtr_.reset(new RandomRequestBuffer());
}