Example #1
0
//
// Grunt destructor.
//
// Releases the completion queue, the target array, the grunt-private I/O
// buffers (only when this grunt owns them), and the transaction arrays.
//
Grunt::~Grunt()
{
	// Remove completion queue, if any.  VI-type grunts do not own a
	// separately-deletable completion queue, so skip them.
	if ( (type != InvalidType) && !IsType(type, GenericVIType) && io_cq )
		delete io_cq;

	Size_Target_Array( 0 );

	// Release grunt's I/O data buffers if they are in use.  When data_size
	// is zero, read_data/write_data alias the manager's shared buffer and
	// must NOT be freed here.  The braces are essential: without them only
	// the first free was guarded by the if, and write_data was released
	// unconditionally — freeing the manager's shared buffer.
	if ( data_size )
	{
#if defined(IOMTR_OSFAMILY_NETWARE)
		NXMemFree( read_data );
		NXMemFree( write_data );
#elif defined(IOMTR_OSFAMILY_UNIX)
		free( read_data );
		free( write_data );
#elif defined(IOMTR_OSFAMILY_WINDOWS)
		VirtualFree( read_data, 0, MEM_RELEASE );
		VirtualFree( write_data, 0, MEM_RELEASE );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	}

	Free_Transaction_Arrays();
}
//
// Destructor
//
// Releases the version strings, shuts down and destroys the communication
// port, frees the manager's shared I/O buffer (plus the swap device list
// on the platforms that maintain one), and deletes all worker grunts.
//
Manager::~Manager()
{
	int g;

	// Array delete matches the array allocation of the version strings.
	delete [] m_pVersionString;
	delete [] m_pVersionStringWithDebug;

	// Close the port to Iometer before destroying it.
	// NOTE(review): prt is dereferenced without a NULL check — assumes the
	// constructor always created it; confirm against the constructor.
	prt->Close();
	delete prt;

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_SOLARIS)
	// data is the manager's shared I/O buffer; swap_devices is only
	// maintained on these platforms.
	if(data != NULL)	
		free(data);
	if(swap_devices != NULL)	
		free(swap_devices);	
#elif defined(IOMTR_OS_NETWARE)
	if(data != NULL)	
		NXMemFree(data);
	if(swap_devices != NULL)	
		NXMemFree(swap_devices);
#elif defined(IOMTR_OS_WIN32) || defined(IOMTR_OS_WIN64)
	// VirtualFree simply fails (harmlessly) on a NULL pointer, so no
	// explicit NULL check is needed here.
	VirtualFree( data, 0, MEM_RELEASE );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif

	// Destroy every worker's state.
	for ( g = 0; g < grunt_count; g++ )
		delete grunts[g];
}
Example #3
0
//
// Free all memory associated with related I/O request arrays.
//
// Safe to call more than once: freed pointers are reset to NULL so a
// repeated call (e.g. from the destructor after an earlier teardown)
// cannot double-free them.
//
void Grunt::Free_Transaction_Arrays()
{
	// Release the transaction slot array, if it was ever allocated.
	if ( trans_slots )
	{
#if defined(IOMTR_OSFAMILY_NETWARE)	
		NXMemFree( trans_slots );
#elif defined(IOMTR_OSFAMILY_UNIX) || defined(IOMTR_OSFAMILY_WINDOWS)
		free( trans_slots );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		trans_slots = NULL;	// prevent double-free on a later call
	}

	// Release the queue of available transactions, if it was ever allocated.
	if ( available_trans_queue )
	{
#if defined(IOMTR_OSFAMILY_NETWARE)	
		NXMemFree( available_trans_queue );
#elif defined(IOMTR_OSFAMILY_UNIX) || defined(IOMTR_OSFAMILY_WINDOWS)
		free( available_trans_queue );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		available_trans_queue = NULL;	// prevent double-free on a later call
	}

	// Reflect the empty state in the slot counters.
	total_trans_slots = 0;
	cur_trans_slots = 0;
}
Example #4
0
//
// Allocate memory for size elements in the completion queue and for the
// parallel array of struct aiocb pointers.  Returns TRUE on success.
// On failure, anything partially allocated is released and the queue is
// left with a NULL element_list so it cannot be used half-built.
//
BOOL SetQueueSize(HANDLE cqid, int size)
{
	struct IOCQ *this_cqid;

	this_cqid = (struct IOCQ *)cqid;

	// Reject a bad handle or a nonsensical element count up front rather
	// than dereferencing NULL / allocating a zero-length array below.
	if (this_cqid == NULL || size <= 0) {
		cout << "invalid completion queue handle or size" << endl;
		return (FALSE);
	}

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	this_cqid->element_list = (struct CQ_Element *)malloc(sizeof(CQ_Element) * size);
#elif defined(IOMTR_OS_NETWARE)
	this_cqid->element_list = (struct CQ_Element *)NXMemAlloc(sizeof(CQ_Element) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (this_cqid->element_list == NULL) {
		cout << "memory allocation failed" << endl;
		return (FALSE);
	}
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	this_cqid->aiocb_list = (struct aiocb64 **)malloc(sizeof(struct aiocb64 *) * size);
#elif defined(IOMTR_OS_NETWARE)
	this_cqid->aiocb_list = (struct aiocb64 **)NXMemAlloc(sizeof(struct aiocb64 *) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (this_cqid->aiocb_list == NULL) {
		cout << "memory allocation failed" << endl;
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(this_cqid->element_list);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(this_cqid->element_list);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		// Do not leave a dangling pointer in the queue after freeing it.
		this_cqid->element_list = NULL;
		return (FALSE);
	}

	// Zero both arrays so unused slots read as "no request pending".
	memset(this_cqid->element_list, 0, sizeof(struct CQ_Element) * size);
	memset(this_cqid->aiocb_list, 0, sizeof(struct aiocb64 *) * size);
	this_cqid->size = size;
	this_cqid->last_freed = -1;
	this_cqid->position = 0;

#ifdef _DEBUG
	cout << "allocated a completion queue of size " << size << " for handle : " << this_cqid << endl;
#endif
	return (TRUE);
}
Example #5
0
//
// Size this object's completion queue to hold `size` elements, allocating
// both the element array and the parallel aiocb pointer array.  Returns
// TRUE on success.  On failure, partial allocations are released and the
// queue is left with a NULL element_list so it cannot be used half-built.
//
BOOL CQAIO::SetQueueSize(int size)
{
	struct IOCQ *this_cqid = (struct IOCQ *)completion_queue;

	// Guard against a missing queue object or a nonsensical element count.
	if (this_cqid == NULL || size <= 0) {
		cout << "memory allocation failed." << endl;
		return (FALSE);
	}

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	this_cqid->element_list = (struct CQ_Element *)malloc(sizeof(CQ_Element) * size);
#elif defined(IOMTR_OS_NETWARE)
	this_cqid->element_list = (struct CQ_Element *)NXMemAlloc(sizeof(CQ_Element) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (this_cqid->element_list == NULL) {
		cout << "memory allocation failed." << endl;
		return (FALSE);
	}
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	this_cqid->aiocb_list = (struct aiocb64 **)malloc(sizeof(struct aiocb64 *) * size);
#elif defined(IOMTR_OS_NETWARE)
	this_cqid->aiocb_list = (struct aiocb64 **)NXMemAlloc(sizeof(struct aiocb64 *) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (this_cqid->aiocb_list == NULL) {
		cout << "memory allocation failed." << endl;
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(this_cqid->element_list);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(this_cqid->element_list);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		// Do not leave a dangling pointer in the queue after freeing it.
		this_cqid->element_list = NULL;
		return (FALSE);
	}

	// Record the size and zero both arrays so unused slots read as
	// "no request pending".
	this_cqid->size = size;
	memset(this_cqid->aiocb_list, 0, sizeof(struct aiocb64 *) * size);
	memset(this_cqid->element_list, 0, sizeof(struct CQ_Element) * size);

#ifdef _DEBUG
	cout << "allocated a completion queue of size " << size << " for handle : " << this_cqid << endl;
#endif
	return (TRUE);
}
Example #6
0
//
// Preparing a disk for access by a worker thread.  The disk must have been 
// previously initialized.
//
// Allocates a large scratch buffer, opens the target disk, and writes the
// preparation data (first in large blocks, then single sectors).  Any
// failure sets critical_error; not_ready is always decremented so the
// manager knows this worker finished its preparation attempt.
//
void Grunt::Prepare_Disk( int disk_id )
{
	void		*buffer = NULL;
	DWORD		buffer_size;
	DWORDLONG	prepare_offset = 0;
	TargetDisk	*disk = (TargetDisk *) targets[disk_id];

	critical_error = FALSE;

	// Allocate a large (64k for 512 byte sector size) buffer for the preparation.
	buffer_size = disk->spec.disk_info.sector_size * 128;
	// NOTE(review): buffer is always NULL at this point, so the free calls
	// below are effectively no-ops (free(NULL) is defined as a no-op;
	// VirtualFree(NULL,...) merely fails) — presumably kept for symmetry.
	// Confirm NXMemFree(NULL) is safe on NetWare.
#if defined(IOMTR_OSFAMILY_NETWARE)
	NXMemFree( buffer );
	errno = 0;
	if ( !(buffer = NXMemAlloc(buffer_size, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	free( buffer );
	errno = 0;
	// valloc returns page-aligned memory.
	if ( !(buffer = valloc(buffer_size) ))
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	VirtualFree( buffer, 0, MEM_RELEASE );
	if ( !(buffer = VirtualAlloc( NULL, buffer_size, MEM_COMMIT, PAGE_READWRITE )) )
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	// This block is the body of whichever if(...) was compiled above: it
	// runs only when the buffer allocation failed.
	{
		cout << "*** Could not allocate buffer to prepare disk." << endl;
		critical_error = TRUE;
		InterlockedDecrement( (long *) &not_ready );
		return;
	}

	// Open the disk for preparation.
#if defined(IOMTR_OSFAMILY_NETWARE) || defined(IOMTR_OSFAMILY_UNIX)
	// The disk::prepare() operation writes to a file iobw.tst till it uses up
	// all the available disk space. Now, Solaris allows a file to be created
	// with a (logical) size that is larger than the actual size of the file.
	// Later, when an attempt is made to write to the unwritten portion of the
	// file, Solaris attempts to expand the actual size on the disk to 
	// accomodate the new writes.
	//
	// The disk::prepare() throws up a problem here. Since we write in parallel
	// to the the file, Solaris allows us to create an iobw.tst that is larger
	// than the available space on the disk. A later write to the unfilled
	// portion throws up an ENOSPC error. To avoid this problem, we use the
	// O_APPEND flag which always sets the write offset to the eof.
	if ( !disk->Open( &grunt_state, O_APPEND ) )
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( !disk->Open( &grunt_state ) )
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	{
		cout << "*** Could not open disk." << endl;
		critical_error = TRUE;
	}
	else
	{
		// Prepare the disk, first with large block sizes, then with single sectors.
		if ( !disk->Prepare( buffer, &prepare_offset, buffer_size, &grunt_state ) ||
			 !disk->Prepare( buffer, &prepare_offset, disk->spec.disk_info.sector_size, &grunt_state ) )
		{
			cout << "*** An error occurred while preparing the disk." << endl;
			critical_error = TRUE;
		}

		disk->Close( NULL );
	}

	// Release the scratch buffer on every exit path that reaches here.
#if defined(IOMTR_OSFAMILY_NETWARE)
	NXMemFree( buffer );
#elif defined(IOMTR_OSFAMILY_UNIX)
	free( buffer );
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	VirtualFree( buffer, 0, MEM_RELEASE );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	cout << "   " << disk->spec.name << " done." << endl;
	InterlockedDecrement( (long *) &not_ready );
}
Example #7
0
//
// Setting access specifications for worker.  Also ensuring that a data buffer
// large enough to support the maximum requested transfer has been allocated.
// Note that Iometer will call Set_Access before testing starts to ensure that
// Dynamo can run the spec with the largest transfer request.
//
// Returns TRUE on success.  On allocation failure, data_size is reset to 0
// and FALSE is returned.
//
BOOL Grunt::Set_Access( const Test_Spec* spec )
{
	// Check for idle spec.
	if ((idle = ( spec->access[0].of_size == IOERROR)))
		return TRUE;

	access_spec.Initialize( &(spec->access[0]) );

	// Allocate a data buffer large enough to support the maximum requested 
	// transfer.  We do this only if the current buffer is too small and
	// we're using per worker data buffers.
	if ( data_size >= access_spec.max_transfer ) {
		cout << "Grunt: Grunt data buffer size " << data_size << " >= " 
			 << access_spec.max_transfer << ", returning" << endl;
		return TRUE;
	} else if ( !data_size ) {
		// We always want to use our own buffers, not the manager's
		// buffer.  This is due to a bug in some ServerWorks chipsets
		// (confirmed on the HE-SL chipset) where performing both
		// read and write operations to the same cache line can cause
		// the PCI bridge (the CIOB5) to hang indefinitely until a
		// third PCI bus request comes in.
		//
		// Using per-grunt buffers eliminates that problem, as you
		// aren't thrashing on the same buffer for both read and
		// write operations.
		//
		// data_size is updated only after both buffers are successfully
		// allocated (at the end of this function).  Previously it was
		// assigned here, before this message, which made the log print
		// the new size for both the "from" and "to" values.
		cout << "Grunt: Growing grunt data buffer from " << data_size << " to " 
			 << access_spec.max_transfer << endl;
	}

	// Allocating a larger buffer.
	#ifdef _DEBUG
		cout << "Growing grunt data buffers from " << data_size << " to " 
			 << access_spec.max_transfer << endl;
	#endif

	// (Re)allocate the read buffer at the new maximum transfer size.
#if defined(IOMTR_OSFAMILY_NETWARE)
	if ( read_data ) {
		NXMemFree( read_data );
	}
	errno = 0;
	if ( !(read_data = NXMemAlloc(access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	if ( read_data ) {
		free( read_data );
	}
	errno = 0;
	if ( !(read_data = valloc(access_spec.max_transfer) ))		
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( read_data ) {
		VirtualFree( read_data, 0, MEM_RELEASE );
	}
	if ( !(read_data = VirtualAlloc(NULL, access_spec.max_transfer, MEM_COMMIT, PAGE_READWRITE)))
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Grunt could not allocate read data buffer for I/O transfers." << endl;
		data_size = 0;
		return FALSE;
	}

	// (Re)allocate the write buffer at the new maximum transfer size.
#if defined(IOMTR_OSFAMILY_NETWARE)
	if ( write_data ) {
		NXMemFree( write_data );
	}
	errno = 0;
	if ( !(write_data = NXMemAlloc(access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	if ( write_data ) {
		free( write_data );
	}
	errno = 0;
	if ( !(write_data = valloc(access_spec.max_transfer) ))
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( write_data ) {
		VirtualFree( write_data, 0, MEM_RELEASE );
	}
	if ( !(write_data = VirtualAlloc(NULL, access_spec.max_transfer, MEM_COMMIT, PAGE_READWRITE)))
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Grunt could not allocate write data buffer for I/O transfers." << endl;
		data_size = 0;
		return FALSE;
	}

	// Both buffers are in place; record the new size.
	data_size = access_spec.max_transfer;
	return TRUE;
}
Example #8
0
//
// Setting the size of the target array to hold the number and type of 
// targets specified.  If the requested number of targets is 0, the 
// array will be freed.
//
// Returns TRUE on success.  On failure the previous array contents have
// already been released, but the pointer array itself is never leaked.
//
BOOL Grunt::Size_Target_Array( int count, const Target_Spec *target_specs )
{
	int i;
	Target **new_targets;

	// Free all current targets.  This is needed in case the newer targets
	// are of a different type, even if we have the same number of targets.
	for ( i = 0; i < target_count; i++ )
		delete targets[i];
	target_count = 0;

	// Reset the grunt's target type.
	type = InvalidType;

	// Release the memory if everything is being freed.
	if ( !count || !target_specs )
	{
#if defined(IOMTR_OSFAMILY_NETWARE)
		NXMemFree( targets );
#elif defined(IOMTR_OSFAMILY_UNIX)
		free( targets );
#elif defined(IOMTR_OSFAMILY_WINDOWS)
		free( targets );   // TODO: Check if VirtualFree() is not needed here.
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		targets = NULL;
		return TRUE;
	}

	// Allocate enough pointers to refer to all targets.  Assign through a
	// temporary: writing the realloc result straight into `targets` would
	// leak (and lose) the existing array if the reallocation failed.
#if defined(IOMTR_OSFAMILY_NETWARE)	
	new_targets = (Target**)NXMemRealloc( targets, sizeof(Target*) * count, 1 );
#elif defined(IOMTR_OSFAMILY_UNIX) || defined(IOMTR_OSFAMILY_WINDOWS)
	new_targets = (Target**)realloc( targets, sizeof(Target*) * count );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if ( !new_targets )
		return FALSE;
	targets = new_targets;

	// Create the requested number of targets.
	for ( i = 0; i < count; i++ )
	{
		// Accumulate the union of all target types assigned to this grunt.
		type = (TargetType)(type | target_specs[i].type);

		// Initialize the slot so an unrecognized spec type is detected
		// below instead of leaving uninitialized garbage in the array.
		targets[i] = NULL;

		if ( IsType( target_specs[i].type, GenericDiskType ) )
			targets[i] = new TargetDisk;
		else if ( IsType( target_specs[i].type, GenericTCPType ) )
			targets[i] = new TargetTCP;
#if defined(IOMTR_SETTING_VI_SUPPORT)
		else if ( IsType( target_specs[i].type, GenericVIType ) )
			targets[i] = new TargetVI;
#endif // IOMTR_SETTING_VI_SUPPORT

		if ( !targets[i] )
		{
			// Record how many targets were actually created so that a
			// later call (or the destructor) deletes them instead of
			// leaking the partially built set.
			target_count = i;
			return FALSE;
		}
	}

	target_count = count;
	return TRUE;
}
//
// Setting access specifications for next test.
// Note that Iometer will call Set_Access before testing starts to ensure that
// Dynamo can run the spec with the largest transfer request.
//
// For ALL_WORKERS, applies the spec to every grunt.  Otherwise applies it
// to one grunt and, if that grunt shares the manager's data buffer, grows
// the shared buffer to fit the spec's maximum transfer.
//
BOOL Manager::Set_Access( int target, const Test_Spec *spec )
{
	int		g;			// loop control variable

	// Recursively assign all workers the same access specification.
	if ( target == ALL_WORKERS )
	{
		cout << "All workers running Access Spec: " << spec->name << endl;
		for ( g = 0; g < grunt_count; g++ )
		{
			if ( !Set_Access( g, spec ) )
				return FALSE;
		}
		return TRUE;
	}

	cout << "Worker " << target << " running Access Spec: " << spec->name << endl;

	// If the grunt could not set the access spec properly, return.
	// The grunt may not have been able to grow its data buffer.
	if ( !grunts[target]->Set_Access( spec ) )
		return FALSE;

	// If the grunt is not using the manager's data buffer or the manager's
	// buffer is already large enough, just return.
	// (A non-zero grunt data_size means the grunt owns private buffers.)
	if ( grunts[target]->data_size ||
		data_size >= grunts[target]->access_spec.max_transfer )
	{
		return TRUE;
	}

	// Grow the manager's data buffer and update all grunts using it.
	#if _DEBUG
		cout << "Growing manager data buffer from " << data_size << " to " 
			 << grunts[target]->access_spec.max_transfer << endl << flush;
	#endif

	// Align all data transfers on a page boundary.  This will work for all disks
	// with sector sizes that divide evenly into the page size - which is always
	// the case.
	// NOTE(review): the old buffer is freed before the new one is allocated;
	// if the allocation fails, grunts still aliasing the old buffer hold a
	// dangling pointer — confirm callers stop I/O when FALSE is returned.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_SOLARIS)
	free(data);
	errno = 0;
	if ( !(data = valloc(grunts[target]->access_spec.max_transfer) ))
#elif defined(IOMTR_OS_NETWARE) 
	NXMemFree(data);
	errno = 0;
	if ( !(data = NXMemAlloc(grunts[target]->access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OS_WIN32) || defined(IOMTR_OS_WIN64)
	VirtualFree( data, 0, MEM_RELEASE );
	if ( !(data = VirtualAlloc( NULL, grunts[target]->access_spec.max_transfer, 
		MEM_COMMIT, PAGE_READWRITE )) )
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	// Body of whichever if(...) was compiled above: runs only when the
	// allocation failed.
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Manager could not allocate data buffer for I/O transfers." 
			<< endl << flush;
		data_size = 0;
		return FALSE;
	}
	data_size = grunts[target]->access_spec.max_transfer;

	// Update all grunts using the manager's data buffer.  Grunts with a
	// zero data_size share the manager's buffer for both reads and writes.
	for ( g = 0; g < grunt_count; g++ )
	{
		if ( !grunts[g]->data_size )
		{
			grunts[g]->read_data = data;
			grunts[g]->write_data = data;
		}
	}
	return TRUE;
}
Example #10
0
//
// CloseHandle() has a slightly different interface from the NT call. It takes an
// additional input parameter to determine the object type. The object can be either
// a FILE_ELEMENT or a CQ_ELEMENT.
//
// CloseHandle() closes the file handle or the completion queue handle and frees all
// the allocated memory. It does an additional check to ensure that all the queued
// Asynch I/Os that have not yet completed are cancelled before actually closing the
// handle. This helps clean up the kernel queues of any pending requests.
//
// Although it takes longer, this is acceptable since the code is not in the performance 
// critical region.
// 
BOOL CloseHandle(HANDLE object, int object_type)
{
	struct File *filep;
	struct IOCQ *cqid;
	int retval, i;

#ifdef _DEBUG
	cout << "CloseHandle() freeing : handle = " << object << " objecttype = " << object_type << endl;
#endif

	switch (object_type) {
	case FILE_ELEMENT:
		filep = (struct File *)object;
		cqid = filep->iocq;
		// Cancel any pending aio requests; keep retrying until the kernel
		// reports none are left in the not-yet-cancelled state.
		retval = aio_cancel64(filep->fd, NULL);
		while (retval == AIO_NOTCANCELED) {
			retval = aio_cancel64(filep->fd, NULL);
		}

		if (cqid != NULL && cqid->element_list != NULL && cqid->aiocb_list != NULL) {
			for (i = 0; i < cqid->size; i++) {
				// Only dequeue requests that belong to this file.
				if (cqid->element_list[i].aiocbp.aio_fildes != filep->fd)
					continue;

				// We are not interested in the return values of aio_error() and aio_return().
				// only have to dequeue all the requests.
				if (!cqid->aiocb_list[i])
					continue;

				retval = aio_error64(cqid->aiocb_list[i]);
				retval = aio_return64(cqid->aiocb_list[i]);
			}
		}
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		close(filep->fd);
#elif defined(IOMTR_OS_NETWARE)
		if (IsType(filep->type, LogicalDiskType))
			NXClose(filep->fd);
		else if (IsType(filep->type, PhysicalDiskType))
			MM_ReleaseIOObject(filep->fd);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		break;
	case CQ_ELEMENT:
		cqid = (struct IOCQ *)object;

		// Cancel any pending aio requests.  Guard against a queue whose
		// arrays were never (or only partially) allocated — SetQueueSize
		// can fail after the IOCQ itself exists.  The FILE_ELEMENT case
		// above already performed this check; this case previously did not.
		if (cqid->element_list != NULL && cqid->aiocb_list != NULL) {
			for (i = 0; i < cqid->size; i++) {
				if (!cqid->aiocb_list[i])
					continue;

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_NETWARE)
				/*
				 * In Linux, you crash (!) if the aiocpb isn't in your queue. :-(
				 * This code seems to occasionally do this...so I just cancel all
				 * AIOs for the queue, thus avoiding the problem of cancelling a
				 * message not in the queue.
				 */
				retval = aio_cancel64(cqid->element_list[i].aiocbp.aio_fildes, NULL);
#elif defined(IOMTR_OS_SOLARIS)
				retval = aio_cancel64(cqid->element_list[i].aiocbp.aio_fildes, cqid->aiocb_list[i]);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
				if (retval == AIO_NOTCANCELED) {
					retval = aio_error64(cqid->aiocb_list[i]);
					retval = aio_return64(cqid->aiocb_list[i]);
				}
			}
		}

		// Release the aiocb pointer array (free(NULL) is a harmless no-op).
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(cqid->aiocb_list);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(cqid->aiocb_list);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		// Something strange here. If I free the element_list, the next round
		// of aio_write() and aio_read() calls fail. If I dont free this, then they
		// succeed. But then, there is a memory leak equal to the max number of outstanding
		// I/Os * sizeof(CQ_Element).    Does that mean that the above aio_cancel() calls
		// are broken ??? It should be mentioned here that the element_list holds the 
		// actual aiocb structures.
		//
		// It suddenly seems to be working now.
		// Remember to turn this "free" off when you hit the problem again.
		// NEED TO LOOK INTO THIS.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(cqid->element_list);
		free(cqid);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(cqid->element_list);
		NXMemFree(cqid);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		break;
	default:
		// Unknown object type: nothing to release.  Kept as a silent
		// success to preserve the original behavior.
		break;
	}
	return (TRUE);
}