示例#1
0
BOOL SetQueueSize(HANDLE cqid, int size)
{
	// Size the completion queue referenced by cqid so it can hold 'size'
	// outstanding requests: an array of CQ_Element slots plus a parallel
	// array of aiocb64 pointers.  Returns TRUE on success, FALSE if either
	// allocation fails (nothing is leaked on the failure path).
	struct IOCQ *queue = (struct IOCQ *)cqid;

	// Allocate the element array first.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	queue->element_list = (struct CQ_Element *)malloc(sizeof(CQ_Element) * size);
#elif defined(IOMTR_OS_NETWARE)
	queue->element_list = (struct CQ_Element *)NXMemAlloc(sizeof(CQ_Element) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (queue->element_list == NULL) {
		cout << "memory allocation failed" << endl;
		return (FALSE);
	}

	// Now the parallel array of aiocb pointers.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	queue->aiocb_list = (struct aiocb64 **)malloc(sizeof(struct aiocb64 *) * size);
#elif defined(IOMTR_OS_NETWARE)
	queue->aiocb_list = (struct aiocb64 **)NXMemAlloc(sizeof(struct aiocb64 *) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (queue->aiocb_list == NULL) {
		cout << "memory allocation failed" << endl;
		// Release the element array so a failed resize leaks nothing.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(queue->element_list);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(queue->element_list);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		return (FALSE);
	}

	// Zero both arrays and reset the queue bookkeeping.
	memset(queue->element_list, 0, sizeof(struct CQ_Element) * size);
	memset(queue->aiocb_list, 0, sizeof(struct aiocb64 *) * size);
	queue->size = size;
	queue->last_freed = -1;
	queue->position = 0;

#ifdef _DEBUG
	cout << "allocated a completion queue of size " << size << " for handle : " << queue << endl;
#endif
	return (TRUE);
}
示例#2
0
BOOL CQAIO::SetQueueSize(int size)
{
	// Size this object's completion queue to hold 'size' outstanding
	// requests.  Allocates the CQ_Element array and the parallel aiocb64
	// pointer array; returns TRUE on success, FALSE on allocation failure
	// (the partially-allocated element array is released on failure).
	struct IOCQ *queue = (struct IOCQ *)completion_queue;

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	queue->element_list = (struct CQ_Element *)malloc(sizeof(CQ_Element) * size);
#elif defined(IOMTR_OS_NETWARE)
	queue->element_list = (struct CQ_Element *)NXMemAlloc(sizeof(CQ_Element) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (queue->element_list == NULL) {
		cout << "memory allocation failed." << endl;
		return (FALSE);
	}

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	queue->aiocb_list = (struct aiocb64 **)malloc(sizeof(struct aiocb64 *) * size);
#elif defined(IOMTR_OS_NETWARE)
	queue->aiocb_list = (struct aiocb64 **)NXMemAlloc(sizeof(struct aiocb64 *) * size, 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (queue->aiocb_list == NULL) {
		cout << "memory allocation failed." << endl;
		// Don't leak the element array when the second allocation fails.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		free(queue->element_list);
#elif defined(IOMTR_OS_NETWARE)
		NXMemFree(queue->element_list);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		return (FALSE);
	}

	// Record the new capacity and zero both arrays.
	queue->size = size;
	memset(queue->aiocb_list, 0, sizeof(struct aiocb64 *) * size);
	memset(queue->element_list, 0, sizeof(struct CQ_Element) * size);

#ifdef _DEBUG
	cout << "allocated a completion queue of size " << size << " for handle : " << queue << endl;
#endif
	return (TRUE);
}
示例#3
0
//
// Starting threads to prepare disks for tests.  Returning TRUE if we
// successfully started the disk preparation.
//
BOOL Grunt::Prepare_Disks()
{
#if defined(IOMTR_OSFAMILY_NETWARE) || defined(IOMTR_OSFAMILY_UNIX)
	pthread_t newThread;
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	// nop
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif

	grunt_state = TestPreparing;
	// Each successfully started preparation thread decrements not_ready
	// when it finishes, so the count must track threads actually running.
	InterlockedExchange( (long *) &not_ready, target_count );

	// Creating a thread to prepare each disk.
	cout << "Preparing disks..." << endl;

	// One Thread_Info slot per target; passed to each worker thread.
#if defined(IOMTR_OSFAMILY_NETWARE)	
	prepare_thread = (Thread_Info*)NXMemAlloc(sizeof(Thread_Info) * target_count, 1 );
#elif defined(IOMTR_OSFAMILY_UNIX) || defined(IOMTR_OSFAMILY_WINDOWS)
	prepare_thread = (Thread_Info*)malloc(sizeof(Thread_Info) * target_count );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if ( !prepare_thread )
	{
		cout << "*** Unable to allocate memory for preparation threads." << endl;
		return FALSE;
	}

	for ( int i = 0; i < target_count; i++ )
	{
		if ( IsType( targets[i]->spec.type, LogicalDiskType ) )
		{
			prepare_thread[i].parent = this;
			prepare_thread[i].id = i;
			cout << "   " << targets[i]->spec.name << " preparing." << endl;
#if defined(IOMTR_OSFAMILY_NETWARE) || defined(IOMTR_OSFAMILY_UNIX)
			// Check the result instead of assuming thread creation succeeds;
			// a silently failed spawn would leave not_ready stuck above zero.
			if ( pthread_create(&newThread, NULL, (void *(*)(void *))Prepare_Disk_Wrapper,
								(void *) &(prepare_thread[i])) != 0 )
			{
				cout << "*** Unable to create preparation thread." << endl;
				InterlockedDecrement( (long *) &not_ready );
			}
			else
			{
				pthread_detach(newThread);
			}
#elif defined(IOMTR_OSFAMILY_WINDOWS)
			// _beginthread returns -1L on failure.
			if ( _beginthread( Prepare_Disk_Wrapper, 0, (void *) &(prepare_thread[i]) ) == (uintptr_t)-1L )
			{
				cout << "*** Unable to create preparation thread." << endl;
				InterlockedDecrement( (long *) &not_ready );
			}
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		}
	}
	return TRUE;
}
示例#4
0
//
// This function is the UNIX equivalent of the NT call to create a IO Completion Port.
// The behavior is similar to the NT call wherein a Completion Queue is created and
// associated with a file handle (if required).
//
// A Completion Queue is a structure that holds the status of the various asynch IO
// requests queued by the program.
// 
// The function returns a handle to the Completion Queue.
//
HANDLE CreateIoCompletionPort(HANDLE file_handle, HANDLE cq, DWORD completion_key, DWORD num_threads)
{
	struct File *filep;
	struct IOCQ *cqid;

	cqid = (struct IOCQ *)cq;
	if (cqid == NULL) {
		// cqid is NULL. We assign a new completion queue.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
		cqid = (struct IOCQ *)malloc(sizeof(struct IOCQ));
#elif defined(IOMTR_OS_NETWARE)
		cqid = (struct IOCQ *)NXMemAlloc(sizeof(struct IOCQ), 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
		if (cqid == NULL) {
			cout << "memory allocation failed. Exiting...." << endl;
			exit(1);
		}
		// Start with an empty queue; SetQueueSize() allocates the arrays.
		cqid->element_list = NULL;
		cqid->aiocb_list = NULL;
		cqid->size = 0;
		cqid->last_freed = -1;
		cqid->position = 0;
	}
	// cqid is not-NULL. Trying to assign an existing completion queue.
	// to the file handle.
	// If file_handle is INVALID then do nothing. 
	// We are required to create the comp queue only.
	filep = (struct File *)file_handle;
	// Guard against NULL as well as INVALID_HANDLE_VALUE: the original
	// only checked the latter, so a NULL file_handle dereferenced NULL.
	if (filep != NULL && filep != INVALID_HANDLE_VALUE) {
		filep->iocq = cqid;
		filep->completion_key = completion_key;
	}

	return ((HANDLE) cqid);
}
示例#5
0
//
// UNIX equivalent of the NT CreateEvent() call.  The "event" is backed by
// an IOCQ structure, just like a Completion Queue, except that it is not
// associated with any particular file handle.
//
// Returns a handle to a freshly allocated, empty event queue, or NULL if
// the allocation fails.  (The signature parameters are accepted for NT
// compatibility and ignored here.)
//
HANDLE CreateEvent(void *, BOOL, BOOL, LPCTSTR)
{
	IOCQ *eventq;

#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	eventq = (struct IOCQ *)malloc(sizeof(struct IOCQ));
#elif defined(IOMTR_OS_NETWARE)
	eventq = (struct IOCQ *)NXMemAlloc(sizeof(struct IOCQ), 1);
#else
#warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	if (eventq == NULL) {
		cout << "memory allocation failed. Exiting...." << endl;
		return (NULL);
	}

	// Initialize to an empty queue; no element storage is allocated yet.
	eventq->element_list = NULL;
	eventq->aiocb_list = NULL;
	eventq->size = 0;
	eventq->last_freed = -1;
	eventq->position = 0;

	return ((HANDLE) eventq);
}
示例#6
0
//
// Preparing a disk for access by a worker thread.  The disk must have been 
// previously initialized.
//
void Grunt::Prepare_Disk( int disk_id )
{
	void		*buffer = NULL;
	DWORD		buffer_size;
	DWORDLONG	prepare_offset = 0;
	TargetDisk	*disk = (TargetDisk *) targets[disk_id];

	critical_error = FALSE;

	// Allocate a large (64k for 512 byte sector size) buffer for the preparation.
	// (buffer is a fresh local initialized to NULL, so there is nothing to
	// free before the allocation - the old code freed the NULL pointer.)
	buffer_size = disk->spec.disk_info.sector_size * 128;
#if defined(IOMTR_OSFAMILY_NETWARE)
	errno = 0;
	if ( !(buffer = NXMemAlloc(buffer_size, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	errno = 0;
	if ( !(buffer = valloc(buffer_size) ))
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( !(buffer = VirtualAlloc( NULL, buffer_size, MEM_COMMIT, PAGE_READWRITE )) )
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	{
		cout << "*** Could not allocate buffer to prepare disk." << endl;
		critical_error = TRUE;
		InterlockedDecrement( (long *) &not_ready );
		return;
	}

	// Open the disk for preparation.
#if defined(IOMTR_OSFAMILY_NETWARE) || defined(IOMTR_OSFAMILY_UNIX)
	// The disk::prepare() operation writes to a file iobw.tst till it uses up
	// all the available disk space. Now, Solaris allows a file to be created
	// with a (logical) size that is larger than the actual size of the file.
	// Later, when an attempt is made to write to the unwritten portion of the
	// file, Solaris attempts to expand the actual size on the disk to 
	// accomodate the new writes.
	//
	// The disk::prepare() throws up a problem here. Since we write in parallel
	// to the the file, Solaris allows us to create an iobw.tst that is larger
	// than the available space on the disk. A later write to the unfilled
	// portion throws up an ENOSPC error. To avoid this problem, we use the
	// O_APPEND flag which always sets the write offset to the eof.
	if ( !disk->Open( &grunt_state, O_APPEND ) )
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( !disk->Open( &grunt_state ) )
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	{
		cout << "*** Could not open disk." << endl;
		critical_error = TRUE;
	}
	else
	{
		// Prepare the disk, first with large block sizes, then with single sectors.
		if ( !disk->Prepare( buffer, &prepare_offset, buffer_size, &grunt_state ) ||
			 !disk->Prepare( buffer, &prepare_offset, disk->spec.disk_info.sector_size, &grunt_state ) )
		{
			cout << "*** An error occurred while preparing the disk." << endl;
			critical_error = TRUE;
		}

		disk->Close( NULL );
	}

	// Release the preparation buffer and signal completion.
#if defined(IOMTR_OSFAMILY_NETWARE)
	NXMemFree( buffer );
#elif defined(IOMTR_OSFAMILY_UNIX)
	free( buffer );
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	VirtualFree( buffer, 0, MEM_RELEASE );
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	cout << "   " << disk->spec.name << " done." << endl;
	InterlockedDecrement( (long *) &not_ready );
}
示例#7
0
//
// Setting access specifications for worker.  Also ensuring that a data buffer
// large enough to support the maximum requested transfer has been allocated.
// Note that Iometer will call Set_Access before testing starts to ensure that
// Dynamo can run the spec with the largest transfer request.
//
BOOL Grunt::Set_Access( const Test_Spec* spec )
{
	// Check for idle spec.
	if ((idle = ( spec->access[0].of_size == IOERROR)))
		return TRUE;

	access_spec.Initialize( &(spec->access[0]) );

	// Allocate a data buffer large enough to support the maximum requested 
	// transfer.  We do this only if the current buffer is too small and
	// we're using per worker data buffers.
	if ( data_size >= access_spec.max_transfer ) {
		cout << "Grunt: Grunt data buffer size " << data_size << " >= " 
			 << access_spec.max_transfer << ", returning" << endl;
		return TRUE;
	} else if ( !data_size ) {
		// We always want to use our own buffers, not the manager's
		// buffer.  This is due to a bug in some ServerWorks chipsets
		// (confirmed on the HE-SL chipset) where performing both
		// read and write operations to the same cache line can cause
		// the PCI bridge (the CIOB5) to hang indefinitely until a
		// third PCI bus request comes in.
		//
		// Using per-grunt buffers eliminates that problem, as you
		// aren't thrashing on the same buffer for both read and
		// write operations.
		//
		// NOTE: data_size is intentionally NOT assigned here.  The old
		// code set it before printing, which made the message below
		// always show "from X to X"; data_size is updated at the end
		// of this function once both allocations have succeeded.
		cout << "Grunt: Growing grunt data buffer from " << data_size << " to " 
			 << access_spec.max_transfer << endl;
	}

	// Allocating a larger buffer.
	#if _DEBUG
		cout << "Growing grunt data buffers from " << data_size << " to " 
			 << access_spec.max_transfer << endl;
	#endif

	// (Re)allocate the read buffer, releasing any previous one first.
#if defined(IOMTR_OSFAMILY_NETWARE)
	if ( read_data ) {
		NXMemFree( read_data );
	}
	errno = 0;
	if ( !(read_data = NXMemAlloc(access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	if ( read_data ) {
		free( read_data );
	}
	errno = 0;
	if ( !(read_data = valloc(access_spec.max_transfer) ))		
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( read_data ) {
		VirtualFree( read_data, 0, MEM_RELEASE );
	}
	if ( !(read_data = VirtualAlloc(NULL, access_spec.max_transfer, MEM_COMMIT, PAGE_READWRITE)))
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Grunt could not allocate read data buffer for I/O transfers." << endl;
		data_size = 0;
		return FALSE;
	}

	// (Re)allocate the write buffer the same way.
#if defined(IOMTR_OSFAMILY_NETWARE)
	if ( write_data ) {
		NXMemFree( write_data );
	}
	errno = 0;
	if ( !(write_data = NXMemAlloc(access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OSFAMILY_UNIX)
	if ( write_data ) {
		free( write_data );
	}
	errno = 0;
	if ( !(write_data = valloc(access_spec.max_transfer) ))
#elif defined(IOMTR_OSFAMILY_WINDOWS)
	if ( write_data ) {
		VirtualFree( write_data, 0, MEM_RELEASE );
	}
	if ( !(write_data = VirtualAlloc(NULL, access_spec.max_transfer, MEM_COMMIT, PAGE_READWRITE)))
#else
  #warning ===> WARNING: You have to do some coding here to get the port done! 
#endif
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Grunt could not allocate write data buffer for I/O transfers." << endl;
		data_size = 0;
		return FALSE;
	}

	data_size = access_spec.max_transfer;
	return TRUE;
}
//
// Setting access specifications for next test.
// Note that Iometer will call Set_Access before testing starts to ensure that
// Dynamo can run the spec with the largest transfer request.
//
BOOL Manager::Set_Access( int target, const Test_Spec *spec )
{
	int		g;			// loop control variable

	// Recursively assign all workers the same access specification.
	if ( target == ALL_WORKERS )
	{
		cout << "All workers running Access Spec: " << spec->name << endl;
		for ( g = 0; g < grunt_count; g++ )
		{
			if ( !Set_Access( g, spec ) )
				return FALSE;
		}
		return TRUE;
	}

	cout << "Worker " << target << " running Access Spec: " << spec->name << endl;

	// If the grunt could not set the access spec properly, return.
	// The grunt may not have been able to grow its data buffer.
	if ( !grunts[target]->Set_Access( spec ) )
		return FALSE;

	// If the grunt is not using the manager's data buffer or the manager's
	// buffer is already large enough, just return.
	if ( grunts[target]->data_size ||
		data_size >= grunts[target]->access_spec.max_transfer )
	{
		return TRUE;
	}

	// Grow the manager's data buffer and update all grunts using it.
	#if _DEBUG
		cout << "Growing manager data buffer from " << data_size << " to " 
			 << grunts[target]->access_spec.max_transfer << endl << flush;
	#endif

	// Align all data transfers on a page boundary.  This will work for all disks
	// with sector sizes that divide evenly into the page size - which is always
	// the case.
	// NOTE: IOMTR_OS_OSX added to the condition below for consistency with
	// every other allocation site in this file; without it the OSX build hit
	// the #warning branch and left the failure block unattached.
#if defined(IOMTR_OS_LINUX) || defined(IOMTR_OS_OSX) || defined(IOMTR_OS_SOLARIS)
	free(data);
	errno = 0;
	if ( !(data = valloc(grunts[target]->access_spec.max_transfer) ))
#elif defined(IOMTR_OS_NETWARE) 
	NXMemFree(data);
	errno = 0;
	if ( !(data = NXMemAlloc(grunts[target]->access_spec.max_transfer, 1) ))
#elif defined(IOMTR_OS_WIN32) || defined(IOMTR_OS_WIN64)
	VirtualFree( data, 0, MEM_RELEASE );
	if ( !(data = VirtualAlloc( NULL, grunts[target]->access_spec.max_transfer, 
		MEM_COMMIT, PAGE_READWRITE )) )
#else
 #warning ===> WARNING: You have to do some coding here to get the port done!
#endif
	{
		// Could not allocate a larger buffer.  Signal failure.
		cout << "*** Manager could not allocate data buffer for I/O transfers." 
			<< endl << flush;
		data_size = 0;
		return FALSE;
	}
	data_size = grunts[target]->access_spec.max_transfer;

	// Update all grunts using the manager's data buffer.
	for ( g = 0; g < grunt_count; g++ )
	{
		if ( !grunts[g]->data_size )
		{
			grunts[g]->read_data = data;
			grunts[g]->write_data = data;
		}
	}
	return TRUE;
}