Example #1
void
scsi_dma_buffer_daemon(void *dev, int counter)
{
	scsi_device_info *device = (scsi_device_info*)dev;
	dma_buffer *buffer;

	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer = &device->dma_buffer;

	// free the buffer if it has gone unused for longer than the cleanup delay
	if (!buffer->inuse
		&& system_time() - buffer->last_use > SCSI_DMA_BUFFER_CLEANUP_DELAY) {
		scsi_free_dma_buffer(buffer);
		scsi_free_dma_buffer_sg_orig(buffer);
	}

	RELEASE_BEN(&device->dma_buffer_lock);
}
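The (void *, int) signature above matches Haiku's kernel daemon hook, so device setup code can arrange for it to run periodically. A minimal sketch of hypothetical setup/teardown helpers, assuming the scsi_device_info type from these examples and the register_kernel_daemon()/unregister_kernel_daemon() calls from KernelExport.h; the frequency value is illustrative (it counts kernel daemon ticks, each on the order of 100 ms):

#include <KernelExport.h>

// Hypothetical helper: have the kernel daemon call the cleanup hook
// periodically for this device.
static status_t
start_dma_buffer_daemon(scsi_device_info *device)
{
	return register_kernel_daemon(scsi_dma_buffer_daemon, device, 10);
}

// Hypothetical helper: stop the periodic calls on device removal.
static void
stop_dma_buffer_daemon(scsi_device_info *device)
{
	unregister_kernel_daemon(scsi_dma_buffer_daemon, device);
}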
Example #2
void
scsi_dma_buffer_free(dma_buffer *buffer)
{
	scsi_free_dma_buffer(buffer);
	scsi_free_dma_buffer_sg_orig(buffer);
}
Example #3
static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	// free old buffer first
	scsi_free_dma_buffer(buffer);

	// round the size up according to the alignment mask (mask semantics match
	// the "alignment + 1" use below), in case alignment is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;

	// round up to a whole number of pages
	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// calculate the worst case number of S/G entries, i.e. if they are
	// non-contiguous; there is a controller limit and a limit imposed by our
	// own S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;

		// alright - a contiguous buffer is required to keep S/G table short
		SHOW_INFO(1, "need to set up contiguous DMA buffer of size %" B_PRIu32,
			size);

		// verify that we don't get problems with dma boundary
		if (boundary != ~(uint32)0) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%"
					 B_PRId32 "/%" B_PRId32 " bytes)", size, boundary + 1);
				return false;
			}
		}

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		if (dma_params->alignment != ~(uint32)0)
			physicalRestrictions.alignment = dma_params->alignment + 1;
		if (boundary != ~(uint32)0)
			physicalRestrictions.boundary = boundary + 1;
#if B_HAIKU_PHYSICAL_BITS > 32
		physicalRestrictions.high_address = 0x100000000ULL;
			// TODO: Use 64 bit addresses, if possible!
#endif
		buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size,
			B_CONTIGUOUS, 0, 0, 0, &virtualRestrictions, &physicalRestrictions,
			(void**)&buffer->address);

		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contignous DMA buffer of %" B_PRIu32
				" bytes", size);
			return false;
		}

		buffer->size = size;
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size,
			B_32_BIT_FULL_LOCK, 0);
				// TODO: Use B_FULL_LOCK, if possible!
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes",
				size);
			return false;
		}

		buffer->size = size;
	}

	// create S/G list
	// worst case is one entry per page, and size is page-aligned
	size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof(physical_entry);
	// create_area has page granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_32_BIT_FULL_LOCK, 0);
			// TODO: Use B_FULL_LOCK, if possible!
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR(2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE
			" bytes", sg_list_size);

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	size_t sg_list_entries = sg_list_size / sizeof(physical_entry);

	{
		size_t mapped_len;
		status_t res;
		iovec vec = {
			buffer->address,
			buffer->size
		};

		res = get_iovec_memory_map(&vec, 1, 0, buffer->size,
			buffer->sg_list, sg_list_entries, &buffer->sg_count, &mapped_len);

		if (res != B_OK || mapped_len != buffer->size) {
			SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted "
				"%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res),
				buffer->size, mapped_len);

			// an incomplete S/G list must not be used for DMA
			delete_area(buffer->sg_list_area);
			delete_area(buffer->area);
			buffer->sg_list_area = 0;
			buffer->area = 0;
			return false;
		}
	}

	return true;
}
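The two rounding steps at the top of this function are plain mask arithmetic. A standalone sketch with hypothetical values (a 4096-byte page and an alignment mask of 0x3, i.e. 4-byte alignment, consistent with the "alignment + 1" use above):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint32_t alignment = 0x3;	// mask: buffer must be 4-byte aligned
	uint32_t pageSize = 4096;
	uint32_t size = 8193;		// requested transfer size in bytes

	// round up to a multiple of (alignment + 1)
	size = (size + alignment) & ~alignment;
	printf("after alignment rounding: %" PRIu32 "\n", size);	// 8196

	// round up to a whole number of pages
	size = (size + pageSize - 1) & ~(pageSize - 1);
	printf("after page rounding: %" PRIu32 "\n", size);		// 12288

	// worst case S/G entries: one per (possibly non-contiguous) page
	printf("worst case S/G entries: %" PRIu32 "\n", size / pageSize);	// 3
	return 0;
}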
Example #4
static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	size_t sg_list_size, sg_list_entries;

	// free old buffer first
	scsi_free_dma_buffer(buffer);

	// round the size up according to the alignment mask, in case alignment
	// is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;

	// round up to a whole number of pages
	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// calculate the worst case number of S/G entries, i.e. if they are
	// non-contiguous; there is a controller limit and a limit imposed by our
	// own S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;
		uchar *dma_buffer_address_unaligned;

		// alright - a contiguous buffer is required to keep S/G table short
		SHOW_INFO(1, "need to set up contiguous DMA buffer of size %d",
			(int)size);

		// verify that we don't get problems with dma boundary
		if (boundary != ~0UL) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%d/%d bytes)",
					(int)size, (int)boundary + 1);
				return false;
			}

			// round up to next power of two and allocate a buffer double the
			// needed size so we can cut out an area that doesn't cross
			// dma boundary
			size = (1 << log2(size)) * 2;
		}

		buffer->area = create_area("DMA buffer",
			(void **)&dma_buffer_address_unaligned,
			B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS, 0);
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes",
				(int)size);
			return false;
		}

		if (boundary != ~0UL) {
			uchar *next_boundary;

			// boundary case: cut out piece aligned on "size"
			buffer->address = (uchar *)(
				((addr_t)dma_buffer_address_unaligned + size - 1) & ~(size - 1));

			// determine how many bytes are available until the next DMA
			// boundary; dma_boundary is a mask (the boundary size is
			// boundary + 1, matching the checks above), so round up past
			// the current address
			next_boundary = (uchar *)(((addr_t)buffer->address + boundary + 1)
				& ~(addr_t)boundary);

			// adjust next boundary if outside allocated area
			if (next_boundary > dma_buffer_address_unaligned + size)
				next_boundary = dma_buffer_address_unaligned + size;

			buffer->size = next_boundary - buffer->address;
		} else {
			// non-boundary case: use buffer directly
			buffer->address = dma_buffer_address_unaligned;
			buffer->size = size;
		}
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address,
			B_ANY_KERNEL_ADDRESS, size,
			B_FULL_LOCK, 0);
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes",
				(int)size);
			return false;
		}

		buffer->size = size;
	}

	// create S/G list
	// worst case is one entry per page, and size is page-aligned
	sg_list_size = buffer->size / B_PAGE_SIZE * sizeof(physical_entry);
	// create_area has page granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list,
		B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_FULL_LOCK, 0);
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR(2, "Cannot create DMA buffer S/G list of %d bytes",
			(int)sg_list_size);

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	sg_list_entries = sg_list_size / sizeof(physical_entry);

	{
		size_t mapped_len;
		status_t res;
		iovec vec = {
			buffer->address,
			buffer->size
		};

		res = get_iovec_memory_map(&vec, 1, 0, buffer->size,
			buffer->sg_list, sg_list_entries, &buffer->sg_count, &mapped_len);

		if (res != B_OK || mapped_len != buffer->size) {
			SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted %d, got %d bytes)",
				strerror(res), (int)buffer->size, (int)mapped_len);

			// an incomplete S/G list must not be used for DMA
			delete_area(buffer->sg_list_area);
			delete_area(buffer->area);
			buffer->sg_list_area = 0;
			buffer->area = 0;
			return false;
		}
	}

	return true;
}
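The boundary-cut arithmetic in this older version is easiest to check with concrete numbers. A standalone sketch with hypothetical addresses, assuming dma_boundary is a mask (boundary size = boundary + 1, as the "boundary + 1" check above implies):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t unaligned = 0x10087000;	// start of the over-allocated area
	uintptr_t size = 0x8000;		// power-of-two allocation size (32 KiB)
	uintptr_t boundary = 0xffff;		// mask: 64 KiB DMA boundary

	// cut out a piece aligned on "size"
	uintptr_t address = (unaligned + size - 1) & ~(size - 1);

	// first boundary crossing strictly after "address"
	uintptr_t nextBoundary = (address + boundary + 1) & ~boundary;

	// clamp to the end of the allocated area
	if (nextBoundary > unaligned + size)
		nextBoundary = unaligned + size;

	printf("address:       0x%" PRIxPTR "\n", address);		// 0x10088000
	printf("next boundary: 0x%" PRIxPTR "\n", nextBoundary);	// 0x1008f000 (clamped)
	printf("usable bytes:  0x%" PRIxPTR "\n", nextBoundary - address);	// 0x7000
	return 0;
}

Note how the clamp can leave fewer usable bytes than requested; this is why the function recomputes buffer->size from the cut instead of keeping the requested size.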