static bool scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size) { size_t sg_list_size, sg_list_entries; // free old buffer first scsi_free_dma_buffer( buffer ); // just in case alignment is redicuously huge size = (size + dma_params->alignment) & ~dma_params->alignment; size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1); // calculate worst case number of S/G entries, i.e. if they are non-continuous; // there is a controller limit and a limit by our own S/G manager to check if (size / B_PAGE_SIZE > dma_params->max_sg_blocks || size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) { uint32 boundary = dma_params->dma_boundary; uchar *dma_buffer_address_unaligned; // alright - a contiguous buffer is required to keep S/G table short SHOW_INFO(1, "need to setup contiguous DMA buffer of size %d", (int)size); // verify that we don't get problems with dma boundary if (boundary != ~0UL) { if (size > boundary + 1) { SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%d/%d bytes)", (int)size, (int)boundary + 1); return false; } // round up to next power of two and allocate a buffer double the // needed size so we can cut out an area that doesn't cross // dma boundary size = (1 << log2( size )) * 2; } buffer->area = create_area("DMA buffer", (void **)&dma_buffer_address_unaligned, B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS, 0); if (buffer->area < 0) { SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes", (int)size); return false; } if (boundary != ~0UL) { uchar *next_boundary; // boundary case: cut out piece aligned on "size" buffer->address = (uchar *)( ((addr_t)dma_buffer_address_unaligned + size - 1) & ~(size - 1)); // determine how many bytes are available until next DMA boundary next_boundary = (uchar *)(((addr_t)buffer->address + boundary - 1) & ~(boundary - 1)); // adjust next boundary if outside allocated area if( next_boundary > dma_buffer_address_unaligned + size ) next_boundary = dma_buffer_address_unaligned + size; buffer->size 
= next_boundary - buffer->address; } else { // non-boundary case: use buffer directly buffer->address = dma_buffer_address_unaligned; buffer->size = size; } } else { // we can live with a fragmented buffer - very nice buffer->area = create_area( "DMA buffer", (void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK, 0 ); if (buffer->area < 0) { SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes", (int)size); return false; } buffer->size = size; } // create S/G list // worst case is one entry per page, and size is page-aligned sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry ); // create_area has page-granularity sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1); buffer->sg_list_area = create_area("DMA buffer S/G table", (void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size, B_FULL_LOCK, 0); if (buffer->sg_list_area < 0) { SHOW_ERROR( 2, "Cannot craete DMA buffer S/G list of %d bytes", (int)sg_list_size ); delete_area(buffer->area); buffer->area = 0; return false; } sg_list_entries = sg_list_size / sizeof( physical_entry ); { size_t mapped_len; status_t res; iovec vec = { buffer->address, buffer->size }; res = get_iovec_memory_map( &vec, 1, 0, buffer->size, buffer->sg_list, sg_list_entries, &buffer->sg_count, &mapped_len ); if( res != B_OK || mapped_len != buffer->size ) { SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted %d, got %d bytes)", strerror(res), (int)mapped_len, (int)buffer->size); } } return true; }
static bool scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size) { // free old buffer first scsi_free_dma_buffer(buffer); // just in case alignment is ridiculously huge size = (size + dma_params->alignment) & ~dma_params->alignment; size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1); // calculate worst case number of S/G entries, i.e. if they are non-continuous; // there is a controller limit and a limit by our own S/G manager to check if (size / B_PAGE_SIZE > dma_params->max_sg_blocks || size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) { uint32 boundary = dma_params->dma_boundary; // alright - a contiguous buffer is required to keep S/G table short SHOW_INFO(1, "need to setup contiguous DMA buffer of size %" B_PRIu32, size); // verify that we don't get problems with dma boundary if (boundary != ~(uint32)0) { if (size > boundary + 1) { SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%" B_PRId32 "/%" B_PRId32 " bytes)", size, boundary + 1); return false; } } virtual_address_restrictions virtualRestrictions = {}; virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; physical_address_restrictions physicalRestrictions = {}; if (dma_params->alignment != ~(uint32)0) physicalRestrictions.alignment = dma_params->alignment + 1; if (boundary != ~(uint32)0) physicalRestrictions.boundary = boundary + 1; #if B_HAIKU_PHYSICAL_BITS > 32 physicalRestrictions.high_address = 0x100000000ULL; // TODO: Use 64 bit addresses, if possible! 
#endif buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size, B_CONTIGUOUS, 0, 0, 0, &virtualRestrictions, &physicalRestrictions, (void**)&buffer->address); if (buffer->area < 0) { SHOW_ERROR(2, "Cannot create contignous DMA buffer of %" B_PRIu32 " bytes", size); return false; } buffer->size = size; } else { // we can live with a fragmented buffer - very nice buffer->area = create_area("DMA buffer", (void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size, B_32_BIT_FULL_LOCK, 0); // TODO: Use B_FULL_LOCK, if possible! if (buffer->area < 0) { SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes", size); return false; } buffer->size = size; } // create S/G list // worst case is one entry per page, and size is page-aligned size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry ); // create_area has page-granularity sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1); buffer->sg_list_area = create_area("DMA buffer S/G table", (void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size, B_32_BIT_FULL_LOCK, 0); // TODO: Use B_FULL_LOCK, if possible! if (buffer->sg_list_area < 0) { SHOW_ERROR( 2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE " bytes", sg_list_size ); delete_area(buffer->area); buffer->area = 0; return false; } size_t sg_list_entries = sg_list_size / sizeof(physical_entry); { size_t mapped_len; status_t res; iovec vec = { buffer->address, buffer->size }; res = get_iovec_memory_map( &vec, 1, 0, buffer->size, buffer->sg_list, sg_list_entries, &buffer->sg_count, &mapped_len ); if( res != B_OK || mapped_len != buffer->size ) { SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted " "%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res), mapped_len, buffer->size); } } return true; }
/*!	Build a temporary S/G list for the data of \a ccb in ccb->sg_list,
	splitting entries so that none crosses the controller's DMA boundary
	and none exceeds max_sg_block_size.

	\param ccb the request whose data (ccb->data / ccb->data_length) is
	       to be mapped; on success ccb->sg_count is set
	\return true on success, false if the memory map could not be created
	        or the resulting list would exceed the allowed number of
	        entries (controller limit or MAX_TEMP_SG_FRAGMENTS)
*/
static bool
fill_temp_sg(scsi_ccb *ccb)
{
	status_t res;
	scsi_bus_info *bus = ccb->bus;
	uint32 dma_boundary = bus->dma_params.dma_boundary;
	uint32 max_sg_block_size = bus->dma_params.max_sg_block_size;
	uint32 max_sg_blocks = std::min(bus->dma_params.max_sg_blocks,
		(uint32)MAX_TEMP_SG_FRAGMENTS);
	iovec vec = { ccb->data, ccb->data_length };
	size_t num_entries;
	size_t mapped_len;
	physical_entry *temp_sg = (physical_entry *)ccb->sg_list;

	res = get_iovec_memory_map(&vec, 1, 0, ccb->data_length, temp_sg,
		max_sg_blocks, &num_entries, &mapped_len);
	if (res != B_OK) {
		SHOW_ERROR(2, "cannot create temporary S/G list for IO request (%s)",
			strerror(res));
		return false;
	}

	// the entire data range must have been mapped, else the request
	// needs more fragments than we are allowed to use
	if (mapped_len != ccb->data_length)
		goto too_complex;

	if (dma_boundary != ~(uint32)0 || ccb->data_length > max_sg_block_size) {
		// S/G list may not be controller-compatible:
		// we have to split offending entries
		SHOW_FLOW(3, "Checking violation of dma boundary 0x%" B_PRIx32
			" and entry size 0x%" B_PRIx32, dma_boundary, max_sg_block_size);

		for (uint32 cur_idx = 0; cur_idx < num_entries; ++cur_idx) {
			addr_t max_len;

			// calculate space upto next dma boundary crossing
			// NOTE(review): when dma_boundary == ~0 (no boundary) this
			// wraps — dma_boundary + 1 == 0 — producing a huge unsigned
			// max_len that the std::min below clamps; confirm intended.
			max_len = (dma_boundary + 1)
				- (temp_sg[cur_idx].address & dma_boundary);
			// restrict size per sg item
			max_len = std::min(max_len, (addr_t)max_sg_block_size);

			SHOW_FLOW(4, "addr=%#" B_PRIxPHYSADDR ", size=%" B_PRIxPHYSADDR
				", max_len=%" B_PRIxADDR ", idx=%" B_PRId32 ", num=%"
				B_PRIuSIZE, temp_sg[cur_idx].address, temp_sg[cur_idx].size,
				max_len, cur_idx, num_entries);

			if (max_len < temp_sg[cur_idx].size) {
				// split sg block: shift the tail up by one and let the new
				// entry continue where the truncated one ends
				if (++num_entries > max_sg_blocks)
					goto too_complex;

				memmove(&temp_sg[cur_idx + 1], &temp_sg[cur_idx],
					(num_entries - 1 - cur_idx) * sizeof(physical_entry));

				temp_sg[cur_idx].size = max_len;
				temp_sg[cur_idx + 1].address
					= temp_sg[cur_idx + 1].address + max_len;
				temp_sg[cur_idx + 1].size -= max_len;
			}
		}
	}

	ccb->sg_count = num_entries;

	return true;

too_complex:
	SHOW_ERROR(2, "S/G list too complex for IO request (max %d entries)",
		MAX_TEMP_SG_FRAGMENTS);

	return false;
}