/*
 * Map the current message's TX and RX buffers for streaming DMA.
 *
 * Returns 1 when both buffers are successfully mapped and DMA may be
 * used for this transfer, 0 when the caller must fall back to PIO.
 */
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	/* DMA is disabled for this chip: force PIO. */
	if (!drv_data->cur_chip->enable_dma)
		return 0;

	/* The caller mapped the buffers itself; just report whether
	 * both DMA handles are usable. */
	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	/* Misaligned buffers cannot be stream-mapped: fall back to PIO. */
	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null: substitute the shared
	 * scratch word so the engine still has somewhere to write. */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null: same scratch-word trick
	 * for the transmit side. */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap. */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		/* Undo the tx mapping so it is not leaked. */
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}
/*
 * Copy p_out.l_total_size elements from in (starting at the first
 * local offset of p_in) into out.
 *
 * Fast path: when both the source and the destination are suitably
 * aligned, copy four floats at a time through `vector float` loads
 * and stores; any remaining tail elements are copied scalar.
 * Slow path: plain element-by-element copy.
 *
 * Fixes vs. the previous version:
 *  - the vector path used to drop the final length % 4 elements;
 *  - the vector path never verified the alignment of `out`, so a
 *    misaligned destination produced misaligned vector stores.
 */
void compute(
    in0_type in,
    out0_type out,
    Pinfo const& p_in,
    Pinfo const& p_out)
{
    int length = p_out.l_total_size;
    int offset = p_in.l_offset[0];

    /* Both pointers must be aligned for the vector load/store path. */
    if (IS_DMA_ALIGNED(in + offset) && IS_DMA_ALIGNED(out)) {
        vector float* a = (vector float*)(in + offset);
        vector float* z = (vector float*)out;
        int chunks = length / 4; /* full 4-float groups */

        for (int i = 0; i < chunks; ++i)
            *z++ = *a++;

        /* Scalar copy of the tail the vector loop did not cover. */
        for (int i = chunks * 4; i < length; ++i)
            out[i] = in[offset + i];
    } else {
        /* Unaligned fallback: straightforward element copy. */
        for (int i = 0; i < length; ++i)
            out[i] = in[offset + i];
    }
}