/*
 * Build all info and do all mappings required for a blit.
 */
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg,
		  drm_via_dmablit_t *xfer)
{
	int ret = 0;

	vsg->bounce_buffer = NULL;
	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */
	if ((xfer->mem_stride - xfer->line_length) > 2 * PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	/* A fully contiguous transfer can be collapsed into a single line. */
	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes
	 * a DoS security hole.
	 */
	if (xfer->num_lines > 2048 ||
	    (xfer->num_lines * xfer->mem_stride > (2048 * 2048 * 4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */
	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16 byte boundaries. This seems a bit restrictive however.
	 * VIA has been contacted about this. Meanwhile, impose the following
	 * restrictions:
	 */
#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) !=
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(vsg);
		return ret;
	}

	via_map_blit_for_device(xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(vsg);
		return ret;
	}
	via_map_blit_for_device(xfer, vsg, 1);

	return 0;
}
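/*
 * Illustrative only, not part of the driver: a minimal sketch of a request
 * that satisfies every check in the version above. Only fields that the two
 * versions themselves dereference are touched; their exact declarations (and
 * any other members) live in the drm_via_dmablit_t definition in via_drm.h,
 * so the parameter types here are assumptions. A fully contiguous 1024x768
 * 32bpp copy is collapsed by the equal-strides branch into one
 * 4096 * 768 = 3145728-byte line, well under the 2048 * 2048 * 4 byte DoS
 * cap. (The second version below caps mem_stride at 2048 bytes and has no
 * collapse branch, so it would reject this particular request.)
 */
static void example_fill_blit(drm_via_dmablit_t *xfer,
			      unsigned char *sys_buf,	/* 16-byte aligned */
			      __u32 fb_offset)		/* 4-byte aligned */
{
	xfer->line_length = 1024 * 4;		/* bytes per scanline */
	xfer->mem_stride = xfer->line_length;	/* contiguous in system memory */
	xfer->fb_stride = xfer->line_length;	/* contiguous in the frame buffer */
	xfer->num_lines = 768;			/* <= 2048 */
	xfer->mem_addr = sys_buf;		/* & 15 must be 0 without VIA_BUGFREE */
	xfer->fb_addr = fb_offset;		/* & 3 must be 0 */
	xfer->to_fb = 1;			/* system memory -> frame buffer */
}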
static int
via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg,
		  drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;
	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */
	if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
	    (xfer->mem_stride > 2048)) {
		DRM_ERROR("Too large system memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	if (xfer->num_lines > 2048) {
		DRM_ERROR("Too many PCI DMA bitblt lines.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */
	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16 byte boundaries. This seems a bit restrictive however.
	 * VIA has been contacted about this. Meanwhile, impose the following
	 * restrictions:
	 */
#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) !=
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 15))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}
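/*
 * Illustrative only, not the driver's actual blit path: a minimal sketch of
 * how a caller could consume the first version above. The helper name and
 * the queue/engine handling that the real driver performs around this call
 * are omitted or made up here; only via_build_sg_info(), via_free_sg_info()
 * and the standard kmalloc()/kfree() calls are taken from real code.
 */
static int example_setup_blit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_sg_info_t *vsg;
	int ret;

	vsg = kmalloc(sizeof(*vsg), GFP_KERNEL);
	if (vsg == NULL)
		return -ENOMEM;

	/*
	 * Validates the request, locks the user pages and builds the DMA
	 * descriptor chain. On failure it has already torn down its own
	 * state via via_free_sg_info(), so only the container is freed here.
	 */
	ret = via_build_sg_info(dev, vsg, xfer);
	if (ret) {
		kfree(vsg);
		return ret;
	}

	/*
	 * At this point *vsg describes a ready-to-run blit. A real caller
	 * would hand it to the blit engine and, once the transfer completes,
	 * release it with via_free_sg_info() followed by kfree(vsg); that
	 * asynchronous part is elided here.
	 */
	return 0;
}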