static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}
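/*
 * i915_gem_object_put_pages_internal() above and the get_pages paths
 * below lean on internal_free_pages() to release the high-order pages
 * backing each scatterlist segment. A minimal sketch of that helper,
 * assuming each segment's length encodes its allocation order (as set
 * up by sg_set_page() in the allocators below):
 */
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	/* Free each populated segment at the order it was allocated with. */
	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}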
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);
	return -ENOMEM;
}
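/*
 * The allocation loop above composes its gfp mask from the QUIET and
 * MAYFAIL shorthands: high-order attempts are opportunistic, so they
 * should fail fast and silently, while the final order-0 attempt may
 * retry harder but must still return NULL rather than trigger the OOM
 * killer. A sketch of the expected definitions (assumed here, not shown
 * in this excerpt):
 */
#define QUIET	(__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL	(__GFP_RETRY_MAYFAIL | __GFP_NOWARN)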
/*
 * Earlier revision of the same backend, from before the get_pages hook
 * was converted to return an errno and publish the pages through
 * __i915_gem_object_set_pages(): here the hook returns the sg_table
 * directly and reports failure via ERR_PTR().
 */
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *st;
	struct scatterlist *sg;
	int max_order;
	gfp_t gfp;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}
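/*
 * For context, these functions are installed as the internal object's
 * backing-store hooks through an ops table. A sketch of that wiring,
 * assuming the flag names used elsewhere in the driver (only one
 * revision of the get_pages hook would exist in any given tree):
 */
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};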