Example #1
/**
 * Reallocate the parser bo.
 */
static void
ilo_cp_realloc_bo(struct ilo_cp *cp)
{
    struct intel_bo *bo;

    /*
     * allocate the new bo before unreferencing the old one so that they
     * won't point at the same address, which is needed for jmpbuf
     */
    bo = intel_winsys_alloc_buffer(cp->winsys,
                                   "batch buffer", cp->bo_size * 4, 0);
    if (unlikely(!bo)) {
        /* reuse the old one */
        bo = cp->bo;
        intel_bo_reference(bo);
    }

    if (cp->bo)
        intel_bo_unreference(cp->bo);
    cp->bo = bo;

    if (!cp->sys) {
        intel_bo_map(cp->bo, true);
        cp->ptr = intel_bo_get_virtual(cp->bo);
    }
}
Example #2
/**
 * Allocate and map the buffer for writing.
 */
static VkResult cmd_writer_alloc_and_map(struct intel_cmd *cmd,
                                           enum intel_cmd_writer_type which)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];
    struct intel_bo *bo;

    bo = alloc_writer_bo(cmd->dev->winsys, which, writer->size);
    if (bo) {
        intel_bo_unref(writer->bo);
        writer->bo = bo;
    } else if (writer->bo) {
        /* reuse the old bo */
        cmd_writer_discard(cmd, which);
    } else {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    writer->used = 0;
    writer->item_used = 0;

    writer->ptr = intel_bo_map(writer->bo, true);
    if (!writer->ptr) {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    return VK_SUCCESS;
}
Example #3
/**
 * Reserve space for one rectangle (three vertices) in the current vertex
 * buffer, allocating and mapping a fresh vbo when the active one is full,
 * and return a pointer to the reserved vertices.  Returns NULL if a new
 * vbo cannot be allocated.
 */
static float *
i965_spans_accumulate_rectangle (i965_spans_t *spans)
{
    float *vertices;
    uint32_t size;

    size = spans->device->rectangle_size;
    if (unlikely (spans->vbo_offset + size > I965_VERTEX_SIZE)) {
	struct i965_vbo *vbo;

	vbo = malloc (sizeof (struct i965_vbo));
	if (unlikely (vbo == NULL)) {
	    /* allocation failed: bail out instead of corrupting the vbo list */
	    return NULL;
	}

	spans->tail->next = vbo;
	spans->tail = vbo;

	vbo->next = NULL;
	vbo->bo = intel_bo_create (&spans->device->intel,
				   I965_VERTEX_SIZE, I965_VERTEX_SIZE,
				   FALSE, I915_TILING_NONE, 0);
	vbo->count = 0;

	spans->vbo_offset = 0;
	spans->vbo_base = intel_bo_map (&spans->device->intel, vbo->bo);
    }

    vertices = spans->vbo_base + spans->vbo_offset;
    spans->vbo_offset += size;
    spans->tail->count += 3;

    return vertices;
}
Example #4
/**
 * Create a bo of the given size and, when \p cmd_len is non-zero, upload
 * \p cmd_len bytes of commands into it.
 */
static struct intel_bo *queue_create_bo(struct intel_queue *queue,
                                        VkDeviceSize size,
                                        const void *cmd,
                                        size_t cmd_len)
{
    struct intel_bo *bo;
    void *ptr;

    bo = intel_winsys_alloc_bo(queue->dev->winsys,
            "queue bo", size, true);
    if (!bo)
        return NULL;

    if (!cmd_len)
        return bo;

    ptr = intel_bo_map(bo, true);
    if (!ptr) {
        intel_bo_unref(bo);
        return NULL;
    }

    memcpy(ptr, cmd, cmd_len);
    intel_bo_unmap(bo);

    return bo;
}
Example #5
/**
 * Map the surface's bo and wrap it in an image surface, creating (and
 * caching) the fallback image on first use.
 */
cairo_surface_t *
intel_surface_map_to_image (void *abstract_surface)
{
    intel_surface_t *surface = abstract_surface;

    if (surface->drm.fallback == NULL) {
	cairo_surface_t *image;
	cairo_status_t status;
	void *ptr;

	if (surface->drm.base.backend->flush != NULL) {
	    status = surface->drm.base.backend->flush (surface);
	    if (unlikely (status))
		return _cairo_surface_create_in_error (status);
	}

	ptr = intel_bo_map (to_intel_device (surface->drm.base.device),
			    to_intel_bo (surface->drm.bo));
	if (unlikely (ptr == NULL))
	    return _cairo_surface_create_in_error (CAIRO_STATUS_NO_MEMORY);

	image = cairo_image_surface_create_for_data (ptr,
						     surface->drm.format,
						     surface->drm.width,
						     surface->drm.height,
						     surface->drm.stride);
	if (unlikely (image->status))
	    return image;

	surface->drm.fallback = image;
    }

    return surface->drm.fallback;
}
Example #6
/**
 * Acquire an image view of the surface for use as a source, reusing the
 * fallback image or an existing snapshot when possible.
 */
cairo_status_t
intel_surface_acquire_source_image (void *abstract_surface,
				    cairo_image_surface_t **image_out,
				    void **image_extra)
{
    intel_surface_t *surface = abstract_surface;
    cairo_surface_t *image;
    cairo_status_t status;
    void *ptr;

    if (surface->drm.fallback != NULL) {
	image = surface->drm.fallback;
	goto DONE;
    }

    image = _cairo_surface_has_snapshot (&surface->drm.base,
	                                 &_cairo_image_surface_backend);
    if (image != NULL)
	goto DONE;

    if (surface->drm.base.backend->flush != NULL) {
	status = surface->drm.base.backend->flush (surface);
	if (unlikely (status))
	    return status;
    }

    ptr = intel_bo_map (to_intel_device (surface->drm.base.device),
			to_intel_bo (surface->drm.bo));
    if (unlikely (ptr == NULL))
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    image = cairo_image_surface_create_for_data (ptr,
						 surface->drm.format,
						 surface->drm.width,
						 surface->drm.height,
						 surface->drm.stride);
    if (unlikely (image->status))
	return image->status;

    _cairo_surface_attach_snapshot (&surface->drm.base, image, surface_finish_and_destroy);

DONE:
    *image_out = (cairo_image_surface_t *) cairo_surface_reference (image);
    *image_extra = NULL;
    return CAIRO_STATUS_SUCCESS;
}
Example #7
/**
 * Map the writer bo and decode all recorded items.
 */
static void
ilo_builder_writer_decode_items(struct ilo_builder *builder,
                                enum ilo_builder_writer_type which)
{
   struct ilo_builder_writer *writer = &builder->writers[which];
   int i;

   if (!writer->item_used)
      return;

   writer->ptr = intel_bo_map(writer->bo, false);
   if (!writer->ptr)
      return;

   for (i = 0; i < writer->item_used; i++) {
      const struct ilo_builder_item *item = &writer->items[i];

      writer_decode_table[item->type].func(builder, which, item);
   }

   intel_bo_unmap(writer->bo);
}
Example #8
/**
 * Grow a mapped writer to at least \p new_size.  Failures are handled
 * silently.
 */
void cmd_writer_grow(struct intel_cmd *cmd,
                     enum intel_cmd_writer_type which,
                     size_t new_size)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];
    struct intel_bo *new_bo;
    void *new_ptr;

    if (new_size < writer->size << 1)
        new_size = writer->size << 1;
    /* STATE_BASE_ADDRESS requires page-aligned buffers */
    new_size = u_align(new_size, 4096);

    new_bo = alloc_writer_bo(cmd->dev->winsys, which, new_size);
    if (!new_bo) {
        cmd_writer_discard(cmd, which);
        cmd_fail(cmd, VK_ERROR_OUT_OF_DEVICE_MEMORY);
        return;
    }

    /* map and copy the data over */
    new_ptr = intel_bo_map(new_bo, true);
    if (!new_ptr) {
        intel_bo_unref(new_bo);
        cmd_writer_discard(cmd, which);
        cmd_fail(cmd, VK_ERROR_VALIDATION_FAILED_EXT);
        return;
    }

    memcpy(new_ptr, writer->ptr, writer->used);

    intel_bo_unmap(writer->bo);
    intel_bo_unref(writer->bo);

    writer->size = new_size;
    writer->bo = new_bo;
    writer->ptr = new_ptr;
}
Example #9
/**
 * Map the batch bo and decode its first \p used bytes with libdrm's
 * batch decoder.
 */
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   pipe_mutex_lock(winsys->mutex);

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() use stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);

   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}
Example #10
/**
 * Process the bo and accumulate the result.  The bo is emptied.
 */
static void
query_process_bo(const struct ilo_context *ilo, struct ilo_query *q)
{
   const uint64_t *vals;
   uint64_t tmp;
   int i;

   if (!q->used)
      return;

   vals = intel_bo_map(q->bo, false);
   if (!vals) {
      q->used = 0;
      return;
   }

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      assert(q->stride == sizeof(*vals) * 2);

      tmp = 0;
      for (i = 0; i < q->used; i++)
         tmp += vals[2 * i + 1] - vals[2 * i];

      if (q->type == PIPE_QUERY_TIME_ELAPSED)
         tmp = query_timestamp_to_ns(ilo, tmp);

      q->result.u64 += tmp;
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(q->stride == sizeof(*vals));

      q->result.u64 = query_timestamp_to_ns(ilo, vals[q->used - 1]);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      assert(q->stride == sizeof(*vals) * 22);

      for (i = 0; i < q->used; i++) {
         struct pipe_query_data_pipeline_statistics *stats =
            &q->result.pipeline_statistics;
         const uint64_t *begin = vals + 22 * i;
         const uint64_t *end = begin + 11;

         stats->ia_vertices    += end[0] - begin[0];
         stats->ia_primitives  += end[1] - begin[1];
         stats->vs_invocations += end[2] - begin[2];
         stats->gs_invocations += end[3] - begin[3];
         stats->gs_primitives  += end[4] - begin[4];
         stats->c_invocations  += end[5] - begin[5];
         stats->c_primitives   += end[6] - begin[6];
         stats->ps_invocations += end[7] - begin[7];
         stats->hs_invocations += end[8] - begin[8];
         stats->ds_invocations += end[9] - begin[9];
         stats->cs_invocations += end[10] - begin[10];
      }
      break;
   default:
      break;
   }

   intel_bo_unmap(q->bo);

   q->used = 0;
}
Example #11
/**
 * Emulate primitive restart in software: map the index buffer and split
 * the draw at every restart index.
 */
static void
draw_vbo_with_sw_restart(struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   const struct ilo_ib_state *ib = &ilo->state_vector.ib;
   const struct ilo_vma *vma;
   union {
      const void *ptr;
      const uint8_t *u8;
      const uint16_t *u16;
      const uint32_t *u32;
   } u;

   /* we will draw with IB mapped */
   if (ib->state.buffer) {
      vma = ilo_resource_get_vma(ib->state.buffer);
      u.ptr = intel_bo_map(vma->bo, false);
      if (u.ptr)
         u.u8 += vma->bo_offset + ib->state.offset;
   } else {
      vma = NULL;
      u.ptr = ib->state.user_buffer;
   }

   if (!u.ptr)
      return;

#define DRAW_VBO_WITH_SW_RESTART(pipe, info, ptr) do {   \
   const unsigned end = (info)->start + (info)->count;   \
   struct pipe_draw_info subinfo;                        \
   unsigned i;                                           \
                                                         \
   subinfo = *(info);                                    \
   subinfo.primitive_restart = false;                    \
   for (i = (info)->start; i < end; i++) {               \
      if ((ptr)[i] == (info)->restart_index) {           \
         subinfo.count = i - subinfo.start;              \
         if (subinfo.count)                              \
            (pipe)->draw_vbo(pipe, &subinfo);            \
         subinfo.start = i + 1;                          \
      }                                                  \
   }                                                     \
   subinfo.count = i - subinfo.start;                    \
   if (subinfo.count)                                    \
      (pipe)->draw_vbo(pipe, &subinfo);                  \
} while (0)

   switch (ib->state.index_size) {
   case 1:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u8);
      break;
   case 2:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u16);
      break;
   case 4:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u32);
      break;
   default:
      assert(!"unsupported index size");
      break;
   }

#undef DRAW_VBO_WITH_SW_RESTART

   if (vma)
      intel_bo_unmap(vma->bo);
}
Example #12
/**
 * Initialize the span renderer for the destination surface, choosing the
 * row renderers, setting up the vertex buffer, and acquiring the source
 * pattern.
 */
static cairo_status_t
i965_spans_init (i965_spans_t *spans,
		 i965_surface_t *dst,
		 cairo_operator_t op,
		 const cairo_pattern_t *pattern,
		 cairo_antialias_t antialias,
		 cairo_clip_t *clip,
		 const cairo_composite_rectangles_t *extents)
{
    cairo_status_t status;

    spans->device = i965_device (dst);
    i965_shader_init (&spans->shader, dst, op);

    spans->is_bounded = extents->is_bounded;
    if (extents->is_bounded) {
	if (antialias == CAIRO_ANTIALIAS_NONE)
	    spans->renderer.render_rows = i965_bounded_spans_mono;
	else
	    spans->renderer.render_rows = i965_bounded_spans;

	spans->extents = &extents->bounded;
    } else {
	if (antialias == CAIRO_ANTIALIAS_NONE)
	    spans->renderer.render_rows = i965_unbounded_spans_mono;
	else
	    spans->renderer.render_rows = i965_unbounded_spans;

	spans->extents = &extents->unbounded;
    }
    spans->xmin = spans->extents->x;
    spans->xmax = spans->extents->x + spans->extents->width;

    spans->clip_region = NULL;
    if (clip != NULL) {
	cairo_region_t *clip_region = NULL;

	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);

	if (clip_region != NULL && cairo_region_num_rectangles (clip_region) == 1)
	    clip_region = NULL;

	spans->clip_region = clip_region;
	if (status == CAIRO_INT_STATUS_UNSUPPORTED)
	    i965_shader_set_clip (&spans->shader, clip);
    }

    spans->head.next  = NULL;
    spans->head.bo    = NULL;
    spans->head.count = 0;
    spans->tail = &spans->head;

    if (spans->clip_region == NULL) {
	spans->get_rectangle = i965_spans_emit_rectangle;
    } else {
	spans->get_rectangle = i965_spans_accumulate_rectangle;
	spans->head.bo = intel_bo_create (&spans->device->intel,
					  I965_VERTEX_SIZE, I965_VERTEX_SIZE,
					  FALSE, I915_TILING_NONE, 0);
	if (unlikely (spans->head.bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	spans->vbo_base = intel_bo_map (&spans->device->intel, spans->head.bo);
    }
    spans->vbo_offset = 0;

    return i965_shader_acquire_pattern (&spans->shader,
					&spans->shader.source,
					pattern, &extents->bounded);
}
Example #13
/**
 * Composite a run of glyphs onto the surface, rendering through an
 * intermediate mask when the glyphs overlap or the composite is unbounded.
 */
cairo_int_status_t
i965_surface_glyphs (void			*abstract_surface,
		     cairo_operator_t		 op,
		     const cairo_pattern_t	*source,
		     cairo_glyph_t		*g,
		     int			 num_glyphs,
		     cairo_scaled_font_t	*scaled_font,
		     cairo_clip_t		*clip,
		     int *num_remaining)
{
    i965_surface_t *surface = abstract_surface;
    i965_surface_t *mask = NULL;
    i965_device_t *device;
    i965_glyphs_t glyphs;
    cairo_composite_rectangles_t extents;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_bool_t overlap;
    cairo_region_t *clip_region = NULL;
    intel_bo_t *last_bo = NULL;
    cairo_scaled_glyph_t *glyph_cache[64];
    cairo_status_t status;
    int mask_x = 0, mask_y = 0;
    int i = 0;

    *num_remaining = 0;
    status = _cairo_composite_rectangles_init_for_glyphs (&extents,
							  surface->intel.drm.width,
							  surface->intel.drm.height,
							  op, source,
							  scaled_font,
							  g, num_glyphs,
							  clip,
							  &overlap);
    if (unlikely (status))
	return status;

    if (clip != NULL && _cairo_clip_contains_rectangle (clip, &extents.mask))
	clip = NULL;

    if (clip != NULL && extents.is_bounded) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	status = _cairo_clip_rectangle (clip, &extents.bounded);
	if (unlikely (status))
	    return status;

	have_clip = TRUE;
    }

    if (overlap || ! extents.is_bounded) {
	cairo_format_t format;

	format = CAIRO_FORMAT_A8;
	if (scaled_font->options.antialias == CAIRO_ANTIALIAS_SUBPIXEL)
	    format = CAIRO_FORMAT_ARGB32;

	mask = (i965_surface_t *)
	    i965_surface_create_internal (&i965_device (surface)->intel.base,
					  format,
					  extents.bounded.width,
					  extents.bounded.height,
					  I965_TILING_DEFAULT,
					  TRUE);
	if (unlikely (mask->intel.drm.base.status))
	    return mask->intel.drm.base.status;

	status = _cairo_surface_paint (&mask->intel.drm.base,
				       CAIRO_OPERATOR_CLEAR,
				       &_cairo_pattern_clear.base,
				       NULL);
	if (unlikely (status)) {
	    cairo_surface_destroy (&mask->intel.drm.base);
	    return status;
	}

	i965_shader_init (&glyphs.shader, mask, CAIRO_OPERATOR_ADD);

	status = i965_shader_acquire_pattern (&glyphs.shader, &glyphs.shader.source,
					      &_cairo_pattern_white.base,
					      &extents.bounded);
	if (unlikely (status)) {
	    cairo_surface_destroy (&mask->intel.drm.base);
	    return status;
	}

	mask_x = -extents.bounded.x;
	mask_y = -extents.bounded.y;
    } else {
	i965_shader_init (&glyphs.shader, surface, op);

	status = i965_shader_acquire_pattern (&glyphs.shader, &glyphs.shader.source,
					      source, &extents.bounded);
	if (unlikely (status))
	    return status;

	if (clip != NULL) {
	    status = _cairo_clip_get_region (clip, &clip_region);
	    assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);

	    if (status == CAIRO_INT_STATUS_UNSUPPORTED)
		i965_shader_set_clip (&glyphs.shader, clip);
	}
    }

    glyphs.head.next = NULL;
    glyphs.head.bo = NULL;
    glyphs.head.count = 0;
    glyphs.tail = &glyphs.head;

    device = i965_device (surface);
    if (mask != NULL || clip_region == NULL) {
	glyphs.get_rectangle = i965_glyphs_emit_rectangle;
    } else {
	glyphs.get_rectangle = i965_glyphs_accumulate_rectangle;
	glyphs.head.bo = intel_bo_create (&device->intel,
					  I965_VERTEX_SIZE, I965_VERTEX_SIZE,
					  FALSE, I915_TILING_NONE, 0);
	if (unlikely (glyphs.head.bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	glyphs.vbo_base = intel_bo_map (&device->intel, glyphs.head.bo);
    }
    glyphs.vbo_offset = 0;

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto CLEANUP_GLYPHS;

    _cairo_scaled_font_freeze_cache (scaled_font);
    //private = _cairo_scaled_font_get_device (scaled_font, device);
    if (scaled_font->surface_private == NULL) {
	scaled_font->surface_private = device;
	scaled_font->surface_backend = surface->intel.drm.base.backend;
	cairo_list_add (&scaled_font->link, &device->intel.fonts);
    }

    memset (glyph_cache, 0, sizeof (glyph_cache));

    for (i = 0; i < num_glyphs; i++) {
	cairo_scaled_glyph_t *scaled_glyph;
	int x, y, x1, x2, y1, y2;
	int cache_index = g[i].index % ARRAY_LENGTH (glyph_cache);
	intel_glyph_t *glyph;

	scaled_glyph = glyph_cache[cache_index];
	if (scaled_glyph == NULL ||
	    _cairo_scaled_glyph_index (scaled_glyph) != g[i].index)
	{
	    status = _cairo_scaled_glyph_lookup (scaled_font,
						 g[i].index,
						 CAIRO_SCALED_GLYPH_INFO_METRICS,
						 &scaled_glyph);
	    if (unlikely (status))
		goto FINISH;

	    glyph_cache[cache_index] = scaled_glyph;
	}

	if (unlikely (scaled_glyph->metrics.width  == 0 ||
		      scaled_glyph->metrics.height == 0))
	{
	    continue;
	}

	/* XXX glyph images are snapped to pixel locations */
	x = _cairo_lround (g[i].x);
	y = _cairo_lround (g[i].y);

	x1 = x + _cairo_fixed_integer_floor (scaled_glyph->bbox.p1.x);
	y1 = y + _cairo_fixed_integer_floor (scaled_glyph->bbox.p1.y);
	x2 = x + _cairo_fixed_integer_ceil (scaled_glyph->bbox.p2.x);
	y2 = y + _cairo_fixed_integer_ceil (scaled_glyph->bbox.p2.y);

	if (x2 < extents.bounded.x ||
	    y2 < extents.bounded.y ||
	    x1 > extents.bounded.x + extents.bounded.width ||
	    y1 > extents.bounded.y + extents.bounded.height)
	{
	    continue;
	}

	if (scaled_glyph->surface_private == NULL) {
	    status = intel_get_glyph (&device->intel, scaled_font, scaled_glyph);
	    if (unlikely (status == CAIRO_INT_STATUS_NOTHING_TO_DO)) {
		status = CAIRO_STATUS_SUCCESS;
		continue;
	    }
	    if (unlikely (status))
		goto FINISH;
	}
	glyph = intel_glyph_pin (scaled_glyph->surface_private);

	if (glyph->cache->buffer.bo != last_bo) {
	    intel_buffer_cache_t *cache = glyph->cache;

	    glyphs.shader.mask.type.vertex   = VS_GLYPHS;
	    glyphs.shader.mask.type.fragment = FS_GLYPHS;
	    glyphs.shader.mask.type.pattern  = PATTERN_BASE;

	    glyphs.shader.mask.base.bo = cache->buffer.bo;
	    glyphs.shader.mask.base.format = cache->buffer.format;
	    glyphs.shader.mask.base.width  = cache->buffer.width;
	    glyphs.shader.mask.base.height = cache->buffer.height;
	    glyphs.shader.mask.base.stride = cache->buffer.stride;
	    glyphs.shader.mask.base.filter = i965_filter (CAIRO_FILTER_NEAREST);
	    glyphs.shader.mask.base.extend = i965_extend (CAIRO_EXTEND_NONE);
	    glyphs.shader.mask.base.content = CAIRO_CONTENT_ALPHA; /* XXX */

	    glyphs.shader.committed = FALSE;
	    status = i965_shader_commit (&glyphs.shader, device);
	    if (unlikely (status))
		goto FINISH;

	    last_bo = cache->buffer.bo;
	}

	x2 = x1 + glyph->width;
	y2 = y1 + glyph->height;

	if (mask_x)
	    x1 += mask_x, x2 += mask_x;
	if (mask_y)
	    y1 += mask_y, y2 += mask_y;

	i965_add_glyph_rectangle (&glyphs, x1, y1, x2, y2, glyph);
    }

    if (mask != NULL && clip_region != NULL)
	i965_clipped_vertices (device, &glyphs.head, clip_region);

    status = CAIRO_STATUS_SUCCESS;
  FINISH:
    _cairo_scaled_font_thaw_cache (scaled_font);
    cairo_device_release (surface->intel.drm.base.device);
  CLEANUP_GLYPHS:
    i965_shader_fini (&glyphs.shader);

    if (glyphs.head.bo != NULL) {
	struct i965_vbo *vbo, *next;

	intel_bo_destroy (&device->intel, glyphs.head.bo);
	for (vbo = glyphs.head.next; vbo != NULL; vbo = next) {
	    next = vbo->next;
	    intel_bo_destroy (&device->intel, vbo->bo);
	    free (vbo);
	}
    }

    if (unlikely (status == CAIRO_INT_STATUS_UNSUPPORTED)) {
	cairo_path_fixed_t path;

	_cairo_path_fixed_init (&path);
	status = _cairo_scaled_font_glyph_path (scaled_font,
						g + i, num_glyphs - i,
						&path);
	if (mask_x | mask_y) {
	    _cairo_path_fixed_translate (&path,
					 _cairo_fixed_from_int (mask_x),
					 _cairo_fixed_from_int (mask_y));
	}
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = surface->intel.drm.base.backend->fill (glyphs.shader.target,
							    glyphs.shader.op,
							    mask != NULL ? &_cairo_pattern_white.base : source,
							    &path,
							    CAIRO_FILL_RULE_WINDING,
							    0,
							    scaled_font->options.antialias,
							    clip);
	}
	_cairo_path_fixed_fini (&path);
    }

    if (mask != NULL) {
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = i965_surface_mask_internal (surface, op, source, mask,
					         clip, &extents);
	}
	cairo_surface_finish (&mask->intel.drm.base);
	cairo_surface_destroy (&mask->intel.drm.base);
    }

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}