/* Example #1 */
/* compute_batches() - count (and optionally assign) the data batches
 * @xdf:    xdf file descriptor whose channels are scanned
 * @assign: non-zero to fill xdf->batch and link the batches,
 *          zero to only count them (dry run using a local scratch batch)
 *
 * Walks the channel list once per input array, grouping channels that
 * source the same array into contiguous batches.
 *
 * Returns the number of batches, or -1 if a channel references an
 * out-of-range array or overruns its array stride.
 */
static int compute_batches(struct xdf* xdf, int assign)
{
	struct data_batch curr, *currb;
	unsigned int nbatch = 1, iarr, foff, dlen;
	const struct xdfch* ch;

	/* In counting mode, reuse a local batch as scratch space */
	currb = assign ? xdf->batch : &curr;
	reset_batch(currb, 0, 0);

	for (iarr=0; iarr < xdf->narrays; iarr++) {
		foff = 0;
		
		// Scan channels in order to find different batches
		for (ch=xdf->channels; ch; ch=ch->next) {
			if (ch->iarray < 0)
				continue;
			dlen = xdf_get_datasize(ch->inmemtype);

			// Consistency checks
			// NOTE: must be >=, since array_stride has exactly
			// narrays entries; iarray == narrays would read past
			// the end of the array below.
			if ((unsigned int)ch->iarray >= xdf->narrays
			    || ch->offset + dlen > xdf->array_stride[ch->iarray])
				return -1;

			// Linearize the processing of channel sourcing
			// the same input array
			if ((iarr == (unsigned int)ch->iarray)
			   && !add_to_batch(currb, ch, foff)) {
				// Channel does not extend the current batch:
				// start a new one at this array offset
				nbatch++;
				if (assign)
					currb++;
				reset_batch(currb, iarr, foff);
				add_to_batch(currb, ch, foff);
			}
			foff += dlen;
		}
	}
	if (assign)
		link_batches(xdf, nbatch);

	return nbatch;
}
/* intelInitBatchBuffer() - allocate and GTT-map the global batch buffer
 *
 * Allocates a BATCH_SIZE bo (4 KiB aligned) into xvmc_driver->batch.buf,
 * maps it through the GTT and resets the batch pointers.
 *
 * Returns True on success, False on allocation or mapping failure.
 * On failure, xvmc_driver->batch.buf is left NULL (no dangling pointer).
 */
Bool intelInitBatchBuffer(void)
{
	if ((xvmc_driver->batch.buf =
	     drm_intel_bo_alloc(xvmc_driver->bufmgr,
				"batch buffer", BATCH_SIZE, 0x1000)) == NULL) {
		fprintf(stderr, "unable to alloc batch buffer\n");
		return False;
	}

	if (drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf)) {
		drm_intel_bo_unreference(xvmc_driver->batch.buf);
		/* Clear the stale pointer so later code cannot use the
		 * freed bo. */
		xvmc_driver->batch.buf = NULL;
		return False;
	}

	reset_batch();
	return True;
}
/* intelFlushBatch() - submit the current batch and prepare the next one
 *
 * Terminates the batch, submits it for execution, then tries to swap in
 * a freshly allocated and GTT-mapped bo. If either the allocation or the
 * mapping of the replacement fails, the old bo is re-mapped and reused.
 */
void intelFlushBatch(void)
{
	dri_bo *new_buf;

	i965_end_batch();

	drm_intel_bo_exec(xvmc_driver->batch.buf,
			  xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
			  0, 0, 0);

	new_buf = drm_intel_bo_alloc(xvmc_driver->bufmgr,
				     "batch buffer", BATCH_SIZE, 0x1000);
	if (new_buf == NULL || drm_intel_gem_bo_map_gtt(new_buf) != 0) {
		/* Replacement failed: drop it (if allocated) and fall
		 * back to re-mapping the buffer we just executed. */
		if (new_buf != NULL)
			drm_intel_bo_unreference(new_buf);
		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
	} else {
		/* Success: release the submitted bo and install the
		 * new one. */
		drm_intel_bo_unreference(xvmc_driver->batch.buf);
		xvmc_driver->batch.buf = new_buf;
	}

	reset_batch();
}