Example 1
int DMAC_setup_buffers(int w, int h, int do_tl_conv)
{
	int pitch;
	struct tddmac_buffer ysrc, ydst;
	struct tddmac_buffer csrc, cdst;

	DMAC_data.w = ALIGN_STRIDE(w);
	DMAC_data.h = h;
	DMAC_data.do_tl_conv = do_tl_conv;

	if (do_tl_conv) {
		/* round (w - 1) up to the next power of two */
		pitch = (w - 1);
		pitch |= pitch >> 1;
		pitch |= pitch >> 2;
		pitch |= pitch >> 4;
		pitch |= pitch >> 8;
		pitch |= pitch >> 16;
		pitch++;

		if (pitch < 256)
			pitch = 256;

		close_meram(&DMAC_data.meram_data); /* no-op if not already open */
		open_meram(&DMAC_data.meram_data);

		setup_icb(&DMAC_data.meram_data,
			&DMAC_data.meram_data.decY_icb,
			pitch, VALIGN(h), 128, 0xD, 0, DMAC_YICB);
		setup_icb(&DMAC_data.meram_data,
			&DMAC_data.meram_data.decC_icb,
			pitch, VALIGN(h) / 2, 64, 0xC, 0, DMAC_CICB);
		DMAC_data.pitch = pitch;
		if (pitch < 1024)
			pitch = 1024;
	} else {
		/* linear (non tile-converted) setup follows here; the
		   remainder of this function is not shown in this excerpt */
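The tile-conversion path above rounds the source width up to the next power of two, with a 256 floor, before feeding it to setup_icb() as the ICB pitch. A minimal standalone sketch of that rounding step, assuming 32-bit widths; round_up_pitch and the sample widths are illustrative and not part of the original code:

#include <stdio.h>

/* Round w up to the next power of two, but never below 256,
 * mirroring the pitch computation in DMAC_setup_buffers(). */
static unsigned int round_up_pitch(unsigned int w)
{
	unsigned int pitch = w - 1;

	pitch |= pitch >> 1;
	pitch |= pitch >> 2;
	pitch |= pitch >> 4;
	pitch |= pitch >> 8;
	pitch |= pitch >> 16;
	pitch++;

	return (pitch < 256) ? 256 : pitch;
}

int main(void)
{
	/* 720 -> 1024, 1024 -> 1024, 100 -> 256 */
	printf("%u %u %u\n",
	       round_up_pitch(720),
	       round_up_pitch(1024),
	       round_up_pitch(100));
	return 0;
}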
Example 2
long
mcvdec_uf_get_frame_memory(MCVDEC_CONTEXT_T *context,
			   long xpic_size,
			   long ypic_size,
			   long required_fmem_cnt,
			   long nsampling,
			   long *fmem_cnt,
			   long fmem_x_size[],
			   long fmem_size[],
			   MCVDEC_FMEM_INFO_T *fmem[])
{
	MCVDEC_FMEM_INFO_T *_fmem;
	size_t fmemsize;
	long fmem_x;
	int i, ret;
	void *ypic_vaddr;
	unsigned long ypic_paddr, cpic_paddr;
	unsigned long align, alloc_size;
	shvpu_decode_PrivateType *shvpu_decode_Private =
		(shvpu_decode_PrivateType *)context->user_info;

	logd("%s(%d, %d, %d, %d) invoked.\n",
	       __FUNCTION__, xpic_size, ypic_size,
	       required_fmem_cnt, nsampling);

	if (shvpu_decode_Private->features.tl_conv_mode == OMX_FALSE) {
		fmem_x = ROUND_2POW(xpic_size, 32);
		align = 32;
		fmemsize = fmem_x * (ROUND_2POW(ypic_size, 16));
		alloc_size = fmemsize * 3 / 2;
	} else {
		unsigned long pitch;
		int next_power = 0;
		int align_bits;

		/* i ends up as floor(log2(xpic_size)); next_power is set
		   when xpic_size is not a power of two, so pitch becomes
		   the smallest power of two >= xpic_size */
		pitch = xpic_size;
		for (i = 0; i < 32; i++) {
			if (pitch <= 1)
				break;
			if (pitch & 1)
				next_power = 1;
			pitch >>= 1;
		}
		pitch = (1 << (i + next_power));
		if (!shvpu_decode_Private->ipmmui_data) {
			shvpu_decode_Private->ipmmui_data = init_ipmmu(
				shvpu_decode_Private->uio_start_phys, pitch,
				shvpu_decode_Private->features.tl_conv_tbm,
				shvpu_decode_Private->features.tl_conv_vbm);
		}
		align_bits = i + next_power +
			shvpu_decode_Private->features.tl_conv_tbm;

		fmem_x = pitch;
		align = (1 << align_bits);
		fmemsize = fmem_x * (ROUND_2POW(ypic_size, 16));
		/* round the Y+C allocation up to the alignment and reserve
		   one extra alignment unit so the start address can later
		   be rounded up inside the buffer */
		alloc_size = ((fmemsize * 3 / 2) + (align - 1)) & ~(align - 1);
		alloc_size += align;
	}
#ifdef MERAM_ENABLE
	open_meram(&shvpu_decode_Private->meram_data);
	setup_icb(&shvpu_decode_Private->meram_data,
		&shvpu_decode_Private->meram_data.decY_icb,
		fmem_x, ROUND_2POW(ypic_size, 16), 128, 0xD, 1, 21);
	setup_icb(&shvpu_decode_Private->meram_data,
		&shvpu_decode_Private->meram_data.decC_icb,
		fmem_x, ROUND_2POW(ypic_size, 16) / 2, 64, 0xC, 1, 22);
#endif

	/*
	   In SYNC mode the required_fmem_cnt value may not be large
	   enough, because little stream information has been buffered
	   yet.  A simple heuristic is to prepare one extra buffer chunk.
	*/
	if (shvpu_decode_Private->avCodec->codecMode == MCVDEC_MODE_SYNC)
		required_fmem_cnt += 1;

	/*
	   If the OMX output buffers have been allocated by AllocateBuffers(),
	   the fmem buffers must be shared with downstream modules such as
	   the renderer, so more fmem buffers may be necessary to keep
	   buffers flowing efficiently.
	 */
	if (!shvpu_decode_Private->features.use_buffer_mode) {
		required_fmem_cnt += shvpu_decode_Private->
			ports[OMX_BASE_FILTER_OUTPUTPORT_INDEX]->
			sPortParam.nBufferCountActual;
		if (required_fmem_cnt > MCVDEC_MAX_FMEM_CNT)
			required_fmem_cnt = MCVDEC_MAX_FMEM_CNT;
	}

	shvpu_decode_Private->avCodec->fmem = (shvpu_fmem_data *)
		calloc(required_fmem_cnt, sizeof(shvpu_fmem_data));

	if (shvpu_decode_Private->avCodec->fmem == NULL)
		return MCVDEC_FMEM_SKIP_BY_USER;

	_fmem = *fmem = (MCVDEC_FMEM_INFO_T *)
		calloc(required_fmem_cnt, sizeof(MCVDEC_FMEM_INFO_T));

	if (*fmem == NULL) {
		free(shvpu_decode_Private->avCodec->fmem);
		shvpu_decode_Private->avCodec->fmem = NULL;
		return MCVDEC_FMEM_SKIP_BY_USER;
	}

	shvpu_decode_Private->avCodec->fmem_info = _fmem;

	shvpu_decode_Private->avCodec->fmem_size = required_fmem_cnt;

	*fmem_cnt = 0;

	for (i = 0; i < required_fmem_cnt; i++) {
		ypic_vaddr = pmem_alloc(alloc_size, align, &ypic_paddr);
		if (ypic_vaddr == NULL)
			break;
		shvpu_decode_Private->avCodec->fmem[i].fmem_start = ypic_paddr;
		shvpu_decode_Private->avCodec->fmem[i].fmem_len = alloc_size;
		pthread_mutex_init(&shvpu_decode_Private->
				   avCodec->fmem[i].filled, NULL);
		if (shvpu_decode_Private->features.tl_conv_mode == OMX_TRUE) {
			/* round the start address up to the alignment */
			ypic_paddr = (ypic_paddr + (align - 1)) & ~(align - 1);
#ifndef VPU_INTERNAL_TL
			/* access via IPMMUI */
			ypic_paddr = phys_to_ipmmui(
				shvpu_decode_Private->ipmmui_data,
				ypic_paddr);
#endif
		}
		cpic_paddr = ypic_paddr + fmemsize;
		_fmem[i].Ypic_addr = ypic_paddr;
		logd("fmem[%d].Ypic_addr = %lx\n", i, _fmem[i].Ypic_addr);
		_fmem[i].Ypic_bot_addr = ypic_paddr + fmemsize / 2;
		logd("fmem[%d].Ypic_bot_addr = %lx\n",
		       i, _fmem[i].Ypic_bot_addr);
		_fmem[i].Cpic_addr = cpic_paddr;
		logd("fmem[%d].Cpic_addr = %lx\n", i, _fmem[i].Cpic_addr);
		_fmem[i].Cpic_bot_addr = cpic_paddr + fmemsize / 4;
		logd("fmem[%d].Cpic_bot_addr = %lx\n", i,
		       _fmem[i].Cpic_bot_addr);
		*fmem_cnt = i + 1;
	}

	fmem_x_size[MCVDEC_FMX_DEC] =
		fmem_x_size[MCVDEC_FMX_REF] =
		fmem_x_size[MCVDEC_FMX_FLT] = fmem_x;
	fmem_size[MCVDEC_FMX_DEC] =
		fmem_size[MCVDEC_FMX_REF] =
		fmem_size[MCVDEC_FMX_FLT] = fmemsize;

	if (*fmem_cnt == required_fmem_cnt)
		return MCVDEC_NML_END;

	/* cleanup on failure */

	for (i = 0; i < *fmem_cnt; i++) {
		shvpu_fmem_data *outbuf = &shvpu_decode_Private->avCodec->fmem[i];
		phys_pmem_free(outbuf->fmem_start, outbuf->fmem_len);
		pthread_mutex_destroy(&outbuf->filled);
	}
	free(shvpu_decode_Private->avCodec->fmem);
	shvpu_decode_Private->avCodec->fmem = NULL;
	free(*fmem);
	*fmem = NULL;

	return MCVDEC_FMEM_SKIP_BY_USER;
}
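The frame-memory sizing above relies on two pieces of arithmetic: rounding a dimension up to a power-of-two multiple and laying out the Y and CbCr planes of a semi-planar YUV420 frame inside one allocation that is 3/2 of the luma plane size. A minimal sketch of that arithmetic, assuming ROUND_2POW(v, a) rounds v up to the next multiple of a (the macro itself is not shown in this excerpt); the frame dimensions and base address are made up:

#include <stdio.h>

/* Assumed to match the ROUND_2POW() macro used above: round v up to
 * the next multiple of a, where a is a power of two. */
#define ROUND_2POW(v, a)	(((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long xpic_size = 1280, ypic_size = 720;  /* sample frame */
	unsigned long ypic_paddr = 0x40000000UL;          /* pretend base address */
	unsigned long fmem_x, fmemsize, alloc_size;

	/* Same sizing as the non-tile-conversion branch above: luma stride
	 * rounded up to 32, height rounded up to 16, and one allocation
	 * holding the Y plane plus a half-size CbCr plane (3/2 total). */
	fmem_x = ROUND_2POW(xpic_size, 32);
	fmemsize = fmem_x * ROUND_2POW(ypic_size, 16);
	alloc_size = fmemsize * 3 / 2;

	printf("fmem_x=%lu fmemsize=%lu alloc_size=%lu\n",
	       fmem_x, fmemsize, alloc_size);

	/* Plane layout inside the allocation, as in the fmem loop: the Y
	 * bottom field starts halfway into the Y plane, the CbCr plane
	 * starts right after the Y plane. */
	printf("Ypic=%#lx Ypic_bot=%#lx Cpic=%#lx Cpic_bot=%#lx\n",
	       ypic_paddr,
	       ypic_paddr + fmemsize / 2,
	       ypic_paddr + fmemsize,
	       ypic_paddr + fmemsize + fmemsize / 4);

	return 0;
}

With a 1280x720 frame this prints a 1280-byte luma stride, a 921600-byte Y plane and a 1382400-byte allocation, matching the values the loop above stores into each MCVDEC_FMEM_INFO_T entry.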