/* Allocates the memory sub-blocks that the VPU encoder requests through
 * mem_info. Virtual-memory blocks are heap-allocated; physical-memory
 * blocks come from the VPU encoder allocator. Each allocated block is
 * appended to the corresponding tracking list (virt_enc_mem_blocks /
 * phys_enc_mem_blocks) so it can be released later.
 *
 * Returns TRUE on success, FALSE if any allocation fails (blocks
 * allocated up to that point stay in the tracking lists for cleanup).
 */
static gboolean gst_imx_vpu_base_enc_alloc_enc_mem_blocks(GstImxVpuBaseEnc *vpu_base_enc)
{
	int i;
	int size;
	unsigned char *ptr;

	/* Fix: message previously said "decoding"; this is the encoder's allocation path */
	GST_INFO_OBJECT(vpu_base_enc, "need to allocate %d sub blocks for encoding", vpu_base_enc->mem_info.nSubBlockNum);

	for (i = 0; i < vpu_base_enc->mem_info.nSubBlockNum; ++i)
	{
		/* over-allocate by the alignment so the aligned start address
		 * still leaves nSize usable bytes */
		size = vpu_base_enc->mem_info.MemSubBlock[i].nAlignment + vpu_base_enc->mem_info.MemSubBlock[i].nSize;

		GST_INFO_OBJECT(vpu_base_enc, "sub block %d type: %s size: %d", i, (vpu_base_enc->mem_info.MemSubBlock[i].MemType == VPU_MEM_VIRT) ? "virtual" : "phys", size);

		if (vpu_base_enc->mem_info.MemSubBlock[i].MemType == VPU_MEM_VIRT)
		{
			if (!gst_imx_vpu_alloc_virt_mem_block(&ptr, size))
				return FALSE;

			vpu_base_enc->mem_info.MemSubBlock[i].pVirtAddr = (unsigned char *)ALIGN_VAL_TO(ptr, vpu_base_enc->mem_info.MemSubBlock[i].nAlignment);

			/* store the unaligned base pointer so it can be freed correctly */
			gst_imx_vpu_append_virt_mem_block(ptr, &(vpu_base_enc->virt_enc_mem_blocks));
		}
		else if (vpu_base_enc->mem_info.MemSubBlock[i].MemType == VPU_MEM_PHY)
		{
			GstImxPhysMemory *memory = (GstImxPhysMemory *)gst_allocator_alloc(gst_imx_vpu_enc_allocator_obtain(), size, NULL);
			if (memory == NULL)
				return FALSE;

			/* it is OK to use mapped_virt_addr directly without explicit mapping here,
			 * since the VPU encoder allocation functions define a virtual address upon
			 * allocation, so an actual "mapping" does not exist (map just returns
			 * mapped_virt_addr, unmap does nothing) */
			vpu_base_enc->mem_info.MemSubBlock[i].pVirtAddr = (unsigned char *)ALIGN_VAL_TO((unsigned char*)(memory->mapped_virt_addr), vpu_base_enc->mem_info.MemSubBlock[i].nAlignment);
			vpu_base_enc->mem_info.MemSubBlock[i].pPhyAddr = (unsigned char *)ALIGN_VAL_TO((unsigned char*)(memory->phys_addr), vpu_base_enc->mem_info.MemSubBlock[i].nAlignment);

			gst_imx_vpu_append_phys_mem_block(memory, &(vpu_base_enc->phys_enc_mem_blocks));
		}
		else
		{
			GST_WARNING_OBJECT(vpu_base_enc, "sub block %d type is unknown - skipping", i);
		}
	}

	return TRUE;
}
/* Configures the framebuffer pool described by params: computes plane
 * strides and sizes for the given picture geometry and chroma format,
 * then allocates one physically contiguous block per framebuffer from
 * the supplied allocator and fills each VpuFrameBuffer with aligned
 * physical and virtual plane addresses.
 *
 * Returns TRUE on success, FALSE if a framebuffer allocation fails.
 */
static gboolean gst_fsl_vpu_framebuffers_configure(GstFslVpuFramebuffers *framebuffers, GstFslVpuFramebufferParams *params, GstAllocator *allocator)
{
	int align;
	guint idx;

	g_assert(GST_IS_FSL_PHYS_MEM_ALLOCATOR(allocator));

	/* keep a reserve of framebuffers on top of the working set */
	framebuffers->num_reserve_framebuffers = params->min_framebuffer_count;
	framebuffers->num_framebuffers = MAX((guint)(params->min_framebuffer_count), (guint)10) + framebuffers->num_reserve_framebuffers;
	framebuffers->num_available_framebuffers = framebuffers->num_framebuffers - framebuffers->num_reserve_framebuffers;
	framebuffers->framebuffers = (VpuFrameBuffer *)g_slice_alloc(sizeof(VpuFrameBuffer) * framebuffers->num_framebuffers);

	framebuffers->allocator = allocator;
	framebuffers->pic_width = params->pic_width;
	framebuffers->pic_height = params->pic_height;

	/* luma plane: stride rounded up to the frame alignment; interlaced
	 * content needs the height rounded up to twice that alignment */
	framebuffers->y_stride = ALIGN_VAL_TO(params->pic_width, FRAME_ALIGN);
	framebuffers->y_size = framebuffers->y_stride * (params->interlace ? ALIGN_VAL_TO(params->pic_height, (2 * FRAME_ALIGN)) : ALIGN_VAL_TO(params->pic_height, FRAME_ALIGN));

	switch (params->mjpeg_source_format)
	{
		case 0:
			/* I420 (4:2:0): chroma planes are a quarter of the luma size */
			framebuffers->uv_stride = framebuffers->y_stride / 2;
			framebuffers->u_size = framebuffers->v_size = framebuffers->mv_size = framebuffers->y_size / 4;
			break;

		case 1:
			/* Y42B (4:2:2 horizontal): chroma planes are half the luma size */
			framebuffers->uv_stride = framebuffers->y_stride / 2;
			framebuffers->u_size = framebuffers->v_size = framebuffers->mv_size = framebuffers->y_size / 2;
			break;

		case 3:
			/* Y444 (4:4:4): chroma planes match the luma size */
			framebuffers->uv_stride = framebuffers->y_stride;
			framebuffers->u_size = framebuffers->v_size = framebuffers->mv_size = framebuffers->y_size;
			break;

		default:
			/* other source-format values are not handled by this function */
			g_assert_not_reached();
	}

	/* round every plane size up to the requested address alignment */
	align = params->address_alignment;
	if (align > 1)
	{
		framebuffers->y_size = ALIGN_VAL_TO(framebuffers->y_size, align);
		framebuffers->u_size = ALIGN_VAL_TO(framebuffers->u_size, align);
		framebuffers->v_size = ALIGN_VAL_TO(framebuffers->v_size, align);
		framebuffers->mv_size = ALIGN_VAL_TO(framebuffers->mv_size, align);
	}

	/* the extra "align" bytes leave room to shift the base address
	 * forward to an aligned position inside the block */
	framebuffers->total_size = framebuffers->y_size + framebuffers->u_size + framebuffers->v_size + framebuffers->mv_size + align;

	GST_DEBUG_OBJECT(framebuffers, "num framebuffers: total: %u reserved: %u available: %d", framebuffers->num_framebuffers, framebuffers->num_reserve_framebuffers, framebuffers->num_available_framebuffers);
	GST_DEBUG_OBJECT(framebuffers, "framebuffer memory block size: total: %d Y: %d U: %d V: %d Mv: %d alignment: %d", framebuffers->total_size, framebuffers->y_size, framebuffers->u_size, framebuffers->v_size, framebuffers->mv_size, align);

	for (idx = 0; idx < framebuffers->num_framebuffers; ++idx)
	{
		GstFslPhysMemory *memory;
		VpuFrameBuffer *fb;
		unsigned char *phys_base, *virt_base;

		fb = &(framebuffers->framebuffers[idx]);

		memory = (GstFslPhysMemory *)gst_allocator_alloc(allocator, framebuffers->total_size, NULL);
		if (memory == NULL)
			return FALSE;

		/* track the block so it can be released later */
		gst_fsl_vpu_append_phys_mem_block(memory, &(framebuffers->fb_mem_blocks));

		phys_base = (unsigned char*)(memory->phys_addr);
		virt_base = (unsigned char*)(memory->mapped_virt_addr); /* TODO */

		if (align > 1)
		{
			phys_base = (unsigned char*)ALIGN_VAL_TO(phys_base, align);
			virt_base = (unsigned char*)ALIGN_VAL_TO(virt_base, align);
		}

		fb->nStrideY = framebuffers->y_stride;
		fb->nStrideC = framebuffers->uv_stride;

		/* physical plane addresses: Y, Cb, Cr, MvCol laid out back-to-back */
		fb->pbufY = phys_base;
		fb->pbufCb = phys_base + framebuffers->y_size;
		fb->pbufCr = phys_base + framebuffers->y_size + framebuffers->u_size;
		fb->pbufMvCol = phys_base + framebuffers->y_size + framebuffers->u_size + framebuffers->v_size;

		/* virtual plane addresses: same layout as the physical ones */
		fb->pbufVirtY = virt_base;
		fb->pbufVirtCb = virt_base + framebuffers->y_size;
		fb->pbufVirtCr = virt_base + framebuffers->y_size + framebuffers->u_size;
		fb->pbufVirtMvCol = virt_base + framebuffers->y_size + framebuffers->u_size + framebuffers->v_size;

		/* bottom-field tile pointers are not set up here; cleared to 0 */
		fb->pbufY_tilebot = 0;
		fb->pbufCb_tilebot = 0;
		fb->pbufVirtY_tilebot = 0;
		fb->pbufVirtCb_tilebot = 0;
	}

	return TRUE;
}