/*
 * Rebuilds the emulated MPEG-4 ES descriptor of an AVC/SVC visual sample
 * entry from its attached boxes (bitrate, extra descriptors, avcC/svcC).
 * Any previously emulated ESD is destroyed and replaced.
 *
 * avc: sample entry whose emul_esd is (re)created in place.
 */
void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc)
{
	GF_AVCConfig *avcc, *svcc;
	if (avc->emul_esd) gf_odf_desc_del((GF_Descriptor *)avc->emul_esd);
	avc->emul_esd = gf_odf_desc_esd_new(2);
	avc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	/*AVC OTI is 0x21, AVC parameter set stream OTI (not supported in gpac) is 0x22*/
	avc->emul_esd->decoderConfig->objectTypeIndication = GPAC_OTI_VIDEO_AVC;

	if (avc->bitrate) {
		avc->emul_esd->decoderConfig->bufferSizeDB = avc->bitrate->bufferSizeDB;
		avc->emul_esd->decoderConfig->avgBitrate = avc->bitrate->avgBitrate;
		avc->emul_esd->decoderConfig->maxBitrate = avc->bitrate->maxBitrate;
	}
	if (avc->descr) {
		u32 i = 0;
		GF_Descriptor *desc, *clone;
		while ((desc = (GF_Descriptor *)gf_list_enum(avc->descr->descriptors, &i))) {
			clone = NULL;
			gf_odf_desc_copy(desc, &clone);
			/*ownership of clone moves to the ESD on success; destroy it on failure*/
			if (gf_odf_desc_add_desc((GF_Descriptor *)avc->emul_esd, clone) != GF_OK)
				gf_odf_desc_del(clone);
		}
	}
	if (avc->avc_config) {
		avcc = avc->avc_config->config ? AVC_DuplicateConfig(avc->avc_config->config) : NULL;
		/*merge SVC config into the AVC copy - FIX: only when avcc is non-NULL,
		  otherwise the gf_list_add() calls below dereference a NULL config*/
		if (avcc && avc->svc_config) {
			svcc = AVC_DuplicateConfig(avc->svc_config->config);
			/*transfer each SPS/PPS slot from the SVC duplicate to the AVC duplicate*/
			while (gf_list_count(svcc->sequenceParameterSets)) {
				GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(svcc->sequenceParameterSets, 0);
				gf_list_rem(svcc->sequenceParameterSets, 0);
				gf_list_add(avcc->sequenceParameterSets, p);
			}
			while (gf_list_count(svcc->pictureParameterSets)) {
				GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(svcc->pictureParameterSets, 0);
				gf_list_rem(svcc->pictureParameterSets, 0);
				gf_list_add(avcc->pictureParameterSets, p);
			}
			gf_odf_avc_cfg_del(svcc);
		}
		if (avcc) {
			gf_odf_avc_cfg_write(avcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_avc_cfg_del(avcc);
		}
	} else if (avc->svc_config) {
		svcc = AVC_DuplicateConfig(avc->svc_config->config);
		gf_odf_avc_cfg_write(svcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
		gf_odf_avc_cfg_del(svcc);
	}
}
/*
 * Rebuilds the emulated MPEG-4 ES descriptor of an AVC visual sample entry
 * from its attached boxes (bitrate, extra descriptors, avcC). Any previously
 * emulated ESD is destroyed and replaced.
 *
 * avc: sample entry whose emul_esd is (re)created in place.
 */
void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc)
{
	if (avc->emul_esd) gf_odf_desc_del((GF_Descriptor *)avc->emul_esd);
	avc->emul_esd = gf_odf_desc_esd_new(2);
	avc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	/*AVC OTI is 0x21, AVC parameter set stream OTI (not supported in gpac) is 0x22*/
	/*FIX: use the named constant instead of the raw 0x21 magic number*/
	avc->emul_esd->decoderConfig->objectTypeIndication = GPAC_OTI_VIDEO_AVC;

	if (avc->bitrate) {
		avc->emul_esd->decoderConfig->bufferSizeDB = avc->bitrate->bufferSizeDB;
		avc->emul_esd->decoderConfig->avgBitrate = avc->bitrate->avgBitrate;
		avc->emul_esd->decoderConfig->maxBitrate = avc->bitrate->maxBitrate;
	}
	if (avc->descr) {
		u32 i = 0;
		GF_Descriptor *desc, *clone;
		while ((desc = (GF_Descriptor *)gf_list_enum(avc->descr->descriptors, &i))) {
			clone = NULL;
			gf_odf_desc_copy(desc, &clone);
			/*ownership of clone moves to the ESD on success; destroy it on failure*/
			if (gf_odf_desc_add_desc((GF_Descriptor *)avc->emul_esd, clone) != GF_OK)
				gf_odf_desc_del(clone);
		}
	}
	if (avc->avc_config && avc->avc_config->config) {
		gf_odf_avc_cfg_write(avc->avc_config->config, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
	}
}
/*
 * Declares all playable media objects of the opened ISO file to the terminal.
 * For scalable (SVC) video only the highest enhancement track is declared;
 * when the "DeclareScalableXPS" option is enabled (default), the SPS/PPS of
 * the lower layers are merged into that track's decoder config. Also extracts
 * iTunes cover art to the cache directory and declares it as a media object
 * when the file contains no video track.
 */
void isor_declare_objects(ISOMReader *read)
{
	GF_ObjectDescriptor *od;
	GF_ESD *esd;
	const char *tag;
	u32 i, count, ocr_es_id, tlen, base_track, j, track_id;
	Bool highest_stream;
	char *opt;
	Bool add_ps_lower = GF_TRUE;

	ocr_es_id = 0;
	opt = (char*) gf_modules_get_option((GF_BaseInterface *)read->input, "ISOReader", "DeclareScalableXPS");
	if (!opt) {
		/*first run: persist the default so the user can override it later*/
		gf_modules_set_option((GF_BaseInterface *)read->input, "ISOReader", "DeclareScalableXPS", "yes");
	} else if (!strcmp(opt, "no")) {
		add_ps_lower = GF_FALSE;
	}

	/*TODO check for alternate tracks*/
	count = gf_isom_get_track_count(read->mov);
	for (i=0; i<count; i++) {
		if (!gf_isom_is_track_enabled(read->mov, i+1)) continue;

		switch (gf_isom_get_media_type(read->mov, i+1)) {
		case GF_ISOM_MEDIA_AUDIO:
		case GF_ISOM_MEDIA_VISUAL:
		case GF_ISOM_MEDIA_TEXT:
		case GF_ISOM_MEDIA_SUBT:
		case GF_ISOM_MEDIA_SCENE:
		case GF_ISOM_MEDIA_SUBPIC:
			break;
		default:
			continue;
		}

		/*we declare only the highest video track (i.e the track we play): a track
		  referenced by another track's 'scal' reference is a lower layer*/
		highest_stream = GF_TRUE;
		track_id = gf_isom_get_track_id(read->mov, i+1);
		for (j = 0; j < count; j++) {
			if (gf_isom_has_track_reference(read->mov, j+1, GF_ISOM_REF_SCAL, track_id) > 0) {
				highest_stream = GF_FALSE;
				break;
			}
		}
		if ((gf_isom_get_media_type(read->mov, i+1) == GF_ISOM_MEDIA_VISUAL) && !highest_stream)
			continue;

		esd = gf_media_map_esd(read->mov, i+1);
		if (esd) {
			/*FIX: zero first - gf_isom_get_reference may not write base_track on error,
			  which would leave it uninitialized*/
			base_track = 0;
			gf_isom_get_reference(read->mov, i+1, GF_ISOM_REF_BASE, 1, &base_track);
			esd->has_ref_base = base_track ? GF_TRUE : GF_FALSE;

			/*FIXME: if we declare only SPS/PPS of the highest layer, we have a problem
			in decoding even though we have all SPS/PPS inband (OpenSVC bug ?)*/
			/*so we add by default the SPS/PPS of the lower layers to this esd*/
			if (esd->has_ref_base && add_ps_lower) {
				u32 nb_refs, refIndex, ref_track, num_sps, num_pps, t;
				GF_AVCConfig *cfg = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
				GF_AVCConfig *avccfg, *svccfg;

				nb_refs = gf_isom_get_reference_count(read->mov, i+1, GF_ISOM_REF_SCAL);
				/*walk references from last to first so that inserts at index 0
				  end up with the lowest layer's parameter sets first*/
				for (refIndex = nb_refs; refIndex != 0; refIndex--) {
					gf_isom_get_reference(read->mov, i+1, GF_ISOM_REF_SCAL, refIndex, &ref_track);
					avccfg = gf_isom_avc_config_get(read->mov, ref_track, 1);
					svccfg = gf_isom_svc_config_get(read->mov, ref_track, 1);
					if (avccfg) {
						num_sps = gf_list_count(avccfg->sequenceParameterSets);
						for (t = 0; t < num_sps; t++) {
							GF_AVCConfigSlot *slc = gf_list_get(avccfg->sequenceParameterSets, t);
							GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
							sl->id = slc->id;
							sl->size = slc->size;
							sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
							memcpy(sl->data, slc->data, sizeof(char)*sl->size);
							gf_list_insert(cfg->sequenceParameterSets, sl, 0);
						}
						num_pps = gf_list_count(avccfg->pictureParameterSets);
						/*FIX: was iterating with num_sps (copy-paste), dropping or
						  over-reading PPS entries*/
						for (t = 0; t < num_pps; t++) {
							GF_AVCConfigSlot *slc = gf_list_get(avccfg->pictureParameterSets, t);
							GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
							sl->id = slc->id;
							sl->size = slc->size;
							sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
							memcpy(sl->data, slc->data, sizeof(char)*sl->size);
							gf_list_insert(cfg->pictureParameterSets, sl, 0);
						}
						gf_odf_avc_cfg_del(avccfg);
					}
					if (svccfg) {
						num_sps = gf_list_count(svccfg->sequenceParameterSets);
						for (t = 0; t < num_sps; t++) {
							GF_AVCConfigSlot *slc = gf_list_get(svccfg->sequenceParameterSets, t);
							GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
							sl->id = slc->id;
							sl->size = slc->size;
							sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
							memcpy(sl->data, slc->data, sizeof(char)*sl->size);
							gf_list_insert(cfg->sequenceParameterSets, sl, 0);
						}
						num_pps = gf_list_count(svccfg->pictureParameterSets);
						for (t = 0; t < num_pps; t++) {
							GF_AVCConfigSlot *slc = gf_list_get(svccfg->pictureParameterSets, t);
							GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
							sl->id = slc->id;
							sl->size = slc->size;
							sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
							memcpy(sl->data, slc->data, sizeof(char)*sl->size);
							gf_list_insert(cfg->pictureParameterSets, sl, 0);
						}
						gf_odf_avc_cfg_del(svccfg);
					}
				}
				/*serialize the merged config back into the decoder specific info*/
				if (esd->decoderConfig->decoderSpecificInfo->data)
					gf_free(esd->decoderConfig->decoderSpecificInfo->data);
				gf_odf_avc_cfg_write(cfg, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength);
				gf_odf_avc_cfg_del(cfg);
			}

			od = (GF_ObjectDescriptor *) gf_odf_desc_new(GF_ODF_OD_TAG);
			od->service_ifce = read->input;
			od->objectDescriptorID = 0;
			/*first declared stream provides the clock reference for the others*/
			if (!ocr_es_id) ocr_es_id = esd->ESID;
			esd->OCRESID = ocr_es_id;
			gf_list_add(od->ESDescriptors, esd);
			if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
				send_proxy_command(read, GF_FALSE, GF_TRUE, GF_OK, (GF_Descriptor*)od, NULL);
			} else {
				gf_term_add_media(read->service, (GF_Descriptor*)od, GF_TRUE);
			}
		}
	}

	/*if cover art, extract it in cache*/
	if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_COVER_ART, &tag, &tlen)==GF_OK) {
		const char *cdir = gf_modules_get_option((GF_BaseInterface *)gf_term_get_service_interface(read->service), "General", "CacheDirectory");
		if (cdir) {
			char szName[GF_MAX_PATH];
			const char *sep;
			FILE *t;
			sep = strrchr(gf_isom_get_filename(read->mov), '\\');
			if (!sep) sep = strrchr(gf_isom_get_filename(read->mov), '/');
			if (!sep) sep = gf_isom_get_filename(read->mov);

			/*high bit of tlen flags PNG vs JPEG cover data*/
			if ((cdir[strlen(cdir)-1] != '\\') && (cdir[strlen(cdir)-1] != '/')) {
				sprintf(szName, "%s/%s_cover.%s", cdir, sep, (tlen & 0x80000000) ? "png" : "jpg");
			} else {
				sprintf(szName, "%s%s_cover.%s", cdir, sep, (tlen & 0x80000000) ? "png" : "jpg");
			}

			t = gf_f64_open(szName, "wb");
			if (t) {
				Bool isom_contains_video = GF_FALSE;

				/*write cover data*/
				assert(!(tlen & 0x80000000));
				gf_fwrite(tag, tlen & 0x7FFFFFFF, 1, t);
				fclose(t);

				/*don't display cover art when video is present*/
				for (i=0; i<gf_isom_get_track_count(read->mov); i++) {
					if (!gf_isom_is_track_enabled(read->mov, i+1))
						continue;
					if (gf_isom_get_media_type(read->mov, i+1) == GF_ISOM_MEDIA_VISUAL) {
						isom_contains_video = GF_TRUE;
						break;
					}
				}

				if (!isom_contains_video) {
					od = (GF_ObjectDescriptor *) gf_odf_desc_new(GF_ODF_OD_TAG);
					od->service_ifce = read->input;
					od->objectDescriptorID = GF_MEDIA_EXTERNAL_ID;
					od->URLString = gf_strdup(szName);
					if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
						send_proxy_command(read, GF_FALSE, GF_TRUE, GF_OK, (GF_Descriptor*)od, NULL);
					} else {
						gf_term_add_media(read->service, (GF_Descriptor*)od, GF_TRUE);
					}
				}
			}
		}
	}

	/*signal end of object declarations*/
	if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
		send_proxy_command(read, GF_FALSE, GF_TRUE, GF_OK, NULL, NULL);
	} else {
		gf_term_add_media(read->service, NULL, GF_FALSE);
	}
}
/*
 * Creates the VideoToolbox decompression session for the stream described by
 * ctx->esd: builds the codec-specific format description (avcC / esds / s263),
 * then tries a hardware decoder first and falls back to software.
 *
 * ctx:               decoder context; width/height/pix_fmt/out_size are filled in.
 * force_dsi_rewrite: rebuild the AVC decoder config from ctx->sps/ctx->pps even
 *                    when the ESD already carries one.
 * Returns GF_OK, or a GF_* error mapped from the VTB status codes.
 */
static GF_Err VTBDec_InitDecoder(VTBDec *ctx, Bool force_dsi_rewrite)
{
	CFMutableDictionaryRef dec_dsi, dec_type;
	CFMutableDictionaryRef dsi;
	VTDecompressionOutputCallbackRecord cbacks;
	CFDictionaryRef buffer_attribs;
	OSStatus status;
	OSType kColorSpace;
	CFDataRef data = NULL;
	char *dsi_data = NULL;
	u32 dsi_data_size = 0;

	dec_dsi = CFDictionaryCreateMutable(kCFAllocatorDefault, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

	/*default output: 8-bit planar 4:2:0*/
	kColorSpace = kCVPixelFormatType_420YpCbCr8Planar;
	ctx->pix_fmt = GF_PIXEL_YV12;

	switch (ctx->esd->decoderConfig->objectTypeIndication) {
	case GPAC_OTI_VIDEO_AVC :
		if (ctx->sps && ctx->pps) {
			AVCState avc;
			s32 idx;
			Bool free_dsi_data = GF_FALSE;
			memset(&avc, 0, sizeof(AVCState));
			avc.sps_active_idx = -1;

			idx = gf_media_avc_read_sps(ctx->sps, ctx->sps_size, &avc, 0, NULL);
			/*FIX: a malformed SPS returns a negative index - indexing avc.sps[idx]
			  with it would read out of bounds*/
			if (idx < 0) {
				if (dec_dsi) CFRelease(dec_dsi);
				return GF_NON_COMPLIANT_BITSTREAM;
			}

			ctx->vtb_type = kCMVideoCodecType_H264;
			assert(ctx->sps);
			ctx->width = avc.sps[idx].width;
			ctx->height = avc.sps[idx].height;
			if (avc.sps[idx].vui.par_num && avc.sps[idx].vui.par_den) {
				ctx->pixel_ar = avc.sps[idx].vui.par_num;
				ctx->pixel_ar <<= 16;
				ctx->pixel_ar |= avc.sps[idx].vui.par_den;
			}
			ctx->chroma_format = avc.sps[idx].chroma_format;
			ctx->luma_bit_depth = 8 + avc.sps[idx].luma_bit_depth_m8;
			ctx->chroma_bit_depth = 8 + avc.sps[idx].chroma_bit_depth_m8;

			switch (ctx->chroma_format) {
			case 2:
				//422 decoding doesn't seem supported ...
				if (ctx->luma_bit_depth>8) {
					kColorSpace = kCVPixelFormatType_422YpCbCr10;
					ctx->pix_fmt = GF_PIXEL_YUV422_10;
				} else {
					kColorSpace = kCVPixelFormatType_422YpCbCr8;
					ctx->pix_fmt = GF_PIXEL_YUV422;
				}
				break;
			case 3:
				if (ctx->luma_bit_depth>8) {
					kColorSpace = kCVPixelFormatType_444YpCbCr10;
					ctx->pix_fmt = GF_PIXEL_YUV444_10;
				} else {
					kColorSpace = kCVPixelFormatType_444YpCbCr8;
					ctx->pix_fmt = GF_PIXEL_YUV444;
				}
				break;
			default:
				if (ctx->luma_bit_depth>8) {
					/*NOTE(review): pix_fmt is 10-bit but the CV format is the 8-bit
					  bi-planar one - looks inconsistent, confirm intended mapping*/
					kColorSpace = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
					ctx->pix_fmt = GF_PIXEL_YV12_10;
				}
				break;
			}

			if (!ctx->esd->decoderConfig->decoderSpecificInfo || force_dsi_rewrite || !ctx->esd->decoderConfig->decoderSpecificInfo->data) {
				/*rebuild an avcC from the in-band SPS/PPS; the slots borrow
				  ctx->sps/ctx->pps and are detached before deleting the config*/
				GF_AVCConfigSlot *slc_s, *slc_p;
				GF_AVCConfig *cfg = gf_odf_avc_cfg_new();
				cfg->configurationVersion = 1;
				cfg->profile_compatibility = avc.sps[idx].prof_compat;
				cfg->AVCProfileIndication = avc.sps[idx].profile_idc;
				cfg->AVCLevelIndication = avc.sps[idx].level_idc;
				cfg->chroma_format = avc.sps[idx].chroma_format;
				cfg->luma_bit_depth = 8 + avc.sps[idx].luma_bit_depth_m8;
				cfg->chroma_bit_depth = 8 + avc.sps[idx].chroma_bit_depth_m8;
				cfg->nal_unit_size = 4;

				GF_SAFEALLOC(slc_s, GF_AVCConfigSlot);
				slc_s->data = ctx->sps;
				slc_s->size = ctx->sps_size;
				gf_list_add(cfg->sequenceParameterSets, slc_s);

				GF_SAFEALLOC(slc_p, GF_AVCConfigSlot);
				slc_p->data = ctx->pps;
				slc_p->size = ctx->pps_size;
				gf_list_add(cfg->pictureParameterSets , slc_p);

				gf_odf_avc_cfg_write(cfg, &dsi_data, &dsi_data_size);
				free_dsi_data = GF_TRUE;
				slc_s->data = slc_p->data = NULL;
				gf_odf_avc_cfg_del((cfg));
			} else {
				dsi_data = ctx->esd->decoderConfig->decoderSpecificInfo->data;
				dsi_data_size = ctx->esd->decoderConfig->decoderSpecificInfo->dataLength;
			}

			dsi = CFDictionaryCreateMutable(kCFAllocatorDefault, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
			data = CFDataCreate(kCFAllocatorDefault, dsi_data, dsi_data_size);
			if (data) {
				CFDictionarySetValue(dsi, CFSTR("avcC"), data);
				CFDictionarySetValue(dec_dsi, kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms, dsi);
				CFRelease(data);
			}
			CFRelease(dsi);

			if (!ctx->esd->decoderConfig->decoderSpecificInfo || !ctx->esd->decoderConfig->decoderSpecificInfo->data) {
				gf_free(ctx->sps);
				ctx->sps = NULL;
				gf_free(ctx->pps);
				ctx->pps = NULL;
			}
			/*FIX: dsi_data is allocated by gf_odf_avc_cfg_write whenever the config
			  was rebuilt (including force_dsi_rewrite with an existing DSI) - the old
			  code leaked it in that case; free exactly when we allocated it*/
			if (free_dsi_data) gf_free(dsi_data);
		}
		break;
	case GPAC_OTI_VIDEO_MPEG2_SIMPLE:
	case GPAC_OTI_VIDEO_MPEG2_MAIN:
	case GPAC_OTI_VIDEO_MPEG2_SNR:
	case GPAC_OTI_VIDEO_MPEG2_SPATIAL:
	case GPAC_OTI_VIDEO_MPEG2_HIGH:
	case GPAC_OTI_VIDEO_MPEG2_422:
		ctx->vtb_type = kCMVideoCodecType_MPEG2Video;
		/*dimensions not known yet: defer session creation until first frame parse*/
		if (!ctx->width || !ctx->height) {
			ctx->init_mpeg12 = GF_TRUE;
			return GF_OK;
		}
		ctx->init_mpeg12 = GF_FALSE;
		break;
	case GPAC_OTI_VIDEO_MPEG1:
		ctx->vtb_type = kCMVideoCodecType_MPEG1Video;
		if (!ctx->width || !ctx->height) {
			ctx->init_mpeg12 = GF_TRUE;
			return GF_OK;
		}
		ctx->init_mpeg12 = GF_FALSE;
		break;
	case GPAC_OTI_VIDEO_MPEG4_PART2 :
	{
		Bool reset_dsi = GF_FALSE;
		ctx->vtb_type = kCMVideoCodecType_MPEG4Video;
		if (!ctx->esd->decoderConfig->decoderSpecificInfo) {
			ctx->esd->decoderConfig->decoderSpecificInfo = (GF_DefaultDescriptor *) gf_odf_desc_new(GF_ODF_DSI_TAG);
		}
		if (!ctx->esd->decoderConfig->decoderSpecificInfo->data) {
			/*temporarily attach the in-band VOSH as DSI; detached again below*/
			reset_dsi = GF_TRUE;
			ctx->esd->decoderConfig->decoderSpecificInfo->data = ctx->vosh;
			ctx->esd->decoderConfig->decoderSpecificInfo->dataLength = ctx->vosh_size;
		}
		if (ctx->esd->decoderConfig->decoderSpecificInfo->data) {
			GF_M4VDecSpecInfo vcfg;
			GF_BitStream *bs;
			gf_m4v_get_config(ctx->esd->decoderConfig->decoderSpecificInfo->data, ctx->esd->decoderConfig->decoderSpecificInfo->dataLength, &vcfg);
			ctx->width = vcfg.width;
			ctx->height = vcfg.height;
			if (ctx->esd->slConfig) {
				ctx->esd->slConfig->predefined = 2;
			}
			/*serialize the full ESD (with a 4-byte zero prefix) as the 'esds' atom payload*/
			bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
			gf_bs_write_u32(bs, 0);
			gf_odf_desc_write_bs((GF_Descriptor *) ctx->esd, bs);
			gf_bs_get_content(bs, &dsi_data, &dsi_data_size);
			gf_bs_del(bs);

			dsi = CFDictionaryCreateMutable(kCFAllocatorDefault, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
			data = CFDataCreate(kCFAllocatorDefault, dsi_data, dsi_data_size);
			gf_free(dsi_data);
			if (data) {
				CFDictionarySetValue(dsi, CFSTR("esds"), data);
				CFDictionarySetValue(dec_dsi, kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms, dsi);
				CFRelease(data);
			}
			CFRelease(dsi);

			if (reset_dsi) {
				/*detach the borrowed VOSH so it is not double-freed with the ESD*/
				ctx->esd->decoderConfig->decoderSpecificInfo->data = NULL;
				ctx->esd->decoderConfig->decoderSpecificInfo->dataLength = 0;
			}
			ctx->skip_mpeg4_vosh = GF_FALSE;
		} else {
			ctx->skip_mpeg4_vosh = GF_TRUE;
			return GF_OK;
		}
		break;
	}
	case GPAC_OTI_MEDIA_GENERIC:
		if (ctx->esd->decoderConfig->decoderSpecificInfo && ctx->esd->decoderConfig->decoderSpecificInfo->dataLength) {
			char *dsi = ctx->esd->decoderConfig->decoderSpecificInfo->data;
			if (ctx->esd->decoderConfig->decoderSpecificInfo->dataLength<8) return GF_NON_COMPLIANT_BITSTREAM;
			/*expected layout: "s263" fourcc followed by 16-bit width and height*/
			if (strnicmp(dsi, "s263", 4)) return GF_NOT_SUPPORTED;
			ctx->width = ((u8) dsi[4]);
			ctx->width<<=8;
			ctx->width |= ((u8) dsi[5]);
			ctx->height = ((u8) dsi[6]);
			ctx->height<<=8;
			ctx->height |= ((u8) dsi[7]);
			ctx->vtb_type = kCMVideoCodecType_H263;
		}
		break;

	default :
		return GF_NOT_SUPPORTED;
	}
	if (!ctx->width || !ctx->height) return GF_NOT_SUPPORTED;

	status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault, ctx->vtb_type, ctx->width, ctx->height, dec_dsi, &ctx->fmt_desc);
	if (!ctx->fmt_desc) {
		if (dec_dsi) CFRelease(dec_dsi);
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	buffer_attribs = VTBDec_CreateBufferAttributes(ctx->width, ctx->height, kColorSpace);

	cbacks.decompressionOutputCallback = VTBDec_on_frame;
	cbacks.decompressionOutputRefCon = ctx;

	/*try hardware decoding first; fall back to software on failure*/
	dec_type = CFDictionaryCreateMutable(kCFAllocatorDefault, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
	CFDictionarySetValue(dec_type, kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder, kCFBooleanTrue);
	ctx->is_hardware = GF_TRUE;

	status = VTDecompressionSessionCreate(NULL, ctx->fmt_desc, dec_type, NULL, &cbacks, &ctx->vtb_session);
	//if HW decoder not available, try soft one
	if (status) {
		status = VTDecompressionSessionCreate(NULL, ctx->fmt_desc, NULL, buffer_attribs, &cbacks, &ctx->vtb_session);
		ctx->is_hardware = GF_FALSE;
	}

	if (dec_dsi)
		CFRelease(dec_dsi);
	if (dec_type)
		CFRelease(dec_type);
	if (buffer_attribs)
		CFRelease(buffer_attribs);

	switch (status) {
	case kVTVideoDecoderNotAvailableNowErr:
	case kVTVideoDecoderUnsupportedDataFormatErr:
		return GF_NOT_SUPPORTED;
	case kVTVideoDecoderMalfunctionErr:
		return GF_IO_ERR;
	case kVTVideoDecoderBadDataErr :
		return GF_BAD_PARAM;
	case kVTPixelTransferNotSupportedErr:
	case kVTCouldNotFindVideoDecoderErr:
		return GF_NOT_SUPPORTED;
	case 0:
		break;
	default:
		return GF_SERVICE_ERROR;
	}

	//good to go !
	if (ctx->pix_fmt == GF_PIXEL_YUV422) {
		ctx->out_size = ctx->width*ctx->height*2;
	} else if (ctx->pix_fmt == GF_PIXEL_YUV444) {
		ctx->out_size = ctx->width*ctx->height*3;
	} else {
		// (ctx->pix_fmt == GF_PIXEL_YV12)
		ctx->out_size = ctx->width*ctx->height*3/2;
	}
	if (ctx->luma_bit_depth>8) {
		ctx->out_size *= 2;
	}
	return GF_OK;
}