/**
 * Initialize the MSC encoder: derive the inverse quantiser scale from the
 * requested quality, build the fixed-point quantisation tables, write the
 * 8-byte extradata header, and allocate the coded frame and scratch buffer.
 *
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int encode_init(AVCodecContext *avctx) {
    MscEncoderContext * mscEncoderContext;
    MscCodecContext * mscContext;
    const int scale = 1;

    mscEncoderContext = avctx->priv_data;
    mscContext = &mscEncoderContext->mscContext;

    init_common(avctx, &mscEncoderContext->mscContext);

    /* Default quality; BUGFIX: also catch negative values, which would
     * otherwise produce a nonsensical (negative) inv_qscale below. */
    if (avctx->global_quality <= 0)
        avctx->global_quality = 4*FF_QUALITY_SCALE;

    /* Rounded integer division: inv_qscale = 32*scale*FF_QUALITY_SCALE / quality */
    mscContext->inv_qscale = (32*scale*FF_QUALITY_SCALE + avctx->global_quality/2) / avctx->global_quality;

    /* Precompute 16.16 fixed-point reciprocal quant tables (rounded). */
    for (int i = 0; i < 64; i++) {
        int q         = 32*scale * quant_intra_matrix[i];
        int qNonIntra = 32*scale * ff_mpeg1_default_non_intra_matrix[i];
        mscContext->q_intra_matrix[i]     = ((mscContext->inv_qscale << 16) + q/2) / q;
        mscContext->q_non_intra_matrix[i] = ((mscContext->inv_qscale << 16) + qNonIntra/2) / qNonIntra;
    }

    /* 8 bytes of extradata: inv_qscale followed by the "MSC0" tag,
     * both stored little-endian. */
    avctx->extradata = av_mallocz(8);
    if (!avctx->extradata) { /* BUGFIX: was dereferenced without a NULL check */
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8;
    ((uint32_t*)avctx->extradata)[0] = av_le2ne32(mscContext->inv_qscale);
    ((uint32_t*)avctx->extradata)[1] = av_le2ne32(AV_RL32("MSC0")); //check TODO

    ff_init_scantable(mscContext->dsp.idct_permutation, &mscContext->scantable, scantab);

    /* Scan-order (permuted) forward quant matrices used by the encoder proper. */
    for (int i = 0; i < 64; i++) {
        int index = scantab[i];
        mscContext->intra_matrix[i]     = 64 * scale * quant_intra_matrix[index] / mscContext->inv_qscale;
        mscContext->non_intra_matrix[i] = 64 * scale * ff_mpeg1_default_non_intra_matrix[index] / mscContext->inv_qscale;
    }

    // allocate frame
    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    // allocate buffers (worst-case 6 bytes per pixel for the arith coder)
    mscEncoderContext->arithBuffSize = 6 * avctx->coded_width * avctx->coded_height;
    mscEncoderContext->arithBuff = av_malloc(mscEncoderContext->arithBuffSize);
    if (mscEncoderContext->arithBuff == NULL) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}
/* Unpack a v210 image (10-bit 4:2:2, 6 pixels packed into four 32-bit
 * little-endian words) into three consecutive planar 16-bit buffers in dst:
 * Y at dst[0], U at dst[width*height], V at dst[width*height*3/2].
 * NOTE(review): the function's closing brace is not visible in this chunk;
 * the definition appears truncated after the row loop. */
static void v210_convert(uint16_t *dst, const uint32_t *bytes, const int width, const int height) {
    /* v210 rows are padded to 48-pixel groups; stride counted in 32-bit words */
    const int stride = ((width + 47) / 48) * 48 * 8 / 3 / 4;
    uint16_t *y = &dst[0];
    uint16_t *u = &dst[width * height * 2 / 2]; /* chroma planes follow the luma plane */
    uint16_t *v = &dst[width * height * 3 / 2];
/* Read one packed word and scatter its three 10-bit components. */
#define READ_PIXELS(a, b, c) \
    do { \
        val = av_le2ne32(*src++); \
        *a++ = val & 0x3FF; \
        *b++ = (val >> 10) & 0x3FF; \
        *c++ = (val >> 20) & 0x3FF; \
    } while (0)
    for (int h = 0; h < height; h++) {
        const uint32_t *src = bytes;
        uint32_t val = 0;
        int w;
        /* main loop: 6 pixels (4 packed words) per iteration */
        for (w = 0; w < width - 5; w += 6) {
            READ_PIXELS(u, y, v);
            READ_PIXELS(y, u, y);
            READ_PIXELS(v, y, u);
            READ_PIXELS(y, v, y);
        }
        /* tail when width % 6 >= 2: two more pixels */
        if (w < width - 1) {
            READ_PIXELS(u, y, v);
            val = av_le2ne32(*src++);
            *y++ = val & 0x3FF;
        }
        /* tail when width % 6 >= 4: two further pixels, reusing `val` */
        if (w < width - 3) {
            *u++ = (val >> 10) & 0x3FF;
            *y++ = (val >> 20) & 0x3FF;
            val = av_le2ne32(*src++);
            *v++ = val & 0x3FF;
            *y++ = (val >> 10) & 0x3FF;
        }
        bytes += stride; /* advance to the next padded source row */
    }
/**
 * Finish an MD5 computation: apply the standard padding, append the
 * message length in bits, and write the 16-byte little-endian digest
 * into dst.
 */
void av_md5_final(AVMD5 *ctx, uint8_t *dst){
    /* Capture the bit length before the padding updates mutate ctx->len. */
    const uint64_t bit_count = av_le2ne64(ctx->len << 3);

    /* Mandatory 0x80 marker byte, then zero bytes until len % 64 == 56. */
    av_md5_update(ctx, "\200", 1);
    while ((ctx->len & 63) != 56)
        av_md5_update(ctx, "", 1);

    /* 8-byte little-endian length field completes the final block. */
    av_md5_update(ctx, (uint8_t*)&bit_count, 8);

    /* Emit the state words; ABCD is stored reversed, hence the 3-i index. */
    for (int word = 0; word < 4; word++)
        ((uint32_t*)dst)[word] = av_le2ne32(ctx->ABCD[3 - word]);
}
/* v210 decoder entry point (newer get-buffer API variant): validates the
 * packet size against the padded stride, dispatches the bulk of each row to
 * the (possibly SIMD) unpack_frame callback and handles the 2- or 4-pixel
 * row tail in C.
 * NOTE(review): this definition is truncated in this chunk — the row loop's
 * per-row pointer advances and the function's return are not visible. */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) {
    V210DecContext *s = avctx->priv_data;
    int h, w, ret, stride, aligned_input;
    AVFrame *pic = data;
    const uint8_t *psrc = avpkt->data;
    uint16_t *y, *u, *v;

    if (s->custom_stride )
        stride = s->custom_stride;
    else {
        /* default v210 stride: rows padded to 48-pixel groups, 8/3 bytes per pixel */
        int aligned_width = ((avctx->width + 47) / 48) * 48;
        stride = aligned_width * 8 / 3;
    }

    if (avpkt->size < stride * avctx->height) {
        /* tolerate streams padded to 24-pixel groups instead of 48 */
        if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
            stride = avpkt->size / avctx->height;
            if (!s->stride_warning_shown)
                av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
            s->stride_warning_shown = 1;
        } else {
            av_log(avctx, AV_LOG_ERROR, "packet too small\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* pick aligned/unaligned SIMD unpack routine when alignment changes */
    aligned_input = !((uintptr_t)psrc & 0xf) && !(stride & 0xf);
    if (aligned_input != s->aligned_input) {
        s->aligned_input = aligned_input;
        if (HAVE_MMX)
            v210_x86_init(s);
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    y = (uint16_t*)pic->data[0];
    u = (uint16_t*)pic->data[1];
    v = (uint16_t*)pic->data[2];
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    for (h = 0; h < avctx->height; h++) {
        const uint32_t *src = (const uint32_t*)psrc;
        uint32_t val;

        /* bulk of the row: whole 6-pixel groups via the unpack callback */
        w = (avctx->width / 6) * 6;
        s->unpack_frame(src, y, u, v, w);
        y += w;
        u += w >> 1;
        v += w >> 1;
        src += (w << 1) / 3; /* 4 words consumed per 6 pixels */

        /* row tail: remaining 2 or 4 pixels handled in C */
        if (w < avctx->width - 1) {
            READ_PIXELS(u, y, v);
            val = av_le2ne32(*src++);
            *y++ = val & 0x3FF;
            if (w < avctx->width - 3) {
                *u++ = (val >> 10) & 0x3FF;
                *y++ = (val >> 20) & 0x3FF;
                val = av_le2ne32(*src++);
                *v++ = val & 0x3FF;
                *y++ = (val >> 10) & 0x3FF;
            }
        }
/**
 * Parse a Smacker file header: validate the magic, read global stream
 * parameters, the per-frame size/flag tables and the Huffman tree sizes,
 * create the video stream and up to seven audio streams, and stash the
 * tree data (prefixed by the four chunk sizes) in the video extradata for
 * the decoder to unpack.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap) {
    AVIOContext *pb = s->pb;
    SmackerContext *smk = s->priv_data;
    AVStream *st, *ast[7];
    int i, ret;
    int tbase;

    /* read and check header */
    smk->magic = avio_rl32(pb);
    if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4'))
        return -1;
    smk->width = avio_rl32(pb);
    smk->height = avio_rl32(pb);
    smk->frames = avio_rl32(pb);
    smk->pts_inc = (int32_t)avio_rl32(pb);
    smk->flags = avio_rl32(pb);
    if(smk->flags & SMACKER_FLAG_RING_FRAME)
        smk->frames++;
    for(i = 0; i < 7; i++)
        smk->audio[i] = avio_rl32(pb);
    smk->treesize = avio_rl32(pb);

    if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant)
        av_log(s, AV_LOG_ERROR, "treesize too large\n");
        return -1;
    }

//FIXME remove extradata "rebuilding"
    smk->mmap_size = avio_rl32(pb);
    smk->mclr_size = avio_rl32(pb);
    smk->full_size = avio_rl32(pb);
    smk->type_size = avio_rl32(pb);
    for(i = 0; i < 7; i++) {
        smk->rates[i]  = avio_rl24(pb);
        smk->aflags[i] = avio_r8(pb);
    }
    smk->pad = avio_rl32(pb);
    /* setup data */
    if(smk->frames > 0xFFFFFF) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames);
        return -1;
    }
    smk->frm_size  = av_malloc(smk->frames * 4);
    smk->frm_flags = av_malloc(smk->frames);
    /* BUGFIX: both allocations were written to below without a NULL check */
    if (!smk->frm_size || !smk->frm_flags) {
        av_freep(&smk->frm_size);
        av_freep(&smk->frm_flags);
        return AVERROR(ENOMEM);
    }

    smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2'));

    /* read frame info */
    for(i = 0; i < smk->frames; i++) {
        smk->frm_size[i] = avio_rl32(pb);
    }
    for(i = 0; i < smk->frames; i++) {
        smk->frm_flags[i] = avio_r8(pb);
    }

    /* init video codec */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return -1;
    smk->videoindex = st->index;
    st->codec->width = smk->width;
    st->codec->height = smk->height;
    st->codec->pix_fmt = PIX_FMT_PAL8;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_SMACKVIDEO;
    st->codec->codec_tag = smk->magic;
    /* Smacker uses 100000 as internal timebase */
    if(smk->pts_inc < 0)
        smk->pts_inc = -smk->pts_inc;
    else
        smk->pts_inc *= 100;
    tbase = 100000;
    av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1);
    avpriv_set_pts_info(st, 33, smk->pts_inc, tbase);
    st->duration = smk->frames;
    /* handle possible audio streams */
    for(i = 0; i < 7; i++) {
        smk->indexes[i] = -1;
        if (smk->rates[i]) {
            ast[i] = avformat_new_stream(s, NULL);
            /* BUGFIX: ast[i] was dereferenced unchecked, unlike the video
             * stream above */
            if (!ast[i]) {
                av_freep(&smk->frm_size);
                av_freep(&smk->frm_flags);
                return AVERROR(ENOMEM);
            }
            smk->indexes[i] = ast[i]->index;
            ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            if (smk->aflags[i] & SMK_AUD_BINKAUD) {
                ast[i]->codec->codec_id = CODEC_ID_BINKAUDIO_RDFT;
            } else if (smk->aflags[i] & SMK_AUD_USEDCT) {
                ast[i]->codec->codec_id = CODEC_ID_BINKAUDIO_DCT;
            } else if (smk->aflags[i] & SMK_AUD_PACKED){
                ast[i]->codec->codec_id = CODEC_ID_SMACKAUDIO;
                ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A');
            } else {
                ast[i]->codec->codec_id = CODEC_ID_PCM_U8;
            }
            ast[i]->codec->channels = (smk->aflags[i] & SMK_AUD_STEREO) ? 2 : 1;
            ast[i]->codec->sample_rate = smk->rates[i];
            ast[i]->codec->bits_per_coded_sample = (smk->aflags[i] & SMK_AUD_16BITS) ? 16 : 8;
            if(ast[i]->codec->bits_per_coded_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8)
                ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
            avpriv_set_pts_info(ast[i], 64, 1,
                    ast[i]->codec->sample_rate * ast[i]->codec->channels * ast[i]->codec->bits_per_coded_sample / 8);
        }
    }

    /* load trees to extradata, they will be unpacked by decoder */
    st->codec->extradata = av_malloc(smk->treesize + 16);
    st->codec->extradata_size = smk->treesize + 16;
    if(!st->codec->extradata){
        av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n",
               smk->treesize + 16);
        av_free(smk->frm_size);
        av_free(smk->frm_flags);
        return -1;
    }
    ret = avio_read(pb, st->codec->extradata + 16, st->codec->extradata_size - 16);
    if(ret != st->codec->extradata_size - 16){
        av_free(smk->frm_size);
        av_free(smk->frm_flags);
        return AVERROR(EIO);
    }
    /* first 16 bytes of extradata carry the four chunk sizes, little-endian */
    ((int32_t*)st->codec->extradata)[0] = av_le2ne32(smk->mmap_size);
    ((int32_t*)st->codec->extradata)[1] = av_le2ne32(smk->mclr_size);
    ((int32_t*)st->codec->extradata)[2] = av_le2ne32(smk->full_size);
    ((int32_t*)st->codec->extradata)[3] = av_le2ne32(smk->type_size);

    smk->curstream = -1;
    smk->nextpos = avio_tell(pb);

    return 0;
}
/* v210 decoder entry point (older coded_frame/get_buffer API variant):
 * validates the packet size against the padded stride and unpacks every row
 * entirely in C via the READ_PIXELS macro.
 * NOTE(review): this definition is truncated in this chunk — the
 * *data_size assignment and return statement are not visible. */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) {
    int h, w;
    AVFrame *pic = avctx->coded_frame;
    const uint8_t *psrc = avpkt->data;
    uint16_t *y, *u, *v;
    /* v210 stride: rows padded to 48-pixel groups, 8/3 bytes per pixel */
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    int stride = aligned_width * 8 / 3;

    /* release the previous frame before reusing coded_frame */
    if (pic->data[0])
        avctx->release_buffer(avctx, pic);

    if (avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return -1;
    }

    pic->reference = 0;
    if (avctx->get_buffer(avctx, pic) < 0)
        return -1;

    y = (uint16_t*)pic->data[0];
    u = (uint16_t*)pic->data[1];
    v = (uint16_t*)pic->data[2];
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

/* Read one packed little-endian word and scatter its three 10-bit components. */
#define READ_PIXELS(a, b, c) \
    do { \
        val = av_le2ne32(*src++); \
        *a++ = val & 0x3FF; \
        *b++ = (val >> 10) & 0x3FF; \
        *c++ = (val >> 20) & 0x3FF; \
    } while (0)

    for (h = 0; h < avctx->height; h++) {
        const uint32_t *src = (const uint32_t*)psrc;
        uint32_t val;

        /* main loop: 6 pixels (4 packed words) per iteration */
        for (w = 0; w < avctx->width - 5; w += 6) {
            READ_PIXELS(u, y, v);
            READ_PIXELS(y, u, y);
            READ_PIXELS(v, y, u);
            READ_PIXELS(y, v, y);
        }

        /* row tail when width % 6 >= 2 */
        if (w < avctx->width - 1) {
            READ_PIXELS(u, y, v);
            val = av_le2ne32(*src++);
            *y++ = val & 0x3FF;
        }
        /* row tail when width % 6 >= 4, reusing `val` */
        if (w < avctx->width - 3) {
            *u++ = (val >> 10) & 0x3FF;
            *y++ = (val >> 20) & 0x3FF;
            val = av_le2ne32(*src++);
            *v++ = val & 0x3FF;
            *y++ = (val >> 10) & 0x3FF;
        }

        psrc += stride;
        /* advance plane pointers past the line padding */
        y += pic->linesize[0] / 2 - avctx->width;
        u += pic->linesize[1] / 2 - avctx->width / 2;
        v += pic->linesize[2] / 2 - avctx->width / 2;
    }
/* v210 decoder entry point (old API, SIMD-assisted variant): selects an
 * aligned/unaligned unpack routine, dispatches whole 6-pixel groups to the
 * unpack_frame callback and handles the row tail in C.
 * NOTE(review): this definition is truncated in this chunk — the
 * *data_size assignment and return statement are not visible. */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) {
    V210DecContext *s = avctx->priv_data;
    int h, w, stride, aligned_input;
    AVFrame *pic = avctx->coded_frame;
    const uint8_t *psrc = avpkt->data;
    uint16_t *y, *u, *v;

    if (s->custom_stride )
        stride = s->custom_stride;
    else {
        /* default v210 stride: rows padded to 48-pixel groups, 8/3 bytes per pixel */
        int aligned_width = ((avctx->width + 47) / 48) * 48;
        stride = aligned_width * 8 / 3;
    }

    /* pick aligned/unaligned SIMD unpack routine when alignment changes */
    aligned_input = !((uintptr_t)psrc & 0xf) && !(stride & 0xf);
    if (aligned_input != s->aligned_input) {
        s->aligned_input = aligned_input;
        if (HAVE_MMX)
            v210_x86_init(s);
    }

    /* release the previous frame before reusing coded_frame */
    if (pic->data[0])
        avctx->release_buffer(avctx, pic);

    if (avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return -1;
    }

    pic->reference = 0;
    if (avctx->get_buffer(avctx, pic) < 0)
        return -1;

    y = (uint16_t*)pic->data[0];
    u = (uint16_t*)pic->data[1];
    v = (uint16_t*)pic->data[2];
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    for (h = 0; h < avctx->height; h++) {
        const uint32_t *src = (const uint32_t*)psrc;
        uint32_t val;

        /* bulk of the row: whole 6-pixel groups via the unpack callback */
        w = (avctx->width / 6) * 6;
        s->unpack_frame(src, y, u, v, w);
        y += w;
        u += w >> 1;
        v += w >> 1;
        src += (w << 1) / 3; /* 4 words consumed per 6 pixels */

        /* row tail when width % 6 >= 2 */
        if (w < avctx->width - 1) {
            READ_PIXELS(u, y, v);
            val = av_le2ne32(*src++);
            *y++ = val & 0x3FF;
        }
        /* row tail when width % 6 >= 4, reusing `val` */
        if (w < avctx->width - 3) {
            *u++ = (val >> 10) & 0x3FF;
            *y++ = (val >> 20) & 0x3FF;
            val = av_le2ne32(*src++);
            *v++ = val & 0x3FF;
            *y++ = (val >> 10) & 0x3FF;
        }

        psrc += stride;
        /* advance plane pointers past the line padding */
        y += pic->linesize[0] / 2 - avctx->width;
        u += pic->linesize[1] / 2 - avctx->width / 2;
        v += pic->linesize[2] / 2 - avctx->width / 2;
    }