/**
 * Initialize the WMA v1/v2 encoder.
 *
 * Validates channel count and bitrate, builds the codec extradata
 * (flags1/flags2 in little-endian layout, 4 bytes for WMAv1, 10 for WMAv2),
 * initializes the per-block-size MDCT contexts and derives block_align
 * from the bitrate.
 *
 * Fix: the results of av_malloc()/av_mallocz() were used unchecked; on
 * allocation failure AV_WL16/AV_WL32 would dereference NULL. Both paths
 * now return AVERROR(ENOMEM).
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int encode_init(AVCodecContext * avctx){
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2;
    uint8_t *extradata;

    s->avctx = avctx;

    if(avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "too many channels: got %i, need %i or fewer",
               avctx->channels, MAX_CHANNELS);
        return AVERROR(EINVAL);
    }

    if(avctx->bit_rate < 24*1000) {
        av_log(avctx, AV_LOG_ERROR, "bitrate too low: got %"PRId64", need 24000 or higher\n",
               avctx->bit_rate);
        return AVERROR(EINVAL);
    }

    /* extract flag infos */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == CODEC_ID_WMAV1) {
        extradata= av_malloc(4);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size= 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata+2, flags2);
    } else if (avctx->codec->id == CODEC_ID_WMAV2) {
        extradata= av_mallocz(10);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size= 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata+4, flags2);
    }else
        assert(0);
    avctx->extradata= extradata;
    s->use_exp_vlc = flags2 & 0x0001;
    s->use_bit_reservoir = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;

    ff_wma_init(avctx, flags2);

    /* init MDCT */
    for(i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);

    /* bit_rate is int64_t here, so the multiply does not overflow */
    avctx->block_align= s->block_align= avctx->bit_rate*s->frame_len / (avctx->sample_rate*8);
//av_log(NULL, AV_LOG_ERROR, "%d %"PRId64" %d %d\n", s->block_align, avctx->bit_rate, s->frame_len, avctx->sample_rate);
    avctx->frame_size= s->frame_len;

    return 0;
}
/* Apply a 16-bit gamma lookup table in place to the R, G and B components
 * of a row of little-endian 16-bit 4-component pixels (alpha untouched). */
static void gamma_convert(uint8_t * src[], int width, uint16_t *gamma)
{
    uint16_t *px = (uint16_t *)src[0];
    int n;

    /* walk the row one 4-component pixel at a time */
    for (n = 0; n < width; ++n, px += 4) {
        AV_WL16(px + 0, gamma[AV_RL16(px + 0)]);
        AV_WL16(px + 1, gamma[AV_RL16(px + 1)]);
        AV_WL16(px + 2, gamma[AV_RL16(px + 2)]);
    }
}
/**
 * Initialize the WMA v1/v2 encoder (legacy variant: errors reported as -1,
 * three-argument ff_mdct_init API).
 *
 * Builds the 4-byte (WMAv1) / 10-byte (WMAv2) extradata holding flags1 and
 * flags2, initializes the MDCT contexts and derives block_align from the
 * bitrate (with an int64_t cast to avoid integer overflow in the multiply).
 *
 * Fix: the results of av_malloc()/av_mallocz() were used unchecked; on
 * allocation failure AV_WL16/AV_WL32 would dereference NULL. Both paths
 * now fail with -1, matching this function's error convention.
 *
 * Returns 0 on success, -1 on failure.
 */
static int encode_init(AVCodecContext * avctx){
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2;
    uint8_t *extradata;

    s->avctx = avctx;

    if(avctx->channels > MAX_CHANNELS)
        return -1;

    if(avctx->bit_rate < 24*1000)
        return -1;

    /* extract flag infos */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == CODEC_ID_WMAV1) {
        extradata= av_malloc(4);
        if (!extradata)
            return -1;
        avctx->extradata_size= 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata+2, flags2);
    } else if (avctx->codec->id == CODEC_ID_WMAV2) {
        extradata= av_mallocz(10);
        if (!extradata)
            return -1;
        avctx->extradata_size= 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata+4, flags2);
    }else
        assert(0);
    avctx->extradata= extradata;
    s->use_exp_vlc = flags2 & 0x0001;
    s->use_bit_reservoir = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;

    ff_wma_init(avctx, flags2);

    /* init MDCT */
    for(i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0);

    /* int64_t cast: bit_rate * frame_len can exceed INT_MAX */
    avctx->block_align= s->block_align= avctx->bit_rate*(int64_t)s->frame_len / (avctx->sample_rate*8);
//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d\n", s->block_align, avctx->bit_rate, s->frame_len, avctx->sample_rate);
    avctx->frame_size= s->frame_len;

    return 0;
}
/**
 * Read one audio chunk from a Westwood Studios AUD file.
 *
 * Chunk preamble layout (little-endian): bytes 0-1 = compressed chunk size,
 * bytes 2-3 = decompressed (output) size, bytes 4-7 = chunk signature.
 *
 * Returns the number of payload bytes read on success, or a negative
 * AVERROR code on I/O error or invalid data.
 */
static int wsaud_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
    unsigned int chunk_size;
    int ret = 0;
    AVStream *st = s->streams[0];

    if (avio_read(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
        AUD_CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);

    /* validate the chunk */
    if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
        return AVERROR_INVALIDDATA;

    chunk_size = AV_RL16(&preamble[0]);

    if (st->codec->codec_id == CODEC_ID_WESTWOOD_SND1) {
        /* For Westwood SND1 audio we need to add the output size and input
           size to the start of the packet to match what is in VQA.
           Specifically, this is needed to signal when a packet should be
           decoding as raw 8-bit pcm or variable-size ADPCM. */
        int out_size = AV_RL16(&preamble[2]);
        if ((ret = av_new_packet(pkt, chunk_size + 4)))
            return ret;
        /* payload goes after the 4-byte size header prepended below */
        if ((ret = avio_read(pb, &pkt->data[4], chunk_size)) != chunk_size)
            return ret < 0 ? ret : AVERROR(EIO);
        AV_WL16(&pkt->data[0], out_size);
        AV_WL16(&pkt->data[2], chunk_size);

        /* SND1 decodes to out_size samples */
        pkt->duration = out_size;
    } else {
        ret = av_get_packet(pb, pkt, chunk_size);
        if (ret != chunk_size)
            return AVERROR(EIO);

        /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
        pkt->duration = (chunk_size * 2) / st->codec->channels;
    }
    pkt->stream_index = st->index;

    /* ret holds the byte count from the last successful read */
    return ret;
}
/**
 * Demux one packet from a SIFF file.
 *
 * Video files interleave a video packet and an optional audio packet per
 * frame; c->curstrm tracks which sub-stream of the current frame is due:
 *   -1 = frame header not yet parsed, 0 = video next, 1 = audio next.
 * Audio-only files just emit fixed-size blocks of c->block_align bytes.
 */
static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SIFFContext *c = s->priv_data;
    int size;

    if (c->has_video) {
        if (c->cur_frame >= c->frames)
            return AVERROR(EIO);
        if (c->curstrm == -1) {
            /* parse the per-frame header: total size, flags, optional
               4-byte global motion compensation data, audio sub-chunk size */
            c->pktsize = avio_rl32(s->pb) - 4;
            c->flags   = avio_rl16(s->pb);
            c->gmcsize = (c->flags & VB_HAS_GMC) ? 4 : 0;
            if (c->gmcsize)
                avio_read(s->pb, c->gmc, c->gmcsize);
            c->sndsize = (c->flags & VB_HAS_AUDIO) ? avio_rl32(s->pb): 0;
            /* audio (if present) is stored first within the frame */
            c->curstrm = !!(c->flags & VB_HAS_AUDIO);
        }

        if (!c->curstrm) {
            /* video part: packet = 2-byte flags + GMC data + frame data */
            size = c->pktsize - c->sndsize;
            if (av_new_packet(pkt, size) < 0)
                return AVERROR(ENOMEM);
            AV_WL16(pkt->data, c->flags);
            if (c->gmcsize)
                memcpy(pkt->data + 2, c->gmc, c->gmcsize);
            /* NOTE(review): avio_read return value is not checked here;
               a short read would leave trailing bytes uninitialized */
            avio_read(s->pb, pkt->data + 2 + c->gmcsize, size - c->gmcsize - 2);
            pkt->stream_index = 0;
            c->curstrm = -1;
        } else {
            /* audio part; 4 bytes of the stored size were the sub-header */
            if (av_get_packet(s->pb, pkt, c->sndsize - 4) < 0)
                return AVERROR(EIO);
            pkt->stream_index = 1;
            c->curstrm = 0;
        }
        /* first frame and all audio packets are key frames */
        if(!c->cur_frame || c->curstrm)
            pkt->flags |= AV_PKT_FLAG_KEY;
        /* frame is complete once the video part has been emitted */
        if (c->curstrm == -1)
            c->cur_frame++;
    } else {
        size = av_get_packet(s->pb, pkt, c->block_align);
        if(size <= 0)
            return AVERROR(EIO);
    }
    return pkt->size;
}
static void rgbpack_fields(void *ctx_, uint8_t *src[AVS_MAX_COMPONENTS], int sstrides[AVS_MAX_COMPONENTS], uint8_t *dst[AVS_MAX_COMPONENTS], int dstrides[AVS_MAX_COMPONENTS], int w, int h) { RGBPackContext *ctx = ctx_; uint8_t *rgb[3], *dest; unsigned val; int i, j, c; rgb[0] = src[0]; rgb[1] = src[1]; rgb[2] = src[2]; dest = dst[0]; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { val = 0; if (ctx->inbpp <= 8) { for (c = 0; c < 3; c++) val |= rgb[c][i] << ctx->shift[c]; } else { for (c = 0; c < 3; c++) val |= AV_RN16(rgb[c] + i * 2) << ctx->shift[c]; } switch (ctx->step) { case 1: dest[i] = val; break; case 2: if (ctx->be) AV_WB16(dest + i * 2, val); else AV_WL16(dest + i * 2, val); break; case 4: if (ctx->be) AV_WB32(dest + i * 4, val); else AV_WL32(dest + i * 4, val); break; } } for (c = 0; c < 3; c++) rgb[c] += sstrides[0]; dest += dstrides[0]; } }
/**
 * Parse the header of a Chronomaster DFA file.
 *
 * Header layout (little-endian after the magic): version, frame count,
 * width, height, then milliseconds-per-frame * 10 (mspf) used to derive
 * the time base. The 2-byte version is passed to the decoder via extradata.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int dfa_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    int frames;
    int version;
    uint32_t mspf;

    if (avio_rl32(pb) != MKTAG('D', 'F', 'I', 'A')) {
        av_log(s, AV_LOG_ERROR, "Invalid magic for DFA\n");
        return AVERROR_INVALIDDATA;
    }

    version = avio_rl16(pb);
    frames = avio_rl16(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_DFA;
    st->codec->width      = avio_rl16(pb);
    st->codec->height     = avio_rl16(pb);
    mspf = avio_rl32(pb);
    if (!mspf) {
        /* guard against a zero time base; 100 * 1/1000 s => 10 fps */
        av_log(s, AV_LOG_WARNING, "Zero FPS reported, defaulting to 10\n");
        mspf = 100;
    }
    avpriv_set_pts_info(st, 24, mspf, 1000);
    avio_skip(pb, 128 - 16); // padding
    st->duration = frames;
    if (ff_alloc_extradata(st->codec, 2))
        return AVERROR(ENOMEM);
    /* decoder needs the container version to pick the right behavior */
    AV_WL16(st->codec->extradata, version);
    if (version == 0x100)
        st->sample_aspect_ratio = (AVRational){2, 1};

    return 0;
}
/**
 * Initialize the WMA v1/v2 encoder (FF_API_OLD_ENCODE_AUDIO era).
 *
 * Validates channels, sample rate and bitrate, builds the little-endian
 * flags extradata (4 bytes for WMAv1, 10 for WMAv2), enables mid/side
 * stereo for 2 channels, sets up the MDCTs and derives block_align
 * (capped at MAX_CODED_SUPERFRAME_SIZE) plus the resulting real bitrate.
 *
 * Fix: the results of av_malloc()/av_mallocz() were used unchecked; on
 * allocation failure AV_WL16/AV_WL32 would dereference NULL. Both paths
 * now return AVERROR(ENOMEM).
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int encode_init(AVCodecContext * avctx){
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2;
    uint8_t *extradata;

    s->avctx = avctx;

    if(avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "too many channels: got %i, need %i or fewer",
               avctx->channels, MAX_CHANNELS);
        return AVERROR(EINVAL);
    }

    if (avctx->sample_rate > 48000) {
        av_log(avctx, AV_LOG_ERROR, "sample rate is too high: %d > 48kHz",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }

    if(avctx->bit_rate < 24*1000) {
        av_log(avctx, AV_LOG_ERROR, "bitrate too low: got %i, need 24000 or higher\n",
               avctx->bit_rate);
        return AVERROR(EINVAL);
    }

    /* extract flag infos */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == AV_CODEC_ID_WMAV1) {
        extradata= av_malloc(4);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size= 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata+2, flags2);
    } else if (avctx->codec->id == AV_CODEC_ID_WMAV2) {
        extradata= av_mallocz(10);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size= 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata+4, flags2);
    }else
        assert(0);
    avctx->extradata= extradata;
    s->use_exp_vlc = flags2 & 0x0001;
    s->use_bit_reservoir = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;
    if (avctx->channels == 2)
        s->ms_stereo = 1;

    ff_wma_init(avctx, flags2);

    /* init MDCT */
    for(i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);

    /* int64_t cast: bit_rate * frame_len can exceed INT_MAX */
    s->block_align = avctx->bit_rate * (int64_t)s->frame_len /
                     (avctx->sample_rate * 8);
    s->block_align = FFMIN(s->block_align, MAX_CODED_SUPERFRAME_SIZE);
    avctx->block_align = s->block_align;
    /* report the actually achievable bitrate after the cap above */
    avctx->bit_rate = avctx->block_align * 8LL * avctx->sample_rate /
                      s->frame_len;
    avctx->frame_size = avctx->delay = s->frame_len;

#if FF_API_OLD_ENCODE_AUDIO
    avctx->coded_frame = &s->frame;
    avcodec_get_frame_defaults(avctx->coded_frame);
#endif

    return 0;
}
/**
 * Fill in the muxer's WAVEFORMATEX header for a libavcodec audio encoder.
 *
 * Derives the AVI rate/scale fields from either a fixed block_align (CBR)
 * or the encoder frame size (VBR), then writes codec-specific extra header
 * fields for IMA ADPCM (0x11) and MP3 (0x55), or copies raw extradata for
 * everything else.
 *
 * NOTE(review): the initial malloc() and the final realloc() results are
 * used without NULL checks — an OOM here dereferences/loses the buffer.
 *
 * Returns 1 (this function has no failure path).
 */
static int bind_lavc(audio_encoder_t *encoder, muxer_stream_t *mux_a)
{
    /* oversize the allocation; it is shrunk to the real cbSize at the end */
    mux_a->wf = malloc(sizeof(WAVEFORMATEX)+lavc_actx->extradata_size+256);
    mux_a->wf->wFormatTag = lavc_param_atag;
    mux_a->wf->nChannels = lavc_actx->channels;
    mux_a->wf->nSamplesPerSec = lavc_actx->sample_rate;
    mux_a->wf->nAvgBytesPerSec = (lavc_actx->bit_rate / 8);
    mux_a->avg_rate= lavc_actx->bit_rate;
    mux_a->h.dwRate = mux_a->wf->nAvgBytesPerSec;
    if(lavc_actx->block_align)
        mux_a->h.dwSampleSize = mux_a->h.dwScale = lavc_actx->block_align;
    else {
        mux_a->h.dwScale = (mux_a->wf->nAvgBytesPerSec * lavc_actx->frame_size)/ mux_a->wf->nSamplesPerSec; /* for cbr */

        /* if bytes-per-frame is not integral, fall back to a
           samples-based time base with variable-size blocks */
        if ((mux_a->wf->nAvgBytesPerSec *
             lavc_actx->frame_size) % mux_a->wf->nSamplesPerSec) {
            mux_a->h.dwScale = lavc_actx->frame_size;
            mux_a->h.dwRate = lavc_actx->sample_rate;
            mux_a->h.dwSampleSize = 0; // Blocksize not constant
        } else
            mux_a->h.dwSampleSize = 0;
    }
    if(mux_a->h.dwSampleSize)
        mux_a->wf->nBlockAlign = mux_a->h.dwSampleSize;
    else
        mux_a->wf->nBlockAlign = 1;
    /* preload is in milliseconds; round the buffer down to block alignment */
    mux_a->h.dwSuggestedBufferSize = (encoder->params.audio_preload*mux_a->wf->nAvgBytesPerSec)/1000;
    mux_a->h.dwSuggestedBufferSize -= mux_a->h.dwSuggestedBufferSize % mux_a->wf->nBlockAlign;

    switch(lavc_param_atag) {
    case 0x11: /* imaadpcm */
        mux_a->wf->wBitsPerSample = 4;
        mux_a->wf->cbSize = 2;
        /* extra field right after WAVEFORMATEX: samples per block */
        AV_WL16(mux_a->wf+1, lavc_actx->frame_size);
        break;
    case 0x55: /* mp3 */
        mux_a->wf->cbSize = 12;
        mux_a->wf->wBitsPerSample = 0; /* does not apply */
        ((MPEGLAYER3WAVEFORMAT *) (mux_a->wf))->wID = 1;
        ((MPEGLAYER3WAVEFORMAT *) (mux_a->wf))->fdwFlags = 2;
        ((MPEGLAYER3WAVEFORMAT *) (mux_a->wf))->nBlockSize = mux_a->wf->nBlockAlign;
        ((MPEGLAYER3WAVEFORMAT *) (mux_a->wf))->nFramesPerBlock = 1;
        ((MPEGLAYER3WAVEFORMAT *) (mux_a->wf))->nCodecDelay = 0;
        break;
    default:
        mux_a->wf->wBitsPerSample = 0; /* Unknown */
        if (lavc_actx->extradata && (lavc_actx->extradata_size > 0))
        {
            /* pass the encoder's extradata through verbatim */
            memcpy(mux_a->wf+1, lavc_actx->extradata, lavc_actx->extradata_size);
            mux_a->wf->cbSize = lavc_actx->extradata_size;
        }
        else
            mux_a->wf->cbSize = 0;
        break;
    }

    // Fix allocation
    mux_a->wf = realloc(mux_a->wf, sizeof(WAVEFORMATEX)+mux_a->wf->cbSize);

    encoder->input_format = AF_FORMAT_S16_NE;
    encoder->min_buffer_size = mux_a->h.dwSuggestedBufferSize;
    encoder->max_buffer_size = mux_a->h.dwSuggestedBufferSize*2;

    return 1;
}
/**
 * Parse the header of a LucasArts SMUSH file.
 *
 * Two container flavors are handled: ANIM (version 0, carries a 256-entry
 * RGB palette in the AHDR chunk) and SANM (version 1, carries dimensions
 * in SHDR and an FLHD chunk that may announce a Wave audio sub-stream).
 * A video stream is always created; an audio stream only when a Wave
 * chunk was found.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int smush_read_header(AVFormatContext *ctx)
{
    SMUSHContext *smush = ctx->priv_data;
    AVIOContext *pb = ctx->pb;
    AVStream *vst, *ast;
    uint32_t magic, nframes, size, subversion, i;
    uint32_t width = 0, height = 0, got_audio = 0, read = 0;
    uint32_t sample_rate, channels, palette[256];

    magic = avio_rb32(pb);
    avio_skip(pb, 4); // skip movie size

    if (magic == MKBETAG('A', 'N', 'I', 'M')) {
        if (avio_rb32(pb) != MKBETAG('A', 'H', 'D', 'R'))
            return AVERROR_INVALIDDATA;

        size = avio_rb32(pb);
        /* AHDR must hold subversion + nframes + pad + 256 RGB triplets */
        if (size < 3 * 256 + 6)
            return AVERROR_INVALIDDATA;

        smush->version = 0;
        subversion = avio_rl16(pb);
        nframes = avio_rl16(pb);
        if (!nframes)
            return AVERROR_INVALIDDATA;

        avio_skip(pb, 2); // skip pad

        for (i = 0; i < 256; i++)
            palette[i] = avio_rb24(pb);

        avio_skip(pb, size - (3 * 256 + 6));
    } else if (magic == MKBETAG('S', 'A', 'N', 'M')) {
        if (avio_rb32(pb) != MKBETAG('S', 'H', 'D', 'R'))
            return AVERROR_INVALIDDATA;

        size = avio_rb32(pb);
        if (size < 14)
            return AVERROR_INVALIDDATA;

        smush->version = 1;
        subversion = avio_rl16(pb);
        nframes = avio_rl32(pb);
        if (!nframes)
            return AVERROR_INVALIDDATA;

        avio_skip(pb, 2); // skip pad
        width = avio_rl16(pb);
        height = avio_rl16(pb);
        avio_skip(pb, 2); // skip pad
        avio_skip(pb, size - 14);

        if (avio_rb32(pb) != MKBETAG('F', 'L', 'H', 'D'))
            return AVERROR_INVALIDDATA;

        /* scan FLHD sub-chunks until the Wave audio parameters are found
           or the chunk is exhausted */
        size = avio_rb32(pb);
        while (!got_audio && ((read + 8) < size)) {
            uint32_t sig, chunk_size;

            if (avio_feof(pb))
                return AVERROR_EOF;

            sig = avio_rb32(pb);
            chunk_size = avio_rb32(pb);
            read += 8;
            switch (sig) {
            case MKBETAG('W', 'a', 'v', 'e'):
                got_audio = 1;
                sample_rate = avio_rl32(pb);
                if (!sample_rate)
                    return AVERROR_INVALIDDATA;

                channels = avio_rl32(pb);
                if (!channels)
                    return AVERROR_INVALIDDATA;

                avio_skip(pb, chunk_size - 8);
                read += chunk_size;
                break;
            case MKBETAG('B', 'l', '1', '6'):
            case MKBETAG('A', 'N', 'N', 'O'):
                avio_skip(pb, chunk_size);
                read += chunk_size;
                break;
            default:
                return AVERROR_INVALIDDATA;
                break;
            }
        }

        avio_skip(pb, size - read);
    } else {
        av_log(ctx, AV_LOG_ERROR, "Wrong magic\n");
        return AVERROR_INVALIDDATA;
    }

    vst = avformat_new_stream(ctx, 0);
    if (!vst)
        return AVERROR(ENOMEM);

    smush->video_stream_index = vst->index;

    /* fixed 15 fps time base */
    avpriv_set_pts_info(vst, 64, 1, 15);

    vst->start_time = 0;
    vst->duration = vst->nb_frames = nframes;
    vst->avg_frame_rate = av_inv_q(vst->time_base);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id = AV_CODEC_ID_SANM;
    vst->codec->codec_tag = 0;
    vst->codec->width = width;
    vst->codec->height = height;

    if (!smush->version) {
        /* ANIM: hand the decoder the subversion plus the palette */
        if (ff_alloc_extradata(vst->codec, 1024 + 2))
            return AVERROR(ENOMEM);

        AV_WL16(vst->codec->extradata, subversion);
        for (i = 0; i < 256; i++)
            AV_WL32(vst->codec->extradata + 2 + i * 4, palette[i]);
    }

    if (got_audio) {
        ast = avformat_new_stream(ctx, 0);
        if (!ast)
            return AVERROR(ENOMEM);

        smush->audio_stream_index = ast->index;

        ast->start_time = 0;
        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id = AV_CODEC_ID_ADPCM_VIMA;
        ast->codec->codec_tag = 0;
        ast->codec->sample_rate = sample_rate;
        ast->codec->channels = channels;

        avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    }

    return 0;
}
/**
 * Initialize the WMA v1/v2 encoder (modern variant with full error
 * checking).
 *
 * Validates channels, sample rate and bitrate, builds the little-endian
 * flags extradata (4 bytes for WMAv1, 10 for WMAv2), enables mid/side
 * stereo for 2 channels, sets up the per-block-size MDCT contexts and
 * derives block_align from the bitrate, capped at
 * MAX_CODED_SUPERFRAME_SIZE.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2, block_align;
    uint8_t *extradata;
    int ret;

    s->avctx = avctx;

    if (avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR,
               "too many channels: got %i, need %i or fewer\n",
               avctx->channels, MAX_CHANNELS);
        return AVERROR(EINVAL);
    }

    if (avctx->sample_rate > 48000) {
        av_log(avctx, AV_LOG_ERROR, "sample rate is too high: %d > 48kHz\n",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }

    if (avctx->bit_rate < 24 * 1000) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate too low: got %i, need 24000 or higher\n",
               avctx->bit_rate);
        return AVERROR(EINVAL);
    }

    /* extract flag infos */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == AV_CODEC_ID_WMAV1) {
        extradata = av_malloc(4);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata + 2, flags2);
    } else if (avctx->codec->id == AV_CODEC_ID_WMAV2) {
        extradata = av_mallocz(10);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata + 4, flags2);
    } else {
        /* only WMAV1/WMAV2 can reach this init function */
        av_assert0(0);
    }
    avctx->extradata          = extradata;
    s->use_exp_vlc            = flags2 & 0x0001;
    s->use_bit_reservoir      = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;
    if (avctx->channels == 2)
        s->ms_stereo = 1;

    if ((ret = ff_wma_init(avctx, flags2)) < 0)
        return ret;

    /* init MDCT */
    for (i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);

    /* int64_t cast: bit_rate * frame_len can exceed INT_MAX */
    block_align        = avctx->bit_rate * (int64_t) s->frame_len /
                         (avctx->sample_rate * 8);
    block_align        = FFMIN(block_align, MAX_CODED_SUPERFRAME_SIZE);
    avctx->block_align = block_align;

    avctx->frame_size = avctx->initial_padding = s->frame_len;

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { TiffContext *const s = avctx->priv_data; AVFrame *const p = data; ThreadFrame frame = { .f = data }; unsigned off; int le, ret, plane, planes; int i, j, entries, stride; unsigned soff, ssize; uint8_t *dst; GetByteContext stripsizes; GetByteContext stripdata; bytestream2_init(&s->gb, avpkt->data, avpkt->size); // parse image header if ((ret = ff_tdecode_header(&s->gb, &le, &off))) { av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n"); return ret; } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) { av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n"); return AVERROR_INVALIDDATA; } s->le = le; // TIFF_BPP is not a required tag and defaults to 1 s->bppcount = s->bpp = 1; s->photometric = TIFF_PHOTOMETRIC_NONE; s->compr = TIFF_RAW; s->fill_order = 0; free_geotags(s); // Reset these offsets so we can tell if they were set this frame s->stripsizesoff = s->strippos = 0; /* parse image file directory */ bytestream2_seek(&s->gb, off, SEEK_SET); entries = ff_tget_short(&s->gb, le); if (bytestream2_get_bytes_left(&s->gb) < entries * 12) return AVERROR_INVALIDDATA; for (i = 0; i < entries; i++) { if ((ret = tiff_decode_tag(s, p)) < 0) return ret; } for (i = 0; i<s->geotag_count; i++) { const char *keyname = get_geokey_name(s->geotags[i].key); if (!keyname) { av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key); continue; } if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) { av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key); continue; } ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0); if (ret<0) { av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname); return ret; } } if (!s->strippos && !s->stripoff) { av_log(avctx, AV_LOG_ERROR, "Image data is missing\n"); return AVERROR_INVALIDDATA; } /* now we have the data and 
may start decoding */ if ((ret = init_image(s, &frame)) < 0) return ret; if (s->strips == 1 && !s->stripsize) { av_log(avctx, AV_LOG_WARNING, "Image data size missing\n"); s->stripsize = avpkt->size - s->stripoff; } if (s->stripsizesoff) { if (s->stripsizesoff >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff, avpkt->size - s->stripsizesoff); } if (s->strippos) { if (s->strippos >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripdata, avpkt->data + s->strippos, avpkt->size - s->strippos); } if (s->rps <= 0) { av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps); return AVERROR_INVALIDDATA; } planes = s->planar ? s->bppcount : 1; for (plane = 0; plane < planes; plane++) { stride = p->linesize[plane]; dst = p->data[plane]; for (i = 0; i < s->height; i += s->rps) { if (s->stripsizesoff) ssize = ff_tget(&stripsizes, s->sstype, le); else ssize = s->stripsize; if (s->strippos) soff = ff_tget(&stripdata, s->sot, le); else soff = s->stripoff; if (soff > avpkt->size || ssize > avpkt->size - soff) { av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n"); return AVERROR_INVALIDDATA; } if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i, FFMIN(s->rps, s->height - i))) < 0) { if (avctx->err_recognition & AV_EF_EXPLODE) return ret; break; } dst += s->rps * stride; } if (s->predictor == 2) { if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) { av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported"); return AVERROR_PATCHWELCOME; } dst = p->data[plane]; soff = s->bpp >> 3; if (s->planar) soff = FFMAX(soff / s->bppcount, 1); ssize = s->width * soff; if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - 
soff)); dst += stride; } } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff)); dst += stride; } } else { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j++) dst[j] += dst[j - soff]; dst += stride; } } } if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) { dst = p->data[plane]; for (i = 0; i < s->height; i++) { for (j = 0; j < p->linesize[plane]; j++) dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j]; dst += stride; } } } if (s->planar && s->bppcount > 2) { FFSWAP(uint8_t*, p->data[0], p->data[2]); FFSWAP(int, p->linesize[0], p->linesize[2]); FFSWAP(uint8_t*, p->data[0], p->data[1]); FFSWAP(int, p->linesize[0], p->linesize[1]); }
/**
 * Encode buffered input audio to one AC-3 packet and emit it as an output
 * frame, optionally wrapped in an IEC 61937 header for S/PDIF passthrough.
 *
 * Uses the send/receive libavcodec API when available, the old
 * avcodec_encode_audio2() API otherwise.
 *
 * Returns 0 on success or when more input is needed, -1 on error.
 */
static int filter_out(struct af_instance *af)
{
    af_ac3enc_t *s = af->priv;

    if (!s->pending)
        return 0;

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        MP_FATAL(af, "Could not allocate memory \n");
        return -1;
    }
    int err = -1;
    AVPacket pkt = {0};
    av_init_packet(&pkt);

#if HAVE_AVCODEC_NEW_CODEC_API
    // Send input as long as it wants.
    while (1) {
        err = read_input_frame(af, frame);
        if (err < 0)
            goto done;
        if (err == 0)
            break;
        err = -1;
        int lavc_ret = avcodec_send_frame(s->lavc_actx, frame);
        // On EAGAIN, we're supposed to read remaining output.
        if (lavc_ret == AVERROR(EAGAIN))
            break;
        if (lavc_ret < 0) {
            MP_FATAL(af, "Encode failed.\n");
            goto done;
        }
        s->encoder_buffered += s->input->samples;
        s->input->samples = 0;
    }
    int lavc_ret = avcodec_receive_packet(s->lavc_actx, &pkt);
    if (lavc_ret == AVERROR(EAGAIN)) {
        // Need to buffer more input.
        err = 0;
        goto done;
    }
    if (lavc_ret < 0) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#else
    err = read_input_frame(af, frame);
    if (err < 0)
        goto done;
    if (err == 0)
        goto done;
    err = -1;
    int ok;
    int lavc_ret = avcodec_encode_audio2(s->lavc_actx, &pkt, frame, &ok);
    s->input->samples = 0;
    if (lavc_ret < 0 || !ok) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#endif

    MP_DBG(af, "avcodec_encode_audio got %d, pending %d.\n",
           pkt.size, s->pending->samples + s->input->samples);

    s->encoder_buffered -= AC3_FRAME_SIZE;

    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, s->out_samples);
    if (!out)
        goto done;
    mp_audio_copy_attributes(out, s->pending);

    int frame_size = pkt.size;
    int header_len = 0;
    char hdr[8];

    if (s->cfg_add_iec61937_header && pkt.size > 5) {
        /* build the 8-byte IEC 61937 burst preamble; the payload is
           zero-padded to a full AC-3 burst below */
        int bsmod = pkt.data[5] & 0x7;
        int len = frame_size;

        frame_size = AC3_FRAME_SIZE * 2 * 2;
        header_len = 8;

        AV_WL16(hdr,     0xF872);   // iec 61937 syncword 1
        AV_WL16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2
        hdr[5] = bsmod;             // bsmod
        hdr[4] = 0x01;              // data-type ac3
        AV_WL16(hdr + 6, len << 3); // number of bits in payload
    }

    if (frame_size > out->samples * out->sstride)
        abort();

    char *buf = (char *)out->planes[0];
    memcpy(buf, hdr, header_len);
    memcpy(buf + header_len, pkt.data, pkt.size);
    /* zero-pad the remainder of the burst */
    memset(buf + header_len + pkt.size, 0,
           frame_size - (header_len + pkt.size));
    /* payload is byte-swapped for the S/PDIF 16-bit framing */
    swap_16((uint16_t *)(buf + header_len), pkt.size / 2);
    out->samples = frame_size / out->sstride;
    af_add_output_frame(af, out);

    err = 0;
done:
    av_packet_unref(&pkt);
    av_frame_free(&frame);
    update_delay(af);
    return err;
}
/**
 * Read the next audio or video chunk from a Westwood VQA file.
 *
 * The audio stream is created lazily on the first SND* chunk, since its
 * codec depends on which SND tag appears. Unknown chunks are skipped.
 *
 * Fixes: av_new_packet() failure was reported as AVERROR(EIO) instead of
 * AVERROR(ENOMEM); extradata_size was set before the allocation was
 * checked (leaving a stale size with a NULL pointer on OOM); and the
 * already-filled packet leaked on stream/extradata allocation failure.
 *
 * Returns the number of payload bytes read, or a negative AVERROR code.
 */
static int wsvqa_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = -1;
    unsigned char preamble[VQA_PREAMBLE_SIZE];
    unsigned int chunk_type;
    unsigned int chunk_size;
    int skip_byte;

    while (avio_read(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
        chunk_type = AV_RB32(&preamble[0]);
        chunk_size = AV_RB32(&preamble[4]);
        /* chunks are padded to 16-bit alignment */
        skip_byte = chunk_size & 0x01;

        if ((chunk_type == SND0_TAG) || (chunk_type == SND1_TAG) ||
            (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {

            if (av_new_packet(pkt, chunk_size))
                return AVERROR(ENOMEM);
            ret = avio_read(pb, pkt->data, chunk_size);
            if (ret != chunk_size) {
                av_free_packet(pkt);
                return AVERROR(EIO);
            }

            switch (chunk_type) {
            case SND0_TAG:
            case SND1_TAG:
            case SND2_TAG:
                /* create the audio stream once the codec is known */
                if (wsvqa->audio_stream_index == -1) {
                    AVStream *st = avformat_new_stream(s, NULL);
                    if (!st) {
                        av_free_packet(pkt);
                        return AVERROR(ENOMEM);
                    }
                    wsvqa->audio_stream_index = st->index;
                    /* fall back to the standard VQA audio parameters if
                       the header did not provide them */
                    if (!wsvqa->sample_rate)
                        wsvqa->sample_rate = 22050;
                    if (!wsvqa->channels)
                        wsvqa->channels = 1;
                    if (!wsvqa->bps)
                        wsvqa->bps = 8;
                    st->codec->sample_rate = wsvqa->sample_rate;
                    st->codec->bits_per_coded_sample = wsvqa->bps;
                    st->codec->channels = wsvqa->channels;
                    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

                    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

                    switch (chunk_type) {
                    case SND0_TAG:
                        if (wsvqa->bps == 16)
                            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
                        else
                            st->codec->codec_id = AV_CODEC_ID_PCM_U8;
                        break;
                    case SND1_TAG:
                        st->codec->codec_id = AV_CODEC_ID_WESTWOOD_SND1;
                        break;
                    case SND2_TAG:
                        st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS;
                        /* allocate before publishing the size so a failed
                           alloc cannot leave size > 0 with a NULL pointer */
                        st->codec->extradata = av_mallocz(2 + FF_INPUT_BUFFER_PADDING_SIZE);
                        if (!st->codec->extradata) {
                            av_free_packet(pkt);
                            return AVERROR(ENOMEM);
                        }
                        st->codec->extradata_size = 2;
                        AV_WL16(st->codec->extradata, wsvqa->version);
                        break;
                    }
                }

                pkt->stream_index = wsvqa->audio_stream_index;
                switch (chunk_type) {
                case SND1_TAG:
                    /* unpacked size is stored in header */
                    pkt->duration = AV_RL16(pkt->data) / wsvqa->channels;
                    break;
                case SND2_TAG:
                    /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
                    pkt->duration = (chunk_size * 2) / wsvqa->channels;
                    break;
                }
                break;
            case VQFR_TAG:
                pkt->stream_index = wsvqa->video_stream_index;
                pkt->duration = 1;
                break;
            }

            /* stay on 16-bit alignment */
            if (skip_byte)
                avio_skip(pb, 1);

            return ret;
        } else {
            switch(chunk_type){
            case CMDS_TAG:
                break;
            default:
                av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n",
                       chunk_type);
            }
            avio_skip(pb, chunk_size + skip_byte);
        }
    }

    return ret;
}
/**
 * Rotate one horizontal band of the output frame (one slice of a threaded
 * job).
 *
 * For the four axis-aligned angles (0, 90, 180, 270 degrees) with matching
 * dimensions, rows are copied via simple_rotate(). The general case maps
 * each output pixel back into the source with 16.16 fixed-point rotated
 * coordinates (FIXP scale) and samples either bilinearly or by
 * nearest-neighbor clamp.
 *
 * Always returns 0 (per the AVFilter slice-thread callback contract).
 */
static int filter_slice(RotContext *rot, ThreadData *td, int job, int nb_jobs)
{
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int outw = td->outw, outh = td->outh;
    const int inw = td->inw, inh = td->inh;
    const int plane = td->plane;
    const int xi = td->xi, yi = td->yi;
    const int c = td->c, s = td->s;
    /* split the output rows evenly across jobs */
    const int start = (outh *  job   ) / nb_jobs;
    const int end   = (outh * (job+1)) / nb_jobs;
    /* advance the fixed-point row accumulators to this job's first row */
    int xprime = td->xprime + start * s;
    int yprime = td->yprime + start * c;
    int i, j, x, y;

    for (j = start; j < end; j++) {
        /* center the rotation on the middle of the input */
        x = xprime + xi + FIXP*(inw-1)/2;
        y = yprime + yi + FIXP*(inh-1)/2;

        if (fabs(rot->angle - 0) < FLT_EPSILON && outw == inw && outh == inh) {
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + j * in->linesize[plane],
                           in->linesize[plane], 0, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + j * rot->draw.pixelstep[plane],
                           in->linesize[plane], 1, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - M_PI) < FLT_EPSILON && outw == inw && outh == inh) {
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + (outh-j-1) * in->linesize[plane],
                           in->linesize[plane], 2, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - 3*M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + (outh-j-1) * rot->draw.pixelstep[plane],
                           in->linesize[plane], 3, rot->draw.pixelstep[plane], outw);
        } else {

        for (i = 0; i < outw; i++) {
            int32_t v;
            int x1, y1;
            uint8_t *pin, *pout;
            /* back to integer pixel coordinates */
            x1 = x>>16;
            y1 = y>>16;

            /* the out-of-range values avoid border artifacts */
            if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
                uint8_t inp_inv[4]; /* interpolated input value */
                pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
                if (rot->use_bilinear) {
                    pin = rot->interpolate_bilinear(inp_inv,
                                                    in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
                                                    x, y, inw-1, inh-1);
                } else {
                    /* nearest neighbor, clamped to the frame */
                    int x2 = av_clip(x1, 0, inw-1);
                    int y2 = av_clip(y1, 0, inh-1);
                    pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
                }

                /* copy one pixel at the plane's byte width */
                switch (rot->draw.pixelstep[plane]) {
                case 1:
                    *pout = *pin;
                    break;
                case 2:
                    v = AV_RL16(pin);
                    AV_WL16(pout, v);
                    break;
                case 3:
                    v = AV_RB24(pin);
                    AV_WB24(pout, v);
                    break;
                case 4:
                    *((uint32_t *)pout) = *((uint32_t *)pin);
                    break;
                default:
                    memcpy(pout, pin, rot->draw.pixelstep[plane]);
                    break;
                }
            }
            /* step along the rotated source row */
            x += c;
            y -= s;
        }
        }

        xprime += s;
        yprime += c;
    }

    return 0;
}
/**
 * Encode one frame as a Targa (TGA) image.
 *
 * Writes an 18-byte TGA header, then tries RLE compression (unless
 * coder_type is raw); if RLE does not shrink the data, the image is stored
 * uncompressed. A 26-byte TGA v2 footer is always appended.
 *
 * Returns the number of bytes written to outbuf, or a negative AVERROR
 * code on failure.
 */
static int targa_encode_frame(AVCodecContext *avctx,
                              unsigned char *outbuf,
                              int buf_size, void *data){
    AVFrame *p = (AVFrame *)data;
    int bpp, picsize, datasize = -1;
    uint8_t *out;

    /* TGA stores dimensions in 16-bit fields */
    if(avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    /* 45 = 18-byte header + 26-byte footer + margin */
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if(buf_size < picsize + 45) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return AVERROR(EINVAL);
    }

    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    /* zero out the header and only set applicable fields */
    memset(outbuf, 0, 12);
    AV_WL16(outbuf+12, avctx->width);
    AV_WL16(outbuf+14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    outbuf[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    switch(avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        outbuf[2] = TGA_BW;     /* uncompressed grayscale image */
        outbuf[16] = 8;         /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        outbuf[2] = TGA_RGB;    /* uncompressed true-color image */
        outbuf[16] = 16;        /* bpp */
        break;
    case PIX_FMT_BGR24:
        outbuf[2] = TGA_RGB;    /* uncompressed true-color image */
        outbuf[16] = 24;        /* bpp */
        break;
    case PIX_FMT_BGRA:
        outbuf[2] = TGA_RGB;    /* uncompressed true-color image */
        outbuf[16] = 32;        /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = outbuf[16] >> 3;

    out = outbuf + 18;  /* skip past the header we just output */

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if(datasize >= 0)
        outbuf[2] |= 8;

    /* if RLE didn't make it smaller, go back to no compression */
    else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    return out + 26 - outbuf;
}
/**
 * Append one frame of per-channel peak values to the peak envelope buffer
 * (BWF "levl" chunk data).
 *
 * For each channel the positive and (sign-flipped) negative peaks are
 * optionally scaled to 8 bits, collapsed to one value when peak_ppv == 1,
 * folded into the running peak-of-peaks, and serialized as u8 or LE u16.
 *
 * Fix: the buffer was grown with `p = av_realloc(p, size)` after the size
 * counter had already been incremented — on failure the old buffer leaked
 * and the stored size no longer matched the (NULL) buffer. The realloc
 * result is now checked via a temporary, and the size is only updated on
 * success; on failure the old buffer is freed so later calls bail out on
 * the !wav->peak_output guard.
 */
static void peak_write_frame(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    int peak_of_peaks;
    int c;

    if (!wav->peak_output)
        return;

    for (c = 0; c < enc->channels; c++) {
        /* negative peaks are tracked as minima; store their magnitude */
        wav->peak_maxneg[c] = -wav->peak_maxneg[c];

        if (wav->peak_bps == 2 && wav->peak_format == PEAK_FORMAT_UINT8) {
            /* scale 16-bit sample peaks down to the 8-bit peak format */
            wav->peak_maxpos[c] = wav->peak_maxpos[c] / 256;
            wav->peak_maxneg[c] = wav->peak_maxneg[c] / 256;
        }

        /* one peak value per frame: keep the larger magnitude */
        if (wav->peak_ppv == 1)
            wav->peak_maxpos[c] =
                FFMAX(wav->peak_maxpos[c], wav->peak_maxneg[c]);

        peak_of_peaks = FFMAX3(wav->peak_maxpos[c], wav->peak_maxneg[c],
                               wav->peak_pop);
        if (peak_of_peaks > wav->peak_pop)
            wav->peak_pos_pop = wav->peak_num_frames;
        wav->peak_pop = peak_of_peaks;

        if (wav->peak_outbuf_size - wav->peak_outbuf_bytes <
            wav->peak_format * wav->peak_ppv) {
            /* grow via a temporary so the old buffer is not lost if
               av_realloc() fails; only commit the new size on success */
            int new_size = wav->peak_outbuf_size + PEAK_BUFFER_SIZE;
            uint8_t *tmp = av_realloc(wav->peak_output, new_size);
            if (!tmp) {
                av_log(s, AV_LOG_ERROR, "No memory for peak data\n");
                av_freep(&wav->peak_output); /* disables further peak writes */
                return;
            }
            wav->peak_output      = tmp;
            wav->peak_outbuf_size = new_size;
        }

        if (wav->peak_format == PEAK_FORMAT_UINT8) {
            wav->peak_output[wav->peak_outbuf_bytes++] = wav->peak_maxpos[c];
            if (wav->peak_ppv == 2) {
                wav->peak_output[wav->peak_outbuf_bytes++] = wav->peak_maxneg[c];
            }
        } else {
            AV_WL16(wav->peak_output + wav->peak_outbuf_bytes,
                    wav->peak_maxpos[c]);
            wav->peak_outbuf_bytes += 2;
            if (wav->peak_ppv == 2) {
                AV_WL16(wav->peak_output + wav->peak_outbuf_bytes,
                        wav->peak_maxneg[c]);
                wav->peak_outbuf_bytes += 2;
            }
        }
        wav->peak_maxpos[c] = 0;
        wav->peak_maxneg[c] = 0;
    }
    wav->peak_num_frames++;
}
/**
 * Build the next AVPacket from the pending Interplay MVE chunk state.
 *
 * Priority: if an audio chunk is pending, emit it; else if a decode map is
 * pending, emit decode map + video data as one packet (prefixed with the
 * 16-bit decode-map size); else seek to the next chunk and report done.
 *
 * Fix: the "audio codec unknown" log message was built from two adjacent
 * string literals with no separating space ("…beforeaudio…"); the space is
 * now present.
 *
 * @return CHUNK_VIDEO on a delivered packet, CHUNK_DONE when nothing was
 *         pending, CHUNK_BAD / CHUNK_EOF / CHUNK_NOMEM on errors
 */
static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt) {

    int chunk_type;

    if (s->audio_chunk_offset && s->audio_channels && s->audio_bits) {
        if (s->audio_type == AV_CODEC_ID_NONE) {
            av_log(NULL, AV_LOG_ERROR, "Can not read audio packet before "
                   "audio codec is known\n");
            return CHUNK_BAD;
        }

        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != AV_CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        avio_seek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = s->audio_frame_count;

        /* audio frame maintenance */
        if (s->audio_type != AV_CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
            (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            /* DPCM chunks carry a 6-byte header plus one initial byte per
             * channel before the actual delta stream */
            s->audio_frame_count +=
                (s->audio_chunk_size - 6 - s->audio_channels) / s->audio_channels;

        av_log(NULL, AV_LOG_TRACE, "sending audio frame with pts %"PRId64" (%d audio frames)\n",
                pkt->pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together */

        if (av_new_packet(pkt, 2 + s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        if (s->has_palette) {
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (pal) {
                memcpy(pal, s->palette, AVPALETTE_SIZE);
                s->has_palette = 0;
            }
        }

        if (s->changed) {
            ff_add_param_change(pkt, 0, 0, 0, s->video_width, s->video_height);
            s->changed = 0;
        }
        pkt->pos= s->decode_map_chunk_offset;
        avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;

        /* packet layout: [LE16 decode-map size][decode map][video data] */
        AV_WL16(pkt->data, s->decode_map_chunk_size);
        if (avio_read(pb, pkt->data + 2, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        avio_seek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;

        if (avio_read(pb, pkt->data + 2 + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        av_log(NULL, AV_LOG_TRACE, "sending video frame with pts %"PRId64"\n", pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        /* nothing pending: advance to the next chunk in the file */
        avio_seek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;

    }

    return chunk_type;
}
/**
 * Decode one Smacker video frame (AVPacket-based API).
 *
 * Packet layout: 1 flags byte, 768 bytes of big-endian RGB palette
 * (256 * 3), then the Huffman-coded block stream. The image is processed in
 * 4x4 blocks in raster order; each block is MONO (2-color + bitmap), FULL
 * (explicit pixels, three sub-modes for the SMK4 tag), SKIP (keep previous
 * content) or FILL (solid color from the type code's high byte).
 *
 * @return packet size on success (input always fully consumed), or a
 *         negative AVERROR on truncated input / buffer failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    SmackVContext * const smk = avctx->priv_data;
    uint8_t *out;
    uint32_t *pal;
    GetByteContext gb2;
    GetBitContext gb;
    int blocks, blk, bw, bh;
    int i, ret;
    int stride;
    int flags;

    /* need at least flags byte + full 768-byte palette */
    if (avpkt->size <= 769)
        return AVERROR_INVALIDDATA;

    if ((ret = ff_reget_buffer(avctx, smk->pic)) < 0)
        return ret;

    /* make the palette available on the way out */
    pal = (uint32_t*)smk->pic->data[1];
    bytestream2_init(&gb2, avpkt->data, avpkt->size);
    flags = bytestream2_get_byteu(&gb2);
    smk->pic->palette_has_changed = flags & 1;
    smk->pic->key_frame = !!(flags & 2);
    if (smk->pic->key_frame)
        smk->pic->pict_type = AV_PICTURE_TYPE_I;
    else
        smk->pic->pict_type = AV_PICTURE_TYPE_P;

    /* palette entries are stored as BE24 RGB; force alpha to opaque */
    for(i = 0; i < 256; i++)
        *pal++ = 0xFFU << 24 | bytestream2_get_be24u(&gb2);

    /* reset the per-frame "last seen" state of all four Huffman trees */
    last_reset(smk->mmap_tbl, smk->mmap_last);
    last_reset(smk->mclr_tbl, smk->mclr_last);
    last_reset(smk->full_tbl, smk->full_last);
    last_reset(smk->type_tbl, smk->type_last);
    if ((ret = init_get_bits8(&gb, avpkt->data + 769, avpkt->size - 769)) < 0)
        return ret;

    blk = 0;
    bw = avctx->width >> 2;     /* image is decoded in 4x4 blocks */
    bh = avctx->height >> 2;
    blocks = bw * bh;
    stride = smk->pic->linesize[0];
    while(blk < blocks) {
        int type, run, mode;
        uint16_t pix;

        /* type code: low 2 bits = block kind, next 6 bits index the run
         * length table, high byte = fill color for FILL blocks */
        type = smk_get_code(&gb, smk->type_tbl, smk->type_last);
        run = block_runs[(type >> 2) & 0x3F];
        switch(type & 3){
        case SMK_BLK_MONO:
            /* two-color block: 16-bit bitmap selects hi/lo per pixel */
            while(run-- && blk < blocks){
                int clr, map;
                int hi, lo;
                clr = smk_get_code(&gb, smk->mclr_tbl, smk->mclr_last);
                map = smk_get_code(&gb, smk->mmap_tbl, smk->mmap_last);
                out = smk->pic->data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                hi = clr >> 8;
                lo = clr & 0xFF;
                for(i = 0; i < 4; i++) {
                    if(map & 1) out[0] = hi; else out[0] = lo;
                    if(map & 2) out[1] = hi; else out[1] = lo;
                    if(map & 4) out[2] = hi; else out[2] = lo;
                    if(map & 8) out[3] = hi; else out[3] = lo;
                    map >>= 4;
                    out += stride;
                }
                blk++;
            }
            break;
        case SMK_BLK_FULL:
            mode = 0;
            if(avctx->codec_tag == MKTAG('S', 'M', 'K', '4')) { // In case of Smacker v4 we have three modes
                if(get_bits1(&gb)) mode = 1;
                else if(get_bits1(&gb)) mode = 2;
            }
            while(run-- && blk < blocks){
                out = smk->pic->data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                switch(mode){
                case 0:
                    /* full detail: two pixel-pairs per row, right pair
                     * decoded first */
                    for(i = 0; i < 4; i++) {
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out+2,pix);
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix);
                        out += stride;
                    }
                    break;
                case 1:
                    /* 2x2 upscale: each decoded pair fills a 2x2 area */
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    break;
                case 2:
                    /* vertical 2x upscale: each decoded row is doubled */
                    for(i = 0; i < 2; i++) {
                        uint16_t pix1, pix2;
                        pix2 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        pix1 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                    }
                    break;
                }
                blk++;
            }
            break;
        case SMK_BLK_SKIP:
            /* keep the previous frame's content for these blocks */
            while(run-- && blk < blocks)
                blk++;
            break;
        case SMK_BLK_FILL:
            /* solid fill; the color index comes from the type code */
            mode = type >> 8;
            while(run-- && blk < blocks){
                uint32_t col;
                out = smk->pic->data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                col = mode * 0x01010101;
                for(i = 0; i < 4; i++) {
                    *((uint32_t*)out) = col;
                    out += stride;
                }
                blk++;
            }
            break;
        }
    }

    if ((ret = av_frame_ref(data, smk->pic)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}
/**
 * Encode one frame as a Windows BMP file into a caller-provided buffer
 * (old-style encode API).
 *
 * Emits BITMAPFILEHEADER + BITMAPINFOHEADER, an optional palette or
 * bitfield-mask table, then the pixel rows bottom-to-top with each row
 * padded to a 4-byte boundary. RGB565 uses BI_BITFIELDS with masks abused
 * through the `pal` pointer.
 *
 * @param avctx    codec context (dimensions and pixel format)
 * @param buf      output buffer
 * @param buf_size size of buf in bytes
 * @param data     input AVFrame (cast from void*)
 * @return total number of bytes written, or -1 on error
 */
static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf,
                            int buf_size, void *data){
    BMPContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    const uint32_t *pal = NULL;
    int pad_bytes_per_row, bit_count, pal_entries = 0, compression = BMP_RGB;
    uint8_t *ptr;
    unsigned char* buf0 = buf;
    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;
    switch (avctx->pix_fmt) {
    case PIX_FMT_BGR24:
        bit_count = 24;
        break;
    case PIX_FMT_RGB555:
        bit_count = 16;
        break;
    case PIX_FMT_RGB565:
        bit_count = 16;
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
    case PIX_FMT_PAL8:
        bit_count = 8;
        pal = (uint32_t *)p->data[1];
        break;
    case PIX_FMT_MONOBLACK:
        bit_count = 1;
        pal = monoblack_pal;
        break;
    default:
        return -1;
    }
    /* palette size defaults to a full table for the given bit depth */
    if (pal && !pal_entries) pal_entries = 1 << bit_count;
    /* 64-bit math to avoid overflow for large width * bit_count */
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;  /* rows are 4-byte aligned */
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if(n_bytes>buf_size) {
        av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", n_bytes, buf_size);
        return -1;
    }
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF); /* alpha byte stripped */
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = buf0 + hsize;
    for(i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            /* copy 16-bit pixels explicitly little-endian */
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for(n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row);
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }
    return n_bytes;
}
/**
 * Encode one frame as a Windows BMP file (AVPacket-based API).
 *
 * Emits BITMAPFILEHEADER + BITMAPINFOHEADER, an optional palette or
 * bitfield-mask table, then the pixel rows bottom-to-top with each row
 * padded to a 4-byte boundary. RGB444/RGB565 use BI_BITFIELDS with the mask
 * tables abused through the `pal` pointer; systematic palettes are
 * generated on the fly for the pseudo-paletted formats.
 *
 * @param avctx      codec context (dimensions, pixel format, bpp)
 * @param pkt        output packet, allocated here
 * @param pict       input frame
 * @param got_packet set to 1 when a packet was produced
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    BMPContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
    const uint32_t *pal = NULL;
    uint32_t palette256[256];
    int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
    int bit_count = avctx->bits_per_coded_sample;
    uint8_t *ptr, *buf;

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB444:
        compression = BMP_BITFIELDS;
        pal = rgb444_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB565:
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_GRAY8:
        av_assert1(bit_count == 8);
        /* these formats have an implicit palette; materialize it */
        ff_set_systematic_pal2(palette256, avctx->pix_fmt);
        pal = palette256;
        break;
    case AV_PIX_FMT_PAL8:
        pal = (uint32_t *)p->data[1];
        break;
    case AV_PIX_FMT_MONOBLACK:
        pal = monoblack_pal;
        break;
    }
    /* palette size defaults to a full table for the given bit depth */
    if (pal && !pal_entries)
        pal_entries = 1 << bit_count;
    /* 64-bit math to avoid overflow for large width * bit_count */
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;  /* rows are 4-byte aligned */
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if ((ret = ff_alloc_packet2(avctx, pkt, n_bytes)) < 0)
        return ret;
    buf = pkt->data;
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF); /* alpha byte stripped */
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = pkt->data + hsize;
    for(i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            /* copy 16-bit pixels explicitly little-endian */
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for(n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row);
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
/**
 * Encode one block of buffered PCM audio to AC-3 and emit it as an output
 * frame, optionally wrapped in an IEC 61937 burst header for S/PDIF
 * passthrough.
 *
 * Pulls samples via fill_buffer(), wraps them in a temporary AVFrame that
 * points at the input planes (no copy), encodes with avcodec_encode_audio2,
 * then copies header + payload into a pooled output buffer, zero-pads to the
 * burst length, and byte-swaps the payload to 16-bit words.
 *
 * @return 0 on success or "need more input", -1 on allocation/encode failure
 */
static int filter_out(struct af_instance *af)
{
    af_ac3enc_t *s = af->priv;

    if (!fill_buffer(af))
        return 0; // need more input

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        MP_FATAL(af, "Could not allocate memory \n");
        return -1;
    }
    frame->nb_samples = s->in_samples;
    frame->format = s->lavc_actx->sample_fmt;
    frame->channel_layout = s->lavc_actx->channel_layout;
    assert(s->input->num_planes <= AV_NUM_DATA_POINTERS);
    /* point the AVFrame directly at the input planes — no data copy */
    frame->extended_data = frame->data;
    for (int n = 0; n < s->input->num_planes; n++)
        frame->data[n] = s->input->planes[n];
    frame->linesize[0] = s->input->samples * s->input->sstride;

    int ok;
    int lavc_ret = avcodec_encode_audio2(s->lavc_actx, &s->pkt, frame, &ok);
    av_frame_free(&frame);
    /* input consumed regardless of encode outcome */
    s->input->samples = 0;
    if (lavc_ret < 0 || !ok) {
        MP_FATAL(af, "Encode failed.\n");
        return -1;
    }

    MP_DBG(af, "avcodec_encode_audio got %d, pending %d.\n",
           s->pkt.size, s->pkt.size ? s->pending->samples : s->pending->samples);

    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, s->out_samples);
    if (!out)
        return -1;
    mp_audio_copy_attributes(out, s->pending);

    int frame_size = s->pkt.size;
    int header_len = 0;
    char hdr[8];

    if (s->cfg_add_iec61937_header && s->pkt.size > 5) {
        /* byte 5 of an AC-3 frame holds the bitstream mode in its low bits
         * — NOTE(review): assumes a valid AC-3 sync frame; confirm */
        int bsmod = s->pkt.data[5] & 0x7;
        int len = frame_size;

        /* IEC 61937 AC-3 bursts are a fixed size: 1536 samples, 2 ch, 16-bit */
        frame_size = AC3_FRAME_SIZE * 2 * 2;
        header_len = 8;

        AV_WL16(hdr,     0xF872);   // iec 61937 syncword 1
        AV_WL16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2
        hdr[5] = bsmod;             // bsmod
        hdr[4] = 0x01;              // data-type ac3
        AV_WL16(hdr + 6, len << 3); // number of bits in payload
    }

    if (frame_size > out->samples * out->sstride)
        abort();

    char *buf = (char *)out->planes[0];
    memcpy(buf, hdr, header_len);
    memcpy(buf + header_len, s->pkt.data, s->pkt.size);
    /* zero-pad the remainder of the burst */
    memset(buf + header_len + s->pkt.size, 0,
           frame_size - (header_len + s->pkt.size));
    /* swap payload bytes into 16-bit word order for the IEC stream */
    swap_16((uint16_t *)(buf + header_len), s->pkt.size / 2);
    out->samples = frame_size / out->sstride;
    af_add_output_frame(af, out);

    update_delay(af);
    return 0;
}
/**
 * Parse a Monkey's Audio (APE) file header and build the audio stream.
 *
 * Handles both the post-3.98 layout (descriptor + header) and the legacy
 * pre-3.98 layout (flags-driven header), reads the seek table, computes
 * per-frame positions/sizes, reads APE tags and fills the AVStream.
 *
 * Fixes applied:
 *  - the seek-table av_malloc() result was used unchecked (NULL deref on OOM);
 *  - with seektablelength == 0 and totalframes > 1 the per-frame position
 *    loop dereferenced a NULL ape->seektable — now rejected explicitly;
 *  - the extradata av_malloc() result was written through unchecked.
 *
 * @return 0 on success, negative error code otherwise
 */
static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
{
    AVIOContext *pb = s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks;
    int64_t pts;

    /* Skip any leading junk such as id3v2 tags */
    ape->junklength = avio_tell(pb);

    tag = avio_rl32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return -1;

    ape->fileversion = avio_rl16(pb);

    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %"PRId16".%02"PRId16"\n",
               ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return -1;
    }

    if (ape->fileversion >= 3980) {
        /* new-style layout: fixed descriptor followed by the header */
        ape->padding1             = avio_rl16(pb);
        ape->descriptorlength     = avio_rl32(pb);
        ape->headerlength         = avio_rl32(pb);
        ape->seektablelength      = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->audiodatalength      = avio_rl32(pb);
        ape->audiodatalength_high = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        avio_read(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            avio_skip(pb, ape->descriptorlength - 52);

        /* Read header data */
        ape->compressiontype      = avio_rl16(pb);
        ape->formatflags          = avio_rl16(pb);
        ape->blocksperframe       = avio_rl32(pb);
        ape->finalframeblocks     = avio_rl32(pb);
        ape->totalframes          = avio_rl32(pb);
        ape->bps                  = avio_rl16(pb);
        ape->channels             = avio_rl16(pb);
        ape->samplerate           = avio_rl32(pb);
    } else {
        /* legacy layout: most values are implied by format flags */
        ape->descriptorlength = 0;
        ape->headerlength     = 32;
        ape->compressiontype  = avio_rl16(pb);
        ape->formatflags      = avio_rl16(pb);
        ape->channels         = avio_rl16(pb);
        ape->samplerate       = avio_rl32(pb);
        ape->wavheaderlength  = avio_rl32(pb);
        ape->wavtaillength    = avio_rl32(pb);
        ape->totalframes      = avio_rl32(pb);
        ape->finalframeblocks = avio_rl32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            avio_skip(pb, 4); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = avio_rl32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        /* frame size depends on the encoder version / compression level */
        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 ||
                 (ape->fileversion >= 3800 && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            avio_skip(pb, ape->wavheaderlength);
    }

    if(!ape->totalframes){
        av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
        return AVERROR(EINVAL);
    }
    if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){
        av_log(s, AV_LOG_ERROR, "Too many frames: %"PRIu32"\n",
               ape->totalframes);
        return -1;
    }
    if (ape->seektablelength &&
        (ape->seektablelength / sizeof(*ape->seektable)) < ape->totalframes) {
        av_log(s, AV_LOG_ERROR,
               "Number of seek entries is less than number of frames: %ld vs. %"PRIu32"\n",
               ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
        return AVERROR_INVALIDDATA;
    }
    ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame));
    if(!ape->frames)
        return AVERROR(ENOMEM);
    ape->firstframe = ape->junklength + ape->descriptorlength +
                      ape->headerlength + ape->seektablelength +
                      ape->wavheaderlength;
    ape->currentframe = 0;

    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        if (!ape->seektable) {                    /* FIX: was unchecked */
            av_freep(&ape->frames);
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = avio_rl32(pb);
    } else if (ape->totalframes > 1) {
        /* FIX: the per-frame position loop below reads ape->seektable[i];
         * without a seek table that would dereference NULL */
        av_log(s, AV_LOG_ERROR, "Missing seek table\n");
        av_freep(&ape->frames);
        return AVERROR(EINVAL);
    }

    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i] + ape->junklength;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        /* decoder needs 32-bit-aligned reads; remember the misalignment */
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].size    = ape->finalframeblocks * 4;
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;

    for (i = 0; i < ape->totalframes; i++) {
        if(ape->frames[i].skip){
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }

    ape_dumpinfo(s, ape);

    /* try to read APE tags */
    if (pb->seekable) {
        ff_ape_parse_tag(s);
        avio_seek(pb, 0, SEEK_SET);
    }

    av_log(s, AV_LOG_DEBUG,
           "Decoding file - v%d.%02d, compression level %"PRIu16"\n",
           ape->fileversion / 1000, (ape->fileversion % 1000) / 10,
           ape->compressiontype);

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;

    total_blocks = (ape->totalframes == 0) ? 0 :
                   ((ape->totalframes - 1) * ape->blocksperframe) +
                   ape->finalframeblocks;

    st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id              = CODEC_ID_APE;
    st->codec->codec_tag             = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels              = ape->channels;
    st->codec->sample_rate           = ape->samplerate;
    st->codec->bits_per_coded_sample = ape->bps;
    st->codec->frame_size            = MAC_SUBFRAME_SIZE;

    st->nb_frames  = ape->totalframes;
    st->start_time = 0;
    st->duration   = total_blocks / MAC_SUBFRAME_SIZE;
    av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);

    st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE);
    if (!st->codec->extradata) {                  /* FIX: was unchecked */
        av_freep(&ape->seektable);
        av_freep(&ape->frames);
        return AVERROR(ENOMEM);
    }
    st->codec->extradata_size = APE_EXTRADATA_SIZE;
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0,
                           AVINDEX_KEYFRAME);
        pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
    }

    return 0;
}
/**
 * Encode one frame as a Targa (TGA) image (AVPacket-based API).
 *
 * Writes an 18-byte TGA header, an optional 256-entry 24-bit palette for
 * PAL8 input, the image data (RLE when the coder type allows and it helps,
 * raw otherwise), and the standard 26-byte TGA footer.
 *
 * @param avctx      codec context (dimensions and pixel format)
 * @param pkt        output packet, allocated here
 * @param p          input frame
 * @param got_packet set to 1 when a packet was produced
 * @return 0 on success, negative AVERROR on bad input or allocation failure
 */
static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *p, int *got_packet)
{
    int bpp, picsize, datasize = -1, ret, i;
    uint8_t *out;

    /* TGA width/height fields are 16-bit little-endian */
    if(avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    /* worst case: 18-byte header + raw pixels + 26-byte footer + 1 */
    if ((ret = ff_alloc_packet2(avctx, pkt, picsize + 45)) < 0)
        return ret;

    /* zero out the header and only set applicable fields */
    memset(pkt->data, 0, 12);
    AV_WL16(pkt->data+12, avctx->width);
    AV_WL16(pkt->data+14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    pkt->data[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    out = pkt->data + 18;  /* skip past the header we write */

    avctx->bits_per_coded_sample =
        av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
    switch(avctx->pix_fmt) {
    case PIX_FMT_PAL8:
        pkt->data[1]  = 1;          /* palette present */
        pkt->data[2]  = TGA_PAL;    /* uncompressed palettised image */
        pkt->data[6]  = 1;          /* palette contains 256 entries */
        pkt->data[7]  = 24;         /* palette contains 24 bit entries */
        pkt->data[16] = 8;          /* bpp */
        /* convert the 32-bit palette entries to packed 24-bit */
        for (i = 0; i < 256; i++)
            AV_WL24(pkt->data + 18 + 3 * i, *(uint32_t *)(p->data[1] + i * 4));
        out += 256 * 3;             /* skip past the palette we just output */
        break;
    case PIX_FMT_GRAY8:
        pkt->data[2]  = TGA_BW;     /* uncompressed grayscale image */
        avctx->bits_per_coded_sample = 0x28;
        pkt->data[16] = 8;          /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        avctx->bits_per_coded_sample =
        pkt->data[16] = 16;         /* bpp */
        break;
    case PIX_FMT_BGR24:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 24;         /* bpp */
        break;
    case PIX_FMT_BGRA:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 32;         /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = pkt->data[16] >> 3;   /* bytes per pixel, from the bpp we just set */

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp,
                                    avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if(datasize >= 0)
        pkt->data[2] |= 8;

    /* if RLE didn't make it smaller, go back to no compression */
    else datasize = targa_encode_normal(out, p, bpp,
                                        avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    pkt->size   = out + 26 - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
/**
 * Decode one Smacker video frame (old buf-based decode API).
 *
 * Input layout: 1 flags byte, 768 bytes of big-endian RGB palette
 * (256 * 3), then the Huffman-coded block stream. The image is processed in
 * 4x4 blocks in raster order; each block is MONO (2-color + bitmap), FULL
 * (explicit pixels, three sub-modes for the SMK4 tag), SKIP (keep previous
 * content) or FILL (solid color from the type code's high byte).
 *
 * @return input buf_size on success (always fully consumed), 0 for
 *         too-short input, -1 on reget_buffer failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        const uint8_t *buf, int buf_size)
{
    SmackVContext * const smk = avctx->priv_data;
    uint8_t *out;
    uint32_t *pal;
    GetBitContext gb;
    int blocks, blk, bw, bh;
    int i;
    int stride;

    /* need at least flags byte + full 768-byte palette */
    if(buf_size <= 769)
        return 0;
    if(smk->pic.data[0])
            avctx->release_buffer(avctx, &smk->pic);

    smk->pic.reference = 1;
    smk->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, &smk->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* make the palette available on the way out */
    pal = (uint32_t*)smk->pic.data[1];
    smk->pic.palette_has_changed = buf[0] & 1;
    smk->pic.key_frame = !!(buf[0] & 2);
    if(smk->pic.key_frame)
        smk->pic.pict_type = FF_I_TYPE;
    else
        smk->pic.pict_type = FF_P_TYPE;

    buf++;
    /* palette entries are stored as BE24 RGB */
    for(i = 0; i < 256; i++)
        *pal++ = bytestream_get_be24(&buf);
    buf_size -= 769;

    /* reset the per-frame "last seen" state of all four Huffman trees */
    last_reset(smk->mmap_tbl, smk->mmap_last);
    last_reset(smk->mclr_tbl, smk->mclr_last);
    last_reset(smk->full_tbl, smk->full_last);
    last_reset(smk->type_tbl, smk->type_last);
    init_get_bits(&gb, buf, buf_size * 8);

    blk = 0;
    bw = avctx->width >> 2;     /* image is decoded in 4x4 blocks */
    bh = avctx->height >> 2;
    blocks = bw * bh;
    out = smk->pic.data[0];
    stride = smk->pic.linesize[0];
    while(blk < blocks) {
        int type, run, mode;
        uint16_t pix;

        /* type code: low 2 bits = block kind, next 6 bits index the run
         * length table, high byte = fill color for FILL blocks */
        type = smk_get_code(&gb, smk->type_tbl, smk->type_last);
        run = block_runs[(type >> 2) & 0x3F];
        switch(type & 3){
        case SMK_BLK_MONO:
            /* two-color block: 16-bit bitmap selects hi/lo per pixel */
            while(run-- && blk < blocks){
                int clr, map;
                int hi, lo;
                clr = smk_get_code(&gb, smk->mclr_tbl, smk->mclr_last);
                map = smk_get_code(&gb, smk->mmap_tbl, smk->mmap_last);
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                hi = clr >> 8;
                lo = clr & 0xFF;
                for(i = 0; i < 4; i++) {
                    if(map & 1) out[0] = hi; else out[0] = lo;
                    if(map & 2) out[1] = hi; else out[1] = lo;
                    if(map & 4) out[2] = hi; else out[2] = lo;
                    if(map & 8) out[3] = hi; else out[3] = lo;
                    map >>= 4;
                    out += stride;
                }
                blk++;
            }
            break;
        case SMK_BLK_FULL:
            mode = 0;
            if(avctx->codec_tag == MKTAG('S', 'M', 'K', '4')) { // In case of Smacker v4 we have three modes
                if(get_bits1(&gb)) mode = 1;
                else if(get_bits1(&gb)) mode = 2;
            }
            while(run-- && blk < blocks){
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                switch(mode){
                case 0:
                    /* full detail: two pixel-pairs per row, right pair
                     * decoded first */
                    for(i = 0; i < 4; i++) {
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out+2,pix);
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix);
                        out += stride;
                    }
                    break;
                case 1:
                    /* 2x2 upscale: each decoded pair fills a 2x2 area */
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    break;
                case 2:
                    /* vertical 2x upscale: each decoded row is doubled */
                    for(i = 0; i < 2; i++) {
                        uint16_t pix1, pix2;
                        pix2 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        pix1 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                    }
                    break;
                }
                blk++;
            }
            break;
        case SMK_BLK_SKIP:
            /* keep the previous frame's content for these blocks */
            while(run-- && blk < blocks)
                blk++;
            break;
        case SMK_BLK_FILL:
            /* solid fill; the color index comes from the type code */
            mode = type >> 8;
            while(run-- && blk < blocks){
                uint32_t col;
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                col = mode * 0x01010101;
                for(i = 0; i < 4; i++) {
                    *((uint32_t*)out) = col;
                    out += stride;
                }
                blk++;
            }
            break;
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = smk->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
/**
 * Parse a GENH (generic game-audio header) file and set up its single
 * audio stream: channel count/layout, interleave, sample rate, duration,
 * codec selection from the numeric codec field, and (for ADPCM THP)
 * per-channel coefficient tables read into extradata.
 *
 * Fix: the return value of ff_alloc_extradata() in the ADPCM_THP branch was
 * ignored, so an allocation failure led to avio_read() writing through a
 * NULL extradata pointer. It is now checked like the codec-7 case.
 *
 * @return 0 on success, negative AVERROR on malformed input or OOM
 */
static int genh_read_header(AVFormatContext *s)
{
    unsigned start_offset, header_size, codec, coef_type, coef[2];
    GENHDemuxContext *c = s->priv_data;
    av_unused unsigned coef_splitted[2];
    int align, ch, ret;
    AVStream *st;

    avio_skip(s->pb, 4);    /* "GENH" magic, already probed */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->channels   = avio_rl32(s->pb);
    if (st->codec->channels <= 0)
        return AVERROR_INVALIDDATA;
    if (st->codec->channels == 1)
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    else if (st->codec->channels == 2)
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    align                  =
    c->interleave_size     = avio_rl32(s->pb);
    /* overflow guard for align * channels below */
    if (align < 0 || align > INT_MAX / st->codec->channels)
        return AVERROR_INVALIDDATA;
    st->codec->block_align = align * st->codec->channels;
    st->codec->sample_rate = avio_rl32(s->pb);
    avio_skip(s->pb, 4);
    st->duration = avio_rl32(s->pb);

    codec = avio_rl32(s->pb);
    switch (codec) {
    case  0: st->codec->codec_id = AV_CODEC_ID_ADPCM_PSX;        break;
    case  1:
    case 11: st->codec->bits_per_coded_sample = 4;
             st->codec->block_align = 36 * st->codec->channels;
             st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WAV;    break;
    case  2: st->codec->codec_id = AV_CODEC_ID_ADPCM_DTK;        break;
    /* nonzero interleave means planar PCM layout */
    case  3: st->codec->codec_id = st->codec->block_align > 0 ?
                                   AV_CODEC_ID_PCM_S16BE_PLANAR :
                                   AV_CODEC_ID_PCM_S16BE;        break;
    case  4: st->codec->codec_id = st->codec->block_align > 0 ?
                                   AV_CODEC_ID_PCM_S16LE_PLANAR :
                                   AV_CODEC_ID_PCM_S16LE;        break;
    case  5: st->codec->codec_id = st->codec->block_align > 0 ?
                                   AV_CODEC_ID_PCM_S8_PLANAR :
                                   AV_CODEC_ID_PCM_S8;           break;
    case  6: st->codec->codec_id = AV_CODEC_ID_SDX2_DPCM;        break;
    case  7: ret = ff_alloc_extradata(st->codec, 2);
             if (ret < 0)
                 return ret;
             AV_WL16(st->codec->extradata, 3);
             st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS;     break;
    case 10: st->codec->codec_id = AV_CODEC_ID_ADPCM_AICA;       break;
    case 12: st->codec->codec_id = AV_CODEC_ID_ADPCM_THP;        break;
    case 13: st->codec->codec_id = AV_CODEC_ID_PCM_U8;           break;
    case 17: st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_QT;     break;
    default:
             avpriv_request_sample(s, "codec %d", codec);
             return AVERROR_PATCHWELCOME;
    }

    start_offset = avio_rl32(s->pb);
    header_size  = avio_rl32(s->pb);

    if (header_size > start_offset)
        return AVERROR_INVALIDDATA;

    /* files with no recorded header size use a fixed data offset */
    if (header_size == 0)
        start_offset = 0x800;

    coef[0]          = avio_rl32(s->pb);
    coef[1]          = avio_rl32(s->pb);
    c->dsp_int_type  = avio_rl32(s->pb);
    coef_type        = avio_rl32(s->pb);
    coef_splitted[0] = avio_rl32(s->pb);
    coef_splitted[1] = avio_rl32(s->pb);

    if (st->codec->codec_id == AV_CODEC_ID_ADPCM_THP) {
        if (st->codec->channels > 2) {
            avpriv_request_sample(s, "channels %d>2", st->codec->channels);
            return AVERROR_PATCHWELCOME;
        }

        /* 32 bytes of DSP coefficients per channel go into extradata */
        ret = ff_alloc_extradata(st->codec, 32 * st->codec->channels);
        if (ret < 0)    /* FIX: was unchecked; avio_read would hit NULL */
            return ret;
        for (ch = 0; ch < st->codec->channels; ch++) {
            if (coef_type & 1) {
                avpriv_request_sample(s, "coef_type & 1");
                return AVERROR_PATCHWELCOME;
            } else {
                avio_seek(s->pb, coef[ch], SEEK_SET);
                avio_read(s->pb, st->codec->extradata + 32 * ch, 32);
            }
        }

        if (c->dsp_int_type == 1) {
            st->codec->block_align = 8 * st->codec->channels;
            if (c->interleave_size != 1 &&
                c->interleave_size != 2 &&
                c->interleave_size != 4)
                return AVERROR_INVALIDDATA;
        }
    }

    /* NOTE(review): assumes the coefficient seeks above stayed below
     * start_offset; a negative skip here is not rejected — confirm */
    avio_skip(s->pb, start_offset - avio_tell(s->pb));

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}