/**
 * Assemble one RealMedia video frame (or one slice of a frame) from the
 * current data packet.
 *
 * Frame data may arrive as a whole frame, as one of several slices, or as a
 * partial frame continued in a later packet; the 2-bit type field in the
 * first header byte selects the layout.
 *
 * Returns 0 when a complete packet was produced in *pkt, 1 when more slices
 * are still needed, negative AVERROR on malformed input or OOM.
 */
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb, RMDemuxContext *rm, RMStream *vst, AVPacket *pkt, int len, int *pseq, int64_t *timestamp)
{
    int hdr, seq, pic_num, len2, pos;
    int type;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;            /* 0: partial, 1: whole frame, 2: last slice, 3: frame fragment */

    if(type != 3){ // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){ // not whole frame
        /* len2/pos/pic_num are only read below on paths where type != 1,
         * so they are always initialized before use */
        len2 = get_num(pb, &len);
        pos = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    /* get_num() and the avio_r8() reads above decrement len; a negative
     * value means the header overran the packet */
    if(len<0)
        return -1;
    rm->remaining_len = len;

    if(type&1){ // frame, not slice
        if(type == 3){ // frame as a part of packet
            len= len2;
            *timestamp = pos;   /* for fragments, "pos" actually carries the timestamp */
        }
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        /* 9-byte prefix: slice count byte (0 => 1 slice) + one 8-byte
         * slice table entry (marker=1, offset=0) expected by the decoder */
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        avio_read(pb, pkt->data + 9, len);
        return 0;
    }
    //now we have to deal with single slice
    *pseq = seq;
    /* a new picture starts either on slice #1 or when the picture number changes */
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        /* reserve room for the slice-count byte and an 8-byte table entry per slice */
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);  /* last slice: pos holds the remaining byte count */

    if(++vst->cur_slice > vst->slices)
        return 1;
    /* record this slice in the slice table (marker, then offset into payload);
     * -7/-3 index relative to 8*cur_slice lands inside the reserved header area */
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    /* picture complete: hand the accumulated buffer over to the caller */
    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        /* ownership moved into *pkt; neutralize vst->pkt so it is not freed twice */
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
        vst->pkt.destruct = NULL;
#endif
        /* fewer slices arrived than announced: compact the slice table */
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }
    return 1;
}
/**
 * Parse a RealAudio (.ra) stream header and configure the audio stream.
 *
 * Handles the old v3 header (always 8 kHz mono RA 14.4) and the v4/v5
 * layouts, including per-codec extradata and interleaver validation.
 * The avio_*() reads below mirror the on-disk byte layout exactly; their
 * order must not be changed.
 *
 * Returns 0 on success, a negative AVERROR on malformed headers or OOM.
 */
static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;

    /* ra type header */
    version = avio_rb16(pb); /* version */
    if (version == 3) {
        unsigned bytes_per_minute;
        int header_size = avio_rb16(pb);
        int64_t startpos = avio_tell(pb);
        avio_skip(pb, 8);
        bytes_per_minute = avio_rb16(pb);
        avio_skip(pb, 4);
        rm_read_metadata(s, pb, 0);
        if ((startpos + header_size) >= avio_tell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            avio_r8(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > avio_tell(pb))
            avio_skip(pb, header_size + startpos - avio_tell(pb));
        if (bytes_per_minute)
            st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        /* v3 streams are always 8 kHz mono RealAudio 14.4 */
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_RA_144;
        ast->deint_id = DEINT_ID_INT0;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        unsigned bytes_per_minute;
        /* old version (4) */
        avio_skip(pb, 2); /* unused */
        avio_rb32(pb); /* .ra4 */
        avio_rb32(pb); /* data size */
        avio_rb16(pb); /* version2 */
        avio_rb32(pb); /* header size */
        flavor= avio_rb16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */
        avio_rb32(pb); /* ??? */
        bytes_per_minute = avio_rb32(pb);
        if (version == 4) {
            if (bytes_per_minute)
                st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        }
        avio_rb32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */
        st->codec->block_align= avio_rb16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */
        avio_rb16(pb); /* ??? */
        if (version == 5) {
            avio_rb16(pb); avio_rb16(pb); avio_rb16(pb);
        }
        st->codec->sample_rate = avio_rb16(pb);
        avio_rb32(pb);
        st->codec->channels = avio_rb16(pb);
        if (version == 5) {
            /* v5 stores the interleaver id and codec fourcc as raw 4-byte fields */
            ast->deint_id = avio_rl32(pb);
            avio_read(pb, buf, 4);
            buf[4] = 0;
        } else {
            /* v4 stores them as pascal strings; interleaver comes first */
            AV_WL32(buf, 0);
            get_str8(pb, buf, sizeof(buf)); /* desc */
            ast->deint_id = AV_RL32(buf);
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = AV_RL32(buf);
        st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag);

        switch (st->codec->codec_id) {
        case AV_CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case AV_CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            break;
        case AV_CODEC_ID_COOK:
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
            /* fall through: COOK shares the extradata path below */
        case AV_CODEC_ID_ATRAC3:
        case AV_CODEC_ID_SIPR:
            if (read_all) {
                codecdata_length = 0;
            } else {
                avio_rb16(pb); avio_r8(pb);
                if (version == 5)
                    avio_r8(pb);
                codecdata_length = avio_rb32(pb);
                /* guards against wraparound when the padding is added later */
                if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                    av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                    return -1;
                }
            }
            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;
            break;
        case AV_CODEC_ID_AAC:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                /* first extradata byte is skipped; the rest is the AAC config */
                avio_r8(pb);
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        }
        /* sanity-check the interleaver parameters before allocating buffers */
        switch (ast->deint_id) {
        case DEINT_ID_INT4:
            if (ast->coded_framesize > ast->audio_framesize ||
                sub_packet_h <= 1 ||
                ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            if (ast->coded_framesize * sub_packet_h != 2*ast->audio_framesize) {
                avpriv_request_sample(s, "mismatching interleaver parameters");
                return AVERROR_INVALIDDATA;
            }
            break;
        case DEINT_ID_GENR:
            if (ast->sub_packet_size <= 0 ||
                ast->sub_packet_size > ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            if (ast->audio_framesize % ast->sub_packet_size)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_SIPR:
        case DEINT_ID_INT0:
        case DEINT_ID_VBRS:
        case DEINT_ID_VBRF:
            break;
        default:
            av_log(s, AV_LOG_ERROR ,"Unknown interleaver %"PRIX32"\n", ast->deint_id);
            return AVERROR_INVALIDDATA;
        }
        /* these interleavers buffer a whole super-block before output */
        if (ast->deint_id == DEINT_ID_INT4 ||
            ast->deint_id == DEINT_ID_GENR ||
            ast->deint_id == DEINT_ID_SIPR) {
            if (st->codec->block_align <= 0 ||
                ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
                ast->audio_framesize * sub_packet_h < st->codec->block_align)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
                return AVERROR(ENOMEM);
        }
        if (read_all) {
            avio_r8(pb);
            avio_r8(pb);
            avio_r8(pb);
            rm_read_metadata(s, pb, 0);
        }
    }
    return 0;
}
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoDemuxData *s = s1->priv_data; char filename[1024]; int i, res; int size[3] = { 0 }, ret[3] = { 0 }; AVIOContext *f[3] = { NULL }; AVCodecContext *codec = s1->streams[0]->codec; if (!s->is_pipe) { /* loop over input */ if (s->loop && s->img_number > s->img_last) { s->img_number = s->img_first; } if (s->img_number > s->img_last) return AVERROR_EOF; if (av_get_frame_filename(filename, sizeof(filename), s->path, s->img_number) < 0 && s->img_number > 1) return AVERROR(EIO); for (i = 0; i < 3; i++) { if (avio_open2(&f[i], filename, AVIO_FLAG_READ, &s1->interrupt_callback, NULL) < 0) { if (i >= 1) break; av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n", filename); return AVERROR(EIO); } size[i] = avio_size(f[i]); if (codec->codec_id != AV_CODEC_ID_RAWVIDEO) break; filename[strlen(filename) - 1] = 'U' + i; } if (codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width) infer_size(&codec->width, &codec->height, size[0]); } else { f[0] = s1->pb; if (f[0]->eof_reached) return AVERROR(EIO); size[0] = 4096; } res = av_new_packet(pkt, size[0] + size[1] + size[2]); if (res < 0) return res; pkt->stream_index = 0; pkt->flags |= AV_PKT_FLAG_KEY; pkt->size = 0; for (i = 0; i < 3; i++) { if (f[i]) { ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]); if (!s->is_pipe) avio_close(f[i]); if (ret[i] > 0) pkt->size += ret[i]; } } if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) { av_packet_unref(pkt); return AVERROR(EIO); /* signal EOF */ } else { s->img_count++; s->img_number++; return 0; } }
bool AudioDecoder::decode(const QByteArray &encoded) { if (!isAvailable()) return false; DPTR_D(AudioDecoder); AVPacket packet; av_new_packet(&packet, encoded.size()); memcpy(packet.data, encoded.data(), encoded.size()); //TODO: use AVPacket directly instead of Packet? int ret = avcodec_decode_audio4(d.codec_ctx, d.frame, &d.got_frame_ptr, &packet); av_free_packet(&packet); if (ret == AVERROR(EAGAIN)) { return false; } if (ret < 0) { qWarning("[AudioDecoder] %s", av_err2str(ret)); return false; } if (!d.got_frame_ptr) { qWarning("[AudioDecoder] got_frame_ptr=false"); return false; } #if !QTAV_HAVE(SWRESAMPLE) && !QTAV_HAVE(AVRESAMPLE) int samples_with_channels = d.frame->nb_samples * d.codec_ctx->channels; int samples_with_channels_half = samples_with_channels/2; d.decoded.resize(samples_with_channels * sizeof(float)); float *decoded_data = (float*)d.decoded.data(); static const float kInt8_inv = 1.0f/128.0f; static const float kInt16_inv = 1.0f/32768.0f; static const float kInt32_inv = 1.0f/2147483648.0f; //TODO: hwa //https://code.google.com/p/lavfilters/source/browse/decoder/LAVAudio/LAVAudio.cpp switch (d.codec_ctx->sample_fmt) { case AV_SAMPLE_FMT_U8: { uint8_t *data = (uint8_t*)*d.frame->data; for (int i = 0; i < samples_with_channels_half; i++) { decoded_data[i] = (data[i] - 0x7F) * kInt8_inv; decoded_data[samples_with_channels - i] = (data[samples_with_channels - i] - 0x7F) * kInt8_inv; } } break; case AV_SAMPLE_FMT_S16: { int16_t *data = (int16_t*)*d.frame->data; for (int i = 0; i < samples_with_channels_half; i++) { decoded_data[i] = data[i] * kInt16_inv; decoded_data[samples_with_channels - i] = data[samples_with_channels - i] * kInt16_inv; } } break; case AV_SAMPLE_FMT_S32: { int32_t *data = (int32_t*)*d.frame->data; for (int i = 0; i < samples_with_channels_half; i++) { decoded_data[i] = data[i] * kInt32_inv; decoded_data[samples_with_channels - i] = data[samples_with_channels - i] * kInt32_inv; } } break; case AV_SAMPLE_FMT_FLT: memcpy(decoded_data, 
*d.frame->data, d.decoded.size()); break; case AV_SAMPLE_FMT_DBL: { double *data = (double*)*d.frame->data; for (int i = 0; i < samples_with_channels_half; i++) { decoded_data[i] = data[i]; decoded_data[samples_with_channels - i] = data[samples_with_channels - i]; } } break; case AV_SAMPLE_FMT_U8P: { uint8_t **data = (uint8_t**)d.frame->extended_data; for (int i = 0; i < d.frame->nb_samples; ++i) { for (int ch = 0; ch < d.codec_ctx->channels; ++ch) { *decoded_data++ = (data[ch][i] - 0x7F) * kInt8_inv; } } } break; case AV_SAMPLE_FMT_S16P: { uint16_t **data = (uint16_t**)d.frame->extended_data; for (int i = 0; i < d.frame->nb_samples; ++i) { for (int ch = 0; ch < d.codec_ctx->channels; ++ch) { *decoded_data++ = data[ch][i] * kInt16_inv; } } } break; case AV_SAMPLE_FMT_S32P: { uint32_t **data = (uint32_t**)d.frame->extended_data; for (int i = 0; i < d.frame->nb_samples; ++i) { for (int ch = 0; ch < d.codec_ctx->channels; ++ch) { *decoded_data++ = data[ch][i] * kInt32_inv; } } } break; case AV_SAMPLE_FMT_FLTP: { float **data = (float**)d.frame->extended_data; for (int i = 0; i < d.frame->nb_samples; ++i) { for (int ch = 0; ch < d.codec_ctx->channels; ++ch) { *decoded_data++ = data[ch][i]; } } } break; case AV_SAMPLE_FMT_DBLP: { double **data = (double**)d.frame->extended_data; for (int i = 0; i < d.frame->nb_samples; ++i) { for (int ch = 0; ch < d.codec_ctx->channels; ++ch) { *decoded_data++ = data[ch][i]; } } } break; default: static bool sWarn_a_fmt = true; //FIXME: no warning when replay. warn only once if (sWarn_a_fmt) { qWarning("Unsupported audio format: %d", d.codec_ctx->sample_fmt); sWarn_a_fmt = false; } d.decoded.clear(); break; } #else d.resampler->setInSampesPerChannel(d.frame->nb_samples); if (!d.resampler->convert((const quint8**)d.frame->extended_data)) { return false; } d.decoded = d.resampler->outData(); return true; #endif //!(QTAV_HAVE(SWRESAMPLE) && !QTAV_HAVE(AVRESAMPLE)) return !d.decoded.isEmpty(); }
int cv_finance_encoder_video_input_begin( struct Encoderinfo* encoder_handle, const char* filename, //where you want to store enum AVPixelFormat pixel_format, //picture pix_format int image_width, int image_height // int image_stride ) { if (pixel_format != AV_PIX_FMT_YUV420P) { //convert to YUV420P } AVOutputFormat* fmt; AVCodec* pCodec; // set Params encoder_handle->in_w = image_width; encoder_handle->in_h = image_height; encoder_handle->framecnt = 0; encoder_handle->frameindex = 0; av_register_all(); encoder_handle->pFormatCtx = avformat_alloc_context(); //Guess Format fmt = av_guess_format(NULL, filename, NULL); encoder_handle->pFormatCtx->oformat = fmt; //Open output URL if (avio_open(&(encoder_handle->pFormatCtx->pb), filename, AVIO_FLAG_READ_WRITE) < 0) { printf("Failed to open output file! \n"); return -1; } encoder_handle->video_st = avformat_new_stream(encoder_handle->pFormatCtx, 0); encoder_handle->video_st->time_base.num = 1; encoder_handle->video_st->time_base.den = 10; if (encoder_handle->video_st == NULL) { return -1; } encoder_handle->pCodecCtx = encoder_handle->video_st->codec; //Must Param encoder_handle->pCodecCtx->codec_id = fmt->video_codec; encoder_handle->pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; encoder_handle->pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; encoder_handle->pCodecCtx->width = encoder_handle->in_w; encoder_handle->pCodecCtx->height = encoder_handle->in_h; encoder_handle->pCodecCtx->time_base.num = 1; encoder_handle->pCodecCtx->time_base.den = 10; encoder_handle->pCodecCtx->bit_rate = 1600; encoder_handle->pCodecCtx->gop_size = 250; encoder_handle->pCodecCtx->qmin = 10; encoder_handle->pCodecCtx->qmax = 51; //Optional Param encoder_handle->pCodecCtx->max_b_frames = 3; // printf("147%d\n", AV_PIX_FMT_YUV420P); //Set Option;local var AVDictionary *param = 0; //H264 if (encoder_handle->pCodecCtx->codec_id == AV_CODEC_ID_H264) { av_dict_set(¶m, "preset", "slow", 0); av_dict_set(¶m, "tune", "zerolatency", 0); // printf("153\n"); } 
//av_dump_format(encoder_handle->pFormatCtx, 0, filename, 1); //open codec pCodec = avcodec_find_encoder(encoder_handle->pCodecCtx->codec_id); if (!pCodec) { printf("Can not find encoder! \n"); return -1; } if (avcodec_open2(encoder_handle->pCodecCtx, pCodec, ¶m) < 0) { printf("Failed to open encoder! \n"); return -1; } int ret; //initial frame int picture_size; encoder_handle->pFrame = av_frame_alloc(); encoder_handle->pFrame->format = encoder_handle->pCodecCtx->pix_fmt; encoder_handle->pFrame->width = encoder_handle->pCodecCtx->width; encoder_handle->pFrame->height = encoder_handle->pCodecCtx->height; picture_size=av_image_get_buffer_size(encoder_handle->pCodecCtx->pix_fmt, encoder_handle->pCodecCtx->width, encoder_handle->pCodecCtx->height,1); printf("%d:%d\n", __LINE__,picture_size); ret = av_image_alloc(encoder_handle->pFrame->data, encoder_handle->pFrame->linesize, encoder_handle->pCodecCtx->width, encoder_handle->pCodecCtx->height, encoder_handle->pCodecCtx->pix_fmt,1); // picture_size = avpicture_get_size(encoder_handle->pCodecCtx->pix_fmt, encoder_handle->pCodecCtx->width, encoder_handle->pCodecCtx->height); // uint8_t* picture_buf = (uint8_t *)av_malloc(picture_size); // avpicture_fill((AVPicture *)encoder_handle->pFrame, picture_buf, encoder_handle->pCodecCtx->pix_fmt, encoder_handle->pCodecCtx->width, encoder_handle->pCodecCtx->height); //Write File Header ret= avformat_write_header(encoder_handle->pFormatCtx, NULL); if (ret < 0) { printf("Error occurred when opening output file!\n"); return 1; } av_new_packet(&encoder_handle->pkt, picture_size); encoder_handle->y_size = encoder_handle->pCodecCtx->width * encoder_handle->pCodecCtx->height; return 0; }
// Convert an MPEG I-frame into a bitmap. This is used as the way of // sending still pictures. We convert the image to a QImage even // though that actually means converting it from YUV and eventually // converting it back again but we do this very infrequently so the // cost is outweighed by the simplification. void MHIBitmap::CreateFromMPEG(const unsigned char *data, int length) { AVCodecContext *c = NULL; AVFrame *picture = NULL; AVPacket pkt; uint8_t *buff = NULL; int gotPicture = 0, len; m_image = QImage(); // Find the mpeg2 video decoder. AVCodec *codec = avcodec_find_decoder(CODEC_ID_MPEG2VIDEO); if (!codec) return; c = avcodec_alloc_context3(NULL); picture = avcodec_alloc_frame(); if (avcodec_open2(c, codec, NULL) < 0) goto Close; // Copy the data into AVPacket if (av_new_packet(&pkt, length) < 0) goto Close; memcpy(pkt.data, data, length); buff = pkt.data; while (pkt.size > 0 && ! gotPicture) { len = avcodec_decode_video2(c, picture, &gotPicture, &pkt); if (len < 0) // Error goto Close; pkt.data += len; pkt.size -= len; } if (!gotPicture) { pkt.data = NULL; pkt.size = 0; // Process any buffered data if (avcodec_decode_video2(c, picture, &gotPicture, &pkt) < 0) goto Close; } if (gotPicture) { int nContentWidth = c->width; int nContentHeight = c->height; m_image = QImage(nContentWidth, nContentHeight, QImage::Format_ARGB32); m_opaque = true; // MPEG images are always opaque. AVPicture retbuf; memset(&retbuf, 0, sizeof(AVPicture)); int bufflen = nContentWidth * nContentHeight * 3; unsigned char *outputbuf = new unsigned char[bufflen]; avpicture_fill(&retbuf, outputbuf, PIX_FMT_RGB24, nContentWidth, nContentHeight); myth_sws_img_convert( &retbuf, PIX_FMT_RGB24, (AVPicture*)picture, c->pix_fmt, nContentWidth, nContentHeight); uint8_t * buf = outputbuf; // Copy the data a pixel at a time. // This should handle endianness correctly. 
for (int i = 0; i < nContentHeight; i++) { for (int j = 0; j < nContentWidth; j++) { int red = *buf++; int green = *buf++; int blue = *buf++; m_image.setPixel(j, i, qRgb(red, green, blue)); } } delete [] outputbuf; } Close: pkt.data = buff; av_free_packet(&pkt); avcodec_close(c); av_free(c); av_free(picture); }
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt, uint8_t block_type, AVFormatContext *s) { uint8_t * vidbuf_start = NULL; int vidbuf_nbytes = 0; int code; int bytes_copied = 0; int position, duration, npixels; unsigned int vidbuf_capacity; int ret = 0; AVStream *st; if (vid->video_index < 0) { st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); vid->video_index = st->index; if (vid->audio_index < 0) { av_log_ask_for_sample(s, "No audio packet before first video " "packet. Using default video time base.\n"); } avpriv_set_pts_info(st, 64, 185, vid->sample_rate); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_BETHSOFTVID; st->codec->width = vid->width; st->codec->height = vid->height; } st = s->streams[vid->video_index]; npixels = st->codec->width * st->codec->height; vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE); if(!vidbuf_start) return AVERROR(ENOMEM); // save the file position for the packet, include block type position = avio_tell(pb) - 1; vidbuf_start[vidbuf_nbytes++] = block_type; // get the current packet duration duration = vid->bethsoft_global_delay + avio_rl16(pb); // set the y offset if it exists (decoder header data should be in data section) if(block_type == VIDEO_YOFF_P_FRAME){ if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) { ret = AVERROR(EIO); goto fail; } vidbuf_nbytes += 2; } do{ vidbuf_start = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE); if(!vidbuf_start) return AVERROR(ENOMEM); code = avio_r8(pb); vidbuf_start[vidbuf_nbytes++] = code; if(code >= 0x80){ // rle sequence if(block_type == VIDEO_I_FRAME) vidbuf_start[vidbuf_nbytes++] = avio_r8(pb); } else if(code){ // plain sequence if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) { ret = AVERROR(EIO); goto fail; } vidbuf_nbytes += code; } bytes_copied += code & 0x7F; if(bytes_copied == npixels){ // sometimes no stop character is given, need to 
keep track of bytes copied // may contain a 0 byte even if read all pixels if(avio_r8(pb)) avio_seek(pb, -1, SEEK_CUR); break; } if (bytes_copied > npixels) { ret = AVERROR_INVALIDDATA; goto fail; } } while(code); // copy data into packet if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0) goto fail; memcpy(pkt->data, vidbuf_start, vidbuf_nbytes); av_free(vidbuf_start); pkt->pos = position; pkt->stream_index = vid->video_index; pkt->duration = duration; if (block_type == VIDEO_I_FRAME) pkt->flags |= AV_PKT_FLAG_KEY; /* if there is a new palette available, add it to packet side data */ if (vid->palette) { uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, BVID_PALETTE_SIZE); memcpy(pdata, vid->palette, BVID_PALETTE_SIZE); av_freep(&vid->palette); } vid->nframes--; // used to check if all the frames were read return 0; fail: av_free(vidbuf_start); return ret; }
static int film_read_packet(AVFormatContext *s, AVPacket *pkt) { FilmDemuxContext *film = s->priv_data; ByteIOContext *pb = s->pb; film_sample_t *sample; int ret = 0; int i; int left, right; if (film->current_sample >= film->sample_count) return AVERROR(EIO); sample = &film->sample_table[film->current_sample]; /* position the stream (will probably be there anyway) */ url_fseek(pb, sample->sample_offset, SEEK_SET); /* do a special song and dance when loading FILM Cinepak chunks */ if ((sample->stream == film->video_stream_index) && (film->video_type == CODEC_ID_CINEPAK)) { pkt->pos= url_ftell(pb); if (av_new_packet(pkt, sample->sample_size)) return AVERROR(ENOMEM); get_buffer(pb, pkt->data, sample->sample_size); } else if ((sample->stream == film->audio_stream_index) && (film->audio_channels == 2)) { /* stereo PCM needs to be interleaved */ if (av_new_packet(pkt, sample->sample_size)) return AVERROR(ENOMEM); /* make sure the interleave buffer is large enough */ if (sample->sample_size > film->stereo_buffer_size) { av_free(film->stereo_buffer); film->stereo_buffer_size = sample->sample_size; film->stereo_buffer = av_malloc(film->stereo_buffer_size); } pkt->pos= url_ftell(pb); ret = get_buffer(pb, film->stereo_buffer, sample->sample_size); if (ret != sample->sample_size) ret = AVERROR(EIO); left = 0; right = sample->sample_size / 2; for (i = 0; i < sample->sample_size; ) { if (film->audio_bits == 8) { pkt->data[i++] = film->stereo_buffer[left++]; pkt->data[i++] = film->stereo_buffer[right++]; } else { pkt->data[i++] = film->stereo_buffer[left++]; pkt->data[i++] = film->stereo_buffer[left++]; pkt->data[i++] = film->stereo_buffer[right++]; pkt->data[i++] = film->stereo_buffer[right++]; } } } else { ret= av_get_packet(pb, pkt, sample->sample_size); if (ret != sample->sample_size) ret = AVERROR(EIO); } pkt->stream_index = sample->stream; pkt->pts = sample->pts; film->current_sample++; return ret; }
bool AVFormatWriter::Init(void) { AVOutputFormat *fmt = av_guess_format(m_container.toAscii().constData(), NULL, NULL); if (!fmt) { LOG(VB_RECORD, LOG_ERR, LOC + QString("Init(): Unable to guess AVOutputFormat from container %1") .arg(m_container)); return false; } m_fmt = *fmt; if (m_width && m_height) { m_avVideoCodec = avcodec_find_encoder_by_name( m_videoCodec.toAscii().constData()); if (!m_avVideoCodec) { LOG(VB_RECORD, LOG_ERR, LOC + QString("Init(): Unable to find video codec %1").arg(m_videoCodec)); return false; } m_fmt.video_codec = m_avVideoCodec->id; } else m_fmt.video_codec = CODEC_ID_NONE; m_avAudioCodec = avcodec_find_encoder_by_name( m_audioCodec.toAscii().constData()); if (!m_avAudioCodec) { LOG(VB_RECORD, LOG_ERR, LOC + QString("Init(): Unable to find audio codec %1").arg(m_audioCodec)); return false; } m_fmt.audio_codec = m_avAudioCodec->id; m_ctx = avformat_alloc_context(); if (!m_ctx) { LOG(VB_RECORD, LOG_ERR, LOC + "Init(): Unable to allocate AVFormatContext"); return false; } m_ctx->oformat = &m_fmt; if (m_container == "mpegts") m_ctx->packet_size = 2324; snprintf(m_ctx->filename, sizeof(m_ctx->filename), "%s", m_filename.toAscii().constData()); if (m_fmt.video_codec != CODEC_ID_NONE) m_videoStream = AddVideoStream(); if (m_fmt.audio_codec != CODEC_ID_NONE) m_audioStream = AddAudioStream(); m_pkt = new AVPacket; if (!m_pkt) { LOG(VB_RECORD, LOG_ERR, LOC + "Init(): error allocating AVPacket"); return false; } av_new_packet(m_pkt, m_ctx->packet_size); m_audPkt = new AVPacket; if (!m_audPkt) { LOG(VB_RECORD, LOG_ERR, LOC + "Init(): error allocating AVPacket"); return false; } av_new_packet(m_audPkt, m_ctx->packet_size); if ((m_videoStream) && (!OpenVideo())) { LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenVideo() failed"); return false; } if ((m_audioStream) && (!OpenAudio())) { LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenAudio() failed"); return false; } return true; }
/**
 * Submit one frame to the Intel QSV encoder and, when the async queue is
 * full (or we are draining with frame == NULL), retrieve one finished
 * bitstream into *pkt.
 *
 * Fixes relative to the previous version:
 *  - the function fell off the end without a return statement (undefined
 *    behavior — callers use the return value); it now returns 0.
 *  - frame->interlaced_frame was dereferenced without a NULL check; frame
 *    is NULL while draining.
 *
 * Returns 0 on success, negative AVERROR / ff_qsv_error code on failure.
 */
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    AVPacket new_pkt = { 0 };
    mfxBitstream *bs;

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = av_new_packet(&new_pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }

    bs = av_mallocz(sizeof(*bs));
    if (!bs) {
        av_packet_unref(&new_pkt);
        return AVERROR(ENOMEM);
    }
    bs->Data      = new_pkt.data;
    bs->MaxLength = new_pkt.size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (ret > 0);

    if (ret < 0) {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
        /* MORE_DATA just means the encoder needs more input before output */
        return (ret == MFX_ERR_MORE_DATA) ? 0 : ff_qsv_error(ret);
    }

    /* frame is NULL while draining — guard before dereferencing */
    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM && frame && frame->interlaced_frame)
        print_interlace_msg(avctx, q);

    if (sync) {
        /* queue the in-flight operation; it is collected below once the
         * fifo fills up or we are flushing */
        av_fifo_generic_write(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_write(q->async_fifo, &bs,      sizeof(bs),      NULL);
    } else {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!frame && av_fifo_size(q->async_fifo))) {
        av_fifo_generic_read(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_read(q->async_fifo, &bs,      sizeof(bs),      NULL);

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        /* MFX timestamps are in 90 kHz units */
        new_pkt.dts  = av_rescale_q(bs->DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        new_pkt.pts  = av_rescale_q(bs->TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        new_pkt.size = bs->DataLength;

        if (bs->FrameType & MFX_FRAMETYPE_IDR ||
            bs->FrameType & MFX_FRAMETYPE_xIDR)
            new_pkt.flags |= AV_PKT_FLAG_KEY;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        if (bs->FrameType & MFX_FRAMETYPE_I || bs->FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs->FrameType & MFX_FRAMETYPE_P || bs->FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs->FrameType & MFX_FRAMETYPE_B || bs->FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        av_freep(&bs);

        if (pkt->data) {
            /* caller supplied a buffer: copy into it */
            if (pkt->size < new_pkt.size) {
                av_log(avctx, AV_LOG_ERROR, "Submitted buffer not large enough: %d < %d\n",
                       pkt->size, new_pkt.size);
                av_packet_unref(&new_pkt);
                return AVERROR(EINVAL);
            }

            memcpy(pkt->data, new_pkt.data, new_pkt.size);
            pkt->size = new_pkt.size;

            ret = av_packet_copy_props(pkt, &new_pkt);
            av_packet_unref(&new_pkt);
            if (ret < 0)
                return ret;
        } else
            *pkt = new_pkt;

        *got_packet = 1;
    }

    return 0;   /* was missing: falling off a non-void function is UB */
}
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt) { AudioData *s = s1->priv_data; int ret, bdelay; int64_t cur_time; struct audio_buf_info abufi; if (av_new_packet(pkt, s->frame_size) < 0) return AVERROR(EIO); for(;;) { struct timeval tv; fd_set fds; tv.tv_sec = 0; tv.tv_usec = 30 * 1000; /* 30 msecs -- a bit shorter than 1 frame at 30fps */ FD_ZERO(&fds); FD_SET(s->fd, &fds); /* This will block until data is available or we get a timeout */ (void) select(s->fd + 1, &fds, 0, 0, &tv); ret = read(s->fd, pkt->data, pkt->size); if (ret > 0) break; if (ret == -1 && (errno == EAGAIN || errno == EINTR)) { av_free_packet(pkt); pkt->size = 0; pkt->pts = av_gettime(); return 0; } if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) { av_free_packet(pkt); return AVERROR(EIO); } } pkt->size = ret; /* compute pts of the start of the packet */ cur_time = av_gettime(); bdelay = ret; if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) { bdelay += abufi.bytes; } /* subtract time represented by the number of bytes in the audio fifo */ cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels); /* convert to wanted units */ pkt->pts = cur_time; if (s->flip_left && s->channels == 2) { int i; short *p = (short *) pkt->data; for (i = 0; i < ret; i += 4) { *p = ~*p; p += 2; } } return 0; }
static int microdvd_read_packet(AVFormatContext *s, AVPacket *pkt) { MicroDVDContext *microdvd = s->priv_data; char buffer[MAX_LINESIZE]; int64_t pos = avio_tell(s->pb); int i, len = 0, res = AVERROR_EOF; // last packet has its duration set but couldn't be raised earlier if (microdvd->last_pkt_ready) { *pkt = microdvd->last_pkt; microdvd->last_pkt_ready = 0; return 0; } for (i=0; i<FF_ARRAY_ELEMS(microdvd->lines); i++) { if (microdvd->lines[i][0]) { strcpy(buffer, microdvd->lines[i]); pos = microdvd->pos[i]; len = strlen(buffer); microdvd->lines[i][0] = 0; break; } } if (!len) len = ff_get_line(s->pb, buffer, sizeof(buffer)); if (microdvd->last_pkt.duration == -1 && !buffer[0]) { // if the previous subtitle line had no duration, last until the end of // the presentation microdvd->last_pkt.duration = 0; *pkt = microdvd->last_pkt; pkt->duration = -1; res = 0; } else if (buffer[0] && !(res = av_new_packet(pkt, len))) { memcpy(pkt->data, buffer, len); pkt->flags |= AV_PKT_FLAG_KEY; pkt->pos = pos; pkt->pts = pkt->dts = get_pts(buffer); if (pkt->pts != AV_NOPTS_VALUE) { pkt->duration = get_duration(buffer); if (microdvd->last_pkt.duration == -1) { // previous packet wasn't raised because it was lacking the // duration info, so set its duration with the new packet pts // and raise it AVPacket tmp_pkt; tmp_pkt = microdvd->last_pkt; tmp_pkt.duration = pkt->pts - tmp_pkt.pts; microdvd->last_pkt = *pkt; microdvd->last_pkt_ready = pkt->duration != -1; *pkt = tmp_pkt; } else if (pkt->duration == -1) { // no packet without duration queued, and current one is // lacking the duration info, we need to parse another subtitle // event. microdvd->last_pkt = *pkt; res = AVERROR(EAGAIN); } } } return res; }
/* RTP H.264 depacketizer (RFC 3984 packetization).
 *
 * Converts one RTP payload into an Annex-B AVPacket:
 *  - types 1..23: single NAL unit, prefixed with a 00 00 01 start code
 *  - type 24 (STAP-A): aggregate; two passes — size, then copy
 *  - type 28 (FU-A): fragment; the NAL header byte is reconstructed from
 *    the FU indicator + FU header on the fragment carrying the start bit
 *  - types 25/26/27/29 are unimplemented, 0/30/31 invalid.
 */
// return 0 on packet, no more left, 1 on packet, 1 on partial packet...
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket * pkt,
                              uint32_t * timestamp,
                              const uint8_t * buf, int len, int flags)
{
    uint8_t nal = buf[0];
    uint8_t type = (nal & 0x1f);
    int result= 0;
    uint8_t start_sequence[]= {0, 0, 1};   /* Annex-B start code */

#ifdef DEBUG
    assert(data);
    assert(data->cookie == MAGIC_COOKIE);
#endif
    assert(buf);

    if (type >= 1 && type <= 23)
        type = 1;              // simplify the case. (these are all the nal types used internally by the h264 codec)
    switch (type) {
    case 0:                    // undefined;
        result= -1;
        break;

    case 1:
        /* single NAL unit packet: start code + raw payload */
        av_new_packet(pkt, len+sizeof(start_sequence));
        memcpy(pkt->data, start_sequence, sizeof(start_sequence));
        memcpy(pkt->data+sizeof(start_sequence), buf, len);
#ifdef DEBUG
        data->packet_types_received[nal & 0x1f]++;
#endif
        break;

    case 24:                   // STAP-A (one packet, multiple nals)
        // consume the STAP-A NAL
        buf++;
        len--;
        // first we are going to figure out the total size....
        {
            int pass= 0;
            int total_length= 0;
            uint8_t *dst= NULL;

            /* pass 0 sums output sizes; pass 1 performs the copies */
            for(pass= 0; pass<2; pass++) {
                const uint8_t *src= buf;
                int src_len= len;

                do {
                    uint16_t nal_size = AV_RB16(src); // this going to be a problem if unaligned (can it be?)

                    // consume the length of the aggregate...
                    src += 2;
                    src_len -= 2;

                    if (nal_size <= src_len) {
                        if(pass==0) {
                            // counting...
                            total_length+= sizeof(start_sequence)+nal_size;
                        } else {
                            // copying
                            assert(dst);
                            memcpy(dst, start_sequence, sizeof(start_sequence));
                            dst+= sizeof(start_sequence);
                            memcpy(dst, src, nal_size);
#ifdef DEBUG
                            data->packet_types_received[*src & 0x1f]++;
#endif
                            dst+= nal_size;
                        }
                    } else {
                        av_log(ctx, AV_LOG_ERROR,
                               "nal size exceeds length: %d %d\n", nal_size, src_len);
                    }

                    // eat what we handled...
                    src += nal_size;
                    src_len -= nal_size;

                    if (src_len < 0)
                        av_log(ctx, AV_LOG_ERROR,
                               "Consumed more bytes than we got! (%d)\n", src_len);
                } while (src_len > 2);      // because there could be rtp padding..

                if(pass==0) {
                    // now we know the total size of the packet (with the start sequences added)
                    av_new_packet(pkt, total_length);
                    dst= pkt->data;
                } else {
                    assert(dst-pkt->data==total_length);
                }
            }
        }
        break;

    case 25:                   // STAP-B
    case 26:                   // MTAP-16
    case 27:                   // MTAP-24
    case 29:                   // FU-B
        av_log(ctx, AV_LOG_ERROR,
               "Unhandled type (%d) (See RFC for implementation details\n",
               type);
        result= -1;
        break;

    case 28:                   // FU-A (fragmented nal)
        buf++;
        len--;                 // skip the fu_indicator
        {
            // these are the same as above, we just redo them here for clarity...
            uint8_t fu_indicator = nal;
            uint8_t fu_header = *buf;   // read the fu_header.
            uint8_t start_bit = fu_header >> 7;
//            uint8_t end_bit = (fu_header & 0x40) >> 6;
            uint8_t nal_type = (fu_header & 0x1f);
            uint8_t reconstructed_nal;

            // reconstruct this packet's true nal; only the data follows..
            reconstructed_nal = fu_indicator & (0xe0);  // the original nal forbidden bit and NRI are stored in this packet's nal;
            reconstructed_nal |= nal_type;

            // skip the fu_header...
            buf++;
            len--;

#ifdef DEBUG
            if (start_bit)
                data->packet_types_received[nal_type]++;
#endif
            if(start_bit) {
                // copy in the start sequence, and the reconstructed nal....
                av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len);
                memcpy(pkt->data, start_sequence, sizeof(start_sequence));
                pkt->data[sizeof(start_sequence)]= reconstructed_nal;
                memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len);
            } else {
                /* continuation fragment: raw payload, no start code */
                av_new_packet(pkt, len);
                memcpy(pkt->data, buf, len);
            }
        }
        break;

    case 30:                   // undefined
    case 31:                   // undefined
    default:
        av_log(ctx, AV_LOG_ERROR, "Undefined type (%d)", type);
        result= -1;
        break;
    }

    pkt->stream_index = st->index;

    return result;
}
//保存帧为图片 int save_frame(AVPicture pict,int width, int height) { AVFormatContext* pFormatCtx; AVOutputFormat* fmt; AVStream* video_st; AVCodecContext* pCodecCtx; AVCodec* pCodec; uint8_t* picture_buf; AVFrame* picture; int size; int in_w=width,in_h=height; char out_file[32]; sprintf(out_file, "./JPEG/cuc_view_encode%d.jpg", ++iFrame); //const char* out_file = "cuc_view_encode.jpg"; //方法1.组合使用几个函数 pFormatCtx = avformat_alloc_context(); //猜格式。用MJPEG编码 fmt = av_guess_format("mjpeg", NULL, NULL); pFormatCtx->oformat = fmt; //注意:输出路径 if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0) { printf("输出文件打开失败"); return -1; } //video_st = av_new_stream(pFormatCtx, 0); video_st=avformat_new_stream(pFormatCtx,NULL); if (video_st==NULL) { return -1; } pCodecCtx = video_st->codec; pCodecCtx->codec_id = fmt->video_codec; pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pCodecCtx->pix_fmt = PIX_FMT_YUVJ420P; pCodecCtx->width = in_w; pCodecCtx->height = in_h; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; // //输出格式信息 // av_dump_format(pFormatCtx, 0, out_file, 1); pCodec = avcodec_find_encoder(pCodecCtx->codec_id); if (!pCodec) { printf("没有找到合适的编码器!"); return -1; } if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0) { printf("编码器打开失败!"); return -1; } //写文件头 avformat_write_header(pFormatCtx,NULL); AVPacket pkt; int y_size = pCodecCtx->width * pCodecCtx->height; av_new_packet(&pkt,y_size*3); int got_picture=0; //编码 int ret = avcodec_encode_video2(pCodecCtx, &pkt,(AVFrame *)&pict, &got_picture); if(ret < 0) { printf("编码错误!\n"); return -1; } if (got_picture==1) { pkt.stream_index = video_st->index; ret = av_write_frame(pFormatCtx, &pkt); } av_free_packet(&pkt); //写文件尾 av_write_trailer(pFormatCtx); // printf("编码成功!\n"); if (video_st) { avcodec_close(video_st->codec); } avio_close(pFormatCtx->pb); avformat_free_context(pFormatCtx); }
/* Demux the next chunk of a 4xm file.
 *
 * Loops over chunks until a packet is produced: LIST chunks bump the video
 * pts, video frame chunks are emitted with their 8-byte header prepended,
 * and sound chunks are routed by track number, advancing that track's
 * audio pts by the decoded sample count.
 *
 * Fix over the original: the non-ADPCM sample-count computation divided by
 * (bits / 8), which is zero for a corrupted header with bits < 8 — a
 * division by zero.  That division is now guarded.
 *
 * Returns >= 0 on success, a negative AVERROR on failure. */
static int fourxm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {
        if ((ret = avio_read(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size       = AV_RL32(&header[4]);
        if (pb->eof_reached)
            return AVERROR(EIO);
        switch (fourcc_tag) {
        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts++;

            /* skip the LIST-* tag and move on to the next fourcc */
            avio_rl32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size; size + 8 < size detects unsigned overflow */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts = fourxm->video_pts;
            pkt->pos = avio_tell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = avio_read(s->pb, &pkt->data[8], size);
            if (ret < 0) {
                av_free_packet(pkt);
            } else
                packet_read = 1;
            break;

        case snd__TAG:
            track_number = avio_rl32(pb);
            avio_skip(pb, 4);
            size -= 8;

            if (track_number < fourxm->track_count &&
                fourxm->tracks[track_number].channels > 0) {
                ret = av_get_packet(s->pb, pkt, size);
                if (ret < 0)
                    return AVERROR(EIO);
                pkt->stream_index = fourxm->tracks[track_number].stream_index;
                pkt->pts = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;

                /* pts accounting: convert payload size to a sample count */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -= 2 * (fourxm->tracks[track_number].channels);
                audio_frame_count /= fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm) {
                    audio_frame_count *= 2;
                } else if (fourxm->tracks[track_number].bits >= 8) {
                    /* guard: bits < 8 would make (bits / 8) zero and
                     * divide by zero on a corrupted header */
                    audio_frame_count /= (fourxm->tracks[track_number].bits / 8);
                }
                fourxm->tracks[track_number].audio_pts += audio_frame_count;
            } else {
                avio_skip(pb, size);
            }
            break;

        default:
            avio_skip(pb, size);
            break;
        }
    }
    return ret;
}
/* Demux one packet from a Sony PlayStation STR stream.
 *
 * Reads raw CD sectors in a loop.  Video frames span several sectors and
 * are assembled in a per-channel temporary packet; audio sectors become a
 * packet immediately.  Streams are created lazily the first time data for
 * a channel is seen.
 *
 * Fix over the original: five field reads were mojibake ("§or[...]")
 * where "&sector[...]" belongs, which does not compile.
 *
 * Returns 0 on success, AVERROR(EIO) on read/allocation failure,
 * AVERROR_INVALIDDATA on a bogus channel number. */
static int str_read_packet(AVFormatContext *s, AVPacket *ret_pkt)
{
    AVIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {
        if (avio_read(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {
        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            {
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size     = AV_RL32(&sector[0x24]);

                /* sanity-check the sector header before using it */
                if (!(frame_size >= 0 &&
                      current_sector < sector_count &&
                      sector_count * VIDEO_DATA_CHUNK_SIZE >= frame_size)) {
                    av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n",
                           current_sector, sector_count, frame_size);
                    break;
                }

                if (str->channels[channel].video_stream_index < 0) {
                    /* allocate a new AVStream */
                    st = avformat_new_stream(s, NULL);
                    if (!st)
                        return AVERROR(ENOMEM);
                    av_set_pts_info(st, 64, 1, 15);

                    str->channels[channel].video_stream_index = st->index;

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_id   = CODEC_ID_MDEC;
                    st->codec->codec_tag  = 0;  /* no fourcc */
                    st->codec->width      = AV_RL16(&sector[0x28]);
                    st->codec->height     = AV_RL16(&sector[0x2A]);
                }

                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->channels[channel].tmp_pkt;

                if (pkt->size != sector_count * VIDEO_DATA_CHUNK_SIZE) {
                    if (pkt->data)
                        av_log(s, AV_LOG_ERROR, "missmatching sector_count\n");
                    av_free_packet(pkt);
                    if (av_new_packet(pkt, sector_count * VIDEO_DATA_CHUNK_SIZE))
                        return AVERROR(EIO);

                    pkt->pos = avio_tell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
                }

                memcpy(pkt->data + current_sector * VIDEO_DATA_CHUNK_SIZE,
                       sector + VIDEO_DATA_HEADER_SIZE,
                       VIDEO_DATA_CHUNK_SIZE);

                if (current_sector == sector_count - 1) {
                    /* frame complete: hand the assembled packet to the
                     * caller and reset the temporary */
                    pkt->size = frame_size;
                    *ret_pkt = *pkt;
                    pkt->data = NULL;
                    pkt->size = -1;
                    return 0;
                }
            }
            break;

        case CDXA_TYPE_AUDIO:
            if (str->channels[channel].audio_stream_index < 0) {
                int fmt = sector[0x13];

                /* allocate a new AVStream */
                st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id    = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag   = 0;  /* no fourcc */
                st->codec->channels    = (fmt & 1) ? 2 : 1;
                st->codec->sample_rate = (fmt & 4) ? 18900 : 37800;
            //  st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                av_set_pts_info(st, 64, 128, st->codec->sample_rate);
            }
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            memcpy(pkt->data, sector + 24, 2304);

            pkt->stream_index = str->channels[channel].audio_stream_index;
            return 0;

        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n",
                   sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (pb->eof_reached)
            return AVERROR(EIO);
    }
}
int main(int argc, char* argv[]) { AVFormatContext* pFormatCtx; AVOutputFormat* fmt; AVStream* audio_st; AVCodecContext* pCodecCtx; AVCodec* pCodec; uint8_t* frame_buf; AVFrame* pFrame; AVPacket pkt; int got_frame=0; int ret=0; int size=0; FILE *in_file=NULL; //Raw PCM data int framenum=1000; //Audio frame number const char* out_file = "tdjm.aac"; //Output URL int i; in_file= fopen("tdjm.pcm", "rb"); av_register_all(); //Method 1. pFormatCtx = avformat_alloc_context(); fmt = av_guess_format(NULL, out_file, NULL); pFormatCtx->oformat = fmt; //Method 2. //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file); //fmt = pFormatCtx->oformat; //Open output URL if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){ printf("Failed to open output file!\n"); return -1; } audio_st = avformat_new_stream(pFormatCtx, 0); if (audio_st==NULL){ return -1; } pCodecCtx = audio_st->codec; pCodecCtx->codec_id = fmt->audio_codec; pCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO; pCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16; pCodecCtx->sample_rate= 44100; pCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO; pCodecCtx->channels = av_get_channel_layout_nb_channels(pCodecCtx->channel_layout); pCodecCtx->bit_rate = 64000; //Show some information av_dump_format(pFormatCtx, 0, out_file, 1); pCodec = avcodec_find_encoder(pCodecCtx->codec_id); if (!pCodec){ printf("Can not find encoder!\n"); return -1; } if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){ printf("Failed to open encoder!\n"); return -1; } pFrame = av_frame_alloc(); pFrame->nb_samples= pCodecCtx->frame_size; pFrame->format= pCodecCtx->sample_fmt; size = av_samples_get_buffer_size(NULL, pCodecCtx->channels,pCodecCtx->frame_size,pCodecCtx->sample_fmt, 1); frame_buf = (uint8_t *)av_malloc(size); avcodec_fill_audio_frame(pFrame, pCodecCtx->channels, pCodecCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1); //Write Header avformat_write_header(pFormatCtx,NULL); av_new_packet(&pkt,size); for (i=0; i<framenum; i++){ //Read PCM if 
(fread(frame_buf, 1, size, in_file) <= 0){ printf("Failed to read raw data! \n"); return -1; }else if(feof(in_file)){ break; } pFrame->data[0] = frame_buf; //PCM Data pFrame->pts=i*100; got_frame=0; //Encode ret = avcodec_encode_audio2(pCodecCtx, &pkt,pFrame, &got_frame); if(ret < 0){ printf("Failed to encode!\n"); return -1; } if (got_frame==1){ printf("Succeed to encode 1 frame! \tsize:%5d\n",pkt.size); pkt.stream_index = audio_st->index; ret = av_write_frame(pFormatCtx, &pkt); av_free_packet(&pkt); } } //Flush Encoder ret = flush_encoder(pFormatCtx,0); if (ret < 0) { printf("Flushing encoder failed\n"); return -1; } //Write Trailer av_write_trailer(pFormatCtx); //Clean if (audio_st){ avcodec_close(audio_st->codec); av_free(pFrame); av_free(frame_buf); } avio_close(pFormatCtx->pb); avformat_free_context(pFormatCtx); fclose(in_file); return 0; }
/* Write one packet to an a64 (C64) output.
 *
 * For the multicolor codecs in interleaved mode, chunks of the *next*
 * charset are emitted before each frame of the *previous* packet, so the
 * C64 can assemble a charset while displaying; the current packet is
 * stashed in c->prev_pkt for the next call.  Any other mode falls through
 * to the default case and writes the packet verbatim. */
static int a64_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *avctx = s->streams[0]->codec;
    A64MuxerContext *c = s->priv_data;
    int i, j;
    int ch_chunksize;
    /* the following are only set when extradata is present (checked below) */
    int lifetime;
    int frame_count;
    int charset_size;
    int frame_size;
    int num_frames;

    /* fetch values from extradata */
    switch (avctx->codec->id) {
    case CODEC_ID_A64_MULTI:
    case CODEC_ID_A64_MULTI5:
        if(c->interleaved) {
            /* Write interleaved, means we insert chunks of the future charset before each current frame.
             * Reason: if we load 1 charset + corresponding frames in one block on c64, we need to store
             * them first and then display frame by frame to keep in sync. Thus we would read and write
             * the data for colram from/to ram first and waste too much time. If we interleave and send the
             * charset beforehand, we assemble a new charset chunk by chunk, write current screen data to
             * screen-ram to be displayed and decode the colram directly to colram-location $d800 during
             * the overscan, while reading directly from source.
             * This is the only way so far, to achieve 25fps on c64 */
            if(avctx->extradata) {
                /* fetch values from extradata */
                lifetime     = AV_RB32(avctx->extradata + 0);
                frame_count  = AV_RB32(avctx->extradata + 4);
                charset_size = AV_RB32(avctx->extradata + 8);
                frame_size   = AV_RB32(avctx->extradata + 12);
                /* TODO: sanity checks? */
            } else {
                av_log(avctx, AV_LOG_ERROR, "extradata not set\n");
                return AVERROR(EINVAL);
            }

            ch_chunksize=charset_size/lifetime;
            /* TODO: check if charset/size is % lifetime, but maybe check in codec */

            /* NULL pkt->data means flush: emit the remaining frames of the
             * previous packet with zeroed charset chunks */
            if(pkt->data) num_frames = lifetime;
            else num_frames = c->prev_frame_count;

            for(i = 0; i < num_frames; i++) {
                if(pkt->data) {
                    /* if available, put newest charset chunk into buffer */
                    put_buffer(s->pb, pkt->data + ch_chunksize * i, ch_chunksize);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < ch_chunksize; j++) put_byte(s->pb, 0);
                }

                if(c->prev_pkt.data) {
                    /* put frame (screen + colram) from last packet into buffer */
                    put_buffer(s->pb, c->prev_pkt.data + charset_size + frame_size * i, frame_size);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < frame_size; j++) put_byte(s->pb, 0);
                }
            }

            /* backup current packet for next turn */
            if(pkt->data) {
                /* no backup packet yet? create one! */
                if(!c->prev_pkt.data) av_new_packet(&c->prev_pkt, pkt->size);
                /* we have a packet and data is big enough, reuse it */
                if(c->prev_pkt.data && c->prev_pkt.size >= pkt->size) {
                    memcpy(c->prev_pkt.data, pkt->data, pkt->size);
                    c->prev_pkt.size = pkt->size;
                } else {
                    av_log(avctx, AV_LOG_ERROR, "Too less memory for prev_pkt.\n");
                    return AVERROR(ENOMEM);
                }
            }

            c->prev_frame_count = frame_count;
            break;
        }
        /* fall through: non-interleaved multicolor is written as-is */
    default:
        /* Write things as is. Nice for self-contained frames from non-multicolor modes or if played
         * directly from ram and not from a streaming device (rrnet/mmc) */
        if(pkt) put_buffer(s->pb, pkt->data, pkt->size);
        break;
    }

    put_flush_packet(s->pb);
    return 0;
}
/* Read one image (or one chunk from a pipe) as a packet.
 *
 * For raw video, up to three planar files (base name plus 'V'/'W' suffix
 * variants, see the 'U' + i rewrite of the last filename character) are
 * opened and concatenated into a single packet; for other codecs a single
 * file is read whole.  In pipe mode, 4096-byte chunks are read from pb. */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int i;
    int size[3]={0}, ret[3]={0};
    ByteIOContext *f[3];
    AVCodecContext *codec= s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s1->loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (s->img_number > s->img_last)
            return AVERROR_EOF;
        if (av_get_frame_filename(filename, sizeof(filename),
                                  s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR(EIO);
        for(i=0; i<3; i++){
            if (url_fopen(&f[i], filename, URL_RDONLY) < 0) {
                /* a missing second plane just means "not planar" — only the
                 * first file is mandatory */
                if(i==1)
                    break;
                av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n",filename);
                return AVERROR(EIO);
            }
            size[i]= url_fsize(f[i]);
            /* non-raw codecs use only the single file */
            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            /* rewrite the last filename character to the next plane suffix */
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
        if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = s1->pb;
        if (url_feof(f[0]))
            return AVERROR(EIO);
        size[0]= 4096;
    }

    av_new_packet(pkt, size[0] + size[1] + size[2]);
    pkt->stream_index = 0;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* concatenate all opened planes into the packet */
    pkt->size= 0;
    for(i=0; i<3; i++){
        if(size[i]){
            ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                url_fclose(f[i]);
            if(ret[i]>0)
                pkt->size += ret[i];
        }
    }

    if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
        av_free_packet(pkt);
        return AVERROR(EIO); /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
/* Read one WavPack frame as a single packet.
 *
 * A multichannel frame is a chain of 'wvpk' sub-blocks terminated by
 * WV_END_BLOCK; each sub-block's header fields are re-parsed and its
 * payload appended to the growing packet.  wc->extra holds the
 * WV_EXTRA_SIZE header bytes carried over from wv_read_block_header. */
static int wv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    WVContext *wc = s->priv_data;
    int ret;
    int size, ver, off;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    if(wc->block_parsed){
        /* previous packet consumed its header: parse the next one */
        if(wv_read_block_header(s, s->pb, 0) < 0)
            return -1;
    }

    /* multichannel packets carry a 4-byte total-size prefix */
    off = wc->multichannel ? 4 : 0;
    if(av_new_packet(pkt, wc->blksize + WV_EXTRA_SIZE + off) < 0)
        return AVERROR(ENOMEM);
    if(wc->multichannel)
        AV_WL32(pkt->data, wc->blksize + WV_EXTRA_SIZE + 12);
    memcpy(pkt->data + off, wc->extra, WV_EXTRA_SIZE);
    ret = get_buffer(s->pb, pkt->data + WV_EXTRA_SIZE + off, wc->blksize);
    if(ret != wc->blksize){
        av_free_packet(pkt);
        return AVERROR(EIO);
    }
    while(!(wc->flags & WV_END_BLOCK)){
        /* append the next sub-block of this frame */
        if(get_le32(s->pb) != MKTAG('w', 'v', 'p', 'k')){
            av_free_packet(pkt);
            return -1;
        }
        if((ret = av_append_packet(s->pb, pkt, 4)) < 0){
            av_free_packet(pkt);
            return ret;
        }
        size = AV_RL32(pkt->data + pkt->size - 4);
        if(size < 24 || size > WV_BLOCK_LIMIT){
            av_free_packet(pkt);
            av_log(s, AV_LOG_ERROR, "Incorrect block size %d\n", size);
            return -1;
        }
        wc->blksize = size;
        ver = get_le16(s->pb);
        if(ver < 0x402 || ver > 0x410){
            av_free_packet(pkt);
            av_log(s, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
            return -1;
        }
        get_byte(s->pb); // track no
        get_byte(s->pb); // track sub index
        wc->samples = get_le32(s->pb); // total samples in file
        wc->soff = get_le32(s->pb); // offset in samples of current block
        if((ret = av_append_packet(s->pb, pkt, WV_EXTRA_SIZE)) < 0){
            av_free_packet(pkt);
            return ret;
        }
        /* remember the header bytes just appended for the next iteration */
        memcpy(wc->extra, pkt->data + pkt->size - WV_EXTRA_SIZE, WV_EXTRA_SIZE);

        if(wv_read_block_header(s, s->pb, 1) < 0){
            av_free_packet(pkt);
            return -1;
        }
        ret = av_append_packet(s->pb, pkt, wc->blksize);
        if(ret < 0){
            av_free_packet(pkt);
            return ret;
        }
    }
    pkt->stream_index = 0;
    wc->block_parsed = 1;
    pkt->pts = wc->soff;
    av_add_index_entry(s->streams[0], wc->pos, pkt->pts, 0, 0, AVINDEX_KEYFRAME);
    return 0;
}
/* Demux the next chunk of an id RoQ file.
 *
 * INFO chunks update video dimensions; a QUAD_CODEBOOK chunk is bundled
 * with the following VQ chunk into one packet (peek ahead, then rewind);
 * sound chunks lazily create the audio stream and fall through to the
 * generic chunk-to-packet path shared with QUAD_VQ. */
static int roq_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {
        if (url_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        switch (chunk_type) {
        case RoQ_INFO:
            if (!roq->width || !roq->height) {
                AVStream *st = s->streams[roq->video_stream_index];
                if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codec->width  = roq->width  = AV_RL16(preamble);
                st->codec->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
            break;

        case RoQ_QUAD_CODEBOOK:
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            avio_skip(pb, codebook_size);
            if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            /* total size: codebook chunk + VQ chunk incl. both preambles */
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                codebook_size;

            /* rewind */
            avio_seek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                /* first sound chunk: create the audio stream */
                AVStream *st = av_new_stream(s, 1);
                if (!st)
                    return AVERROR(ENOMEM);
                av_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id = CODEC_ID_ROQ_DPCM;
                st->codec->codec_tag = 0;  /* no tag */
                st->codec->channels = roq->audio_channels =
                    chunk_type == RoQ_SOUND_STEREO ? 2 : 1;
                st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codec->bits_per_coded_sample = 16;
                st->codec->bit_rate = st->codec->channels *
                                      st->codec->sample_rate *
                                      st->codec->bits_per_coded_sample;
                st->codec->block_align = st->codec->channels *
                                         st->codec->bits_per_coded_sample;
            }
            /* fall through: sound data is packetized like a VQ chunk */
        case RoQ_QUAD_VQ:
            /* load up the packet */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= avio_tell(pb);
            ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
            break;
        }
    }

    return ret;
}
/* Demux the next NUV frame.
 *
 * Reads HDRSIZE-byte frame headers and dispatches on the frame type.
 * For RTjpeg video the header itself is prepended to the packet payload
 * (copyhdrsize); extradata frames are delivered the same way and thus
 * fall through to the video case. */
static int nuv_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;

    while (!url_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos    = avio_tell(pb);

        ret = avio_read(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);

        frametype = hdr[0];
        size      = PKTSIZE(AV_RL32(&hdr[8]));
        switch (frametype) {
        case NUV_EXTRADATA:
            if (!ctx->rtjpg_video) {
                avio_skip(pb, size);
                break;
            }
            /* fall through: rtjpeg extradata is packetized like video */
        case NUV_VIDEO:
            if (ctx->v_id < 0) {
                av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret = av_new_packet(pkt, copyhdrsize + size);
            if (ret < 0)
                return ret;

            pkt->pos = pos;
            /* hdr[2] == 0 marks a keyframe */
            pkt->flags |= hdr[2] == 0 ? AV_PKT_FLAG_KEY : 0;
            pkt->pts = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->v_id;
            memcpy(pkt->data, hdr, copyhdrsize);
            ret = avio_read(pb, pkt->data + copyhdrsize, size);
            if (ret < 0) {
                av_free_packet(pkt);
                return ret;
            }
            /* short read: keep what we got */
            if (ret < size)
                av_shrink_packet(pkt, copyhdrsize + ret);
            return 0;
        case NUV_AUDIO:
            if (ctx->a_id < 0) {
                av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret = av_get_packet(pb, pkt, size);
            pkt->flags |= AV_PKT_FLAG_KEY;
            pkt->pos = pos;
            pkt->pts = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->a_id;
            if (ret < 0)
                return ret;
            return 0;
        case NUV_SEEKP:
            // contains no data, size value is invalid
            break;
        default:
            avio_skip(pb, size);
            break;
        }
    }
    return AVERROR(EIO);
}
/* Demux one chunk of an American Laser Games MM file.
 *
 * Skips/absorbs palette chunks (updating the shared palette control),
 * emits video chunks with their preamble prepended on stream 0, and
 * audio chunks on stream 1. */
static int mm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MmDemuxContext *mm = (MmDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char chunk_hdr[MM_PREAMBLE_SIZE];
    unsigned char palette[MM_PALETTE_SIZE];
    unsigned int chunk_type, chunk_len;
    int n;

    for (;;) {
        if (get_buffer(pb, chunk_hdr, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE)
            return AVERROR_IO;

        chunk_type = LE_16(&chunk_hdr[0]);
        chunk_len  = LE_16(&chunk_hdr[2]);

        switch (chunk_type) {
        case MM_TYPE_PALETTE:
            /* 4 unknown bytes precede the palette payload */
            url_fseek(pb, 4, SEEK_CUR);
            if (get_buffer(pb, palette, MM_PALETTE_SIZE) != MM_PALETTE_SIZE)
                return AVERROR_IO;
            url_fseek(pb, chunk_len - (4 + MM_PALETTE_SIZE), SEEK_CUR);

            for (n = 0; n < MM_PALETTE_COUNT; n++) {
                int r = palette[n * 3 + 0];
                int g = palette[n * 3 + 1];
                int b = palette[n * 3 + 2];
                mm->palette_control.palette[n] = (r << 16) | (g << 8) | (b);
                /* upper half holds the same colors with each component
                 * multiplied by four */
                mm->palette_control.palette[n + 128] =
                    (r << 18) | (g << 10) | (b << 2);
            }
            mm->palette_control.palette_changed = 1;
            break;

        case MM_TYPE_INTER:
        case MM_TYPE_INTRA:
        case MM_TYPE_INTRA_HH:
        case MM_TYPE_INTER_HH:
        case MM_TYPE_INTRA_HHV:
        case MM_TYPE_INTER_HHV:
            /* video: emit preamble + payload as one packet on stream 0 */
            if (av_new_packet(pkt, chunk_len + MM_PREAMBLE_SIZE))
                return AVERROR_NOMEM;
            memcpy(pkt->data, chunk_hdr, MM_PREAMBLE_SIZE);
            if (get_buffer(pb, pkt->data + MM_PREAMBLE_SIZE, chunk_len) != chunk_len)
                return AVERROR_IO;
            pkt->size = chunk_len + MM_PREAMBLE_SIZE;
            pkt->stream_index = 0;
            pkt->pts = mm->video_pts++;
            return 0;

        case MM_TYPE_AUDIO:
            if (av_get_packet(&s->pb, pkt, chunk_len) < 0)
                return AVERROR_NOMEM;
            pkt->size = chunk_len;
            pkt->stream_index = 1;
            pkt->pts = mm->audio_pts++;
            return 0;

        default:
            av_log(NULL, AV_LOG_INFO, "mm: unknown chunk type 0x%x\n",
                   chunk_type);
            url_fseek(pb, chunk_len, SEEK_CUR);
        }
    }

    return 0;
}
/* Demux one packet from a Sony PlayStation STR stream (legacy ByteIOContext
 * API variant with fixed video/audio channel selection).
 *
 * Fixes over the original: (1) three field reads were mojibake
 * ("§or[...]") where "&sector[...]" belongs — the code did not compile;
 * (2) the video pts bump used "audio_channel != -1", contradicting its own
 * comment "if there is no audio, adjust the pts" — the condition is now
 * "== -1" so the pts advances only when no audio stream provides timing.
 *
 * Returns 0 on a produced packet, AVERROR_IO on read/alloc failure,
 * AVERROR_INVALIDDATA on a bogus channel number. */
static int str_read_packet(AVFormatContext *s, AVPacket *ret_pkt)
{
    ByteIOContext *pb = &s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    int packet_read = 0;
    int ret = 0;
    AVPacket *pkt;

    while (!packet_read) {
        if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR_IO;

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {
        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            /* check if this the video channel we care about */
            if (channel == str->video_channel) {
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size     = AV_RL32(&sector[0x24]);
                int bytes_to_copy;

//        printf("%d %d %d\n",current_sector,sector_count,frame_size);
                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->tmp_pkt;
                if (current_sector == 0) {
                    if (av_new_packet(pkt, frame_size))
                        return AVERROR_IO;

                    pkt->pos = url_ftell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
//                    pkt->pts = str->pts;

                    /* if there is no audio, adjust the pts after every video
                     * frame; assume 15 fps */
                    if (str->audio_channel == -1)
                        str->pts += (90000 / 15);
                }

                /* load all the constituent chunks in the video packet */
                bytes_to_copy = frame_size - current_sector * VIDEO_DATA_CHUNK_SIZE;
                if (bytes_to_copy > 0) {
                    if (bytes_to_copy > VIDEO_DATA_CHUNK_SIZE)
                        bytes_to_copy = VIDEO_DATA_CHUNK_SIZE;
                    memcpy(pkt->data + current_sector * VIDEO_DATA_CHUNK_SIZE,
                           sector + VIDEO_DATA_HEADER_SIZE, bytes_to_copy);
                }
                if (current_sector == sector_count - 1) {
                    *ret_pkt = *pkt;
                    return 0;
                }
            }
            break;

        case CDXA_TYPE_AUDIO:
#ifdef PRINTSTUFF
            printf (" dropping audio sector\n");
#endif
#if 1
            /* check if this the audio channel we care about */
            if (channel == str->audio_channel) {
                pkt = ret_pkt;
                if (av_new_packet(pkt, 2304))
                    return AVERROR_IO;
                memcpy(pkt->data, sector + 24, 2304);

                pkt->stream_index =
                    str->channels[channel].audio_stream_index;
                //pkt->pts = str->pts;
                return 0;
            }
#endif
            break;

        default:
            /* drop the sector and move on */
#ifdef PRINTSTUFF
            printf (" dropping other sector\n");
#endif
            break;
        }

        if (url_feof(pb))
            return AVERROR_IO;
    }

    return ret;
}
/* Demux one Smacker frame.
 *
 * When all streams of the previous frame have been emitted
 * (curstream < 0), this reads the next frame: applies an optional
 * palette-change record, stacks any audio chunks into smk->bufs for later
 * retrieval, and emits the video payload prefixed by a palette-change flag
 * byte plus the 768-byte palette.
 *
 * NOTE(review): this block is truncated in this chunk — the trailing
 * "else" branch (emitting stacked audio chunks) continues beyond the
 * visible source. */
static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmackerContext *smk = s->priv_data;
    int flags;
    int ret;
    int i;
    int frame_size = 0;
    int palchange = 0;
    int pos;

    if (url_feof(s->pb) || smk->cur_frame >= smk->frames)
        return AVERROR(EIO);

    /* if we demuxed all streams, pass another frame */
    if(smk->curstream < 0) {
        url_fseek(s->pb, smk->nextpos, 0);
        frame_size = smk->frm_size[smk->cur_frame] & (~3);
        flags = smk->frm_flags[smk->cur_frame];
        /* handle palette change event */
        pos = url_ftell(s->pb);
        if(flags & SMACKER_PAL){
            int size, sz, t, off, j, pos;
            uint8_t *pal = smk->pal;
            uint8_t oldpal[768];

            memcpy(oldpal, pal, 768);
            size = get_byte(s->pb);
            size = size * 4 - 1;    /* record length in bytes (minus type) */
            frame_size -= size;
            frame_size--;
            sz = 0;
            pos = url_ftell(s->pb) + size;
            while(sz < 256){
                t = get_byte(s->pb);
                if(t & 0x80){ /* skip palette entries */
                    sz  +=  (t & 0x7F) + 1;
                    pal += ((t & 0x7F) + 1) * 3;
                } else if(t & 0x40){ /* copy with offset */
                    off = get_byte(s->pb) * 3;
                    j = (t & 0x3F) + 1;
                    while(j-- && sz < 256) {
                        *pal++ = oldpal[off + 0];
                        *pal++ = oldpal[off + 1];
                        *pal++ = oldpal[off + 2];
                        sz++;
                        off += 3;
                    }
                } else { /* new entries */
                    *pal++ = smk_pal[t];
                    *pal++ = smk_pal[get_byte(s->pb) & 0x3F];
                    *pal++ = smk_pal[get_byte(s->pb) & 0x3F];
                    sz++;
                }
            }
            url_fseek(s->pb, pos, 0);
            palchange |= 1;
        }
        flags >>= 1;
        smk->curstream = -1;
        /* if audio chunks are present, put them to stack and retrieve later */
        for(i = 0; i < 7; i++) {
            if(flags & 1) {
                int size;
                size = get_le32(s->pb) - 4;
                frame_size -= size;
                frame_size -= 4;
                smk->curstream++;
                smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size);
                smk->buf_sizes[smk->curstream] = size;
                ret = get_buffer(s->pb, smk->bufs[smk->curstream], size);
                if(ret != size)
                    return AVERROR(EIO);
                smk->stream_id[smk->curstream] = smk->indexes[i];
            }
            flags >>= 1;
        }
        /* video packet: 1 flag byte + 768-byte palette + frame data */
        if (av_new_packet(pkt, frame_size + 768))
            return AVERROR(ENOMEM);
        if(smk->frm_size[smk->cur_frame] & 1)
            palchange |= 2;
        pkt->data[0] = palchange;
        memcpy(pkt->data + 1, smk->pal, 768);
        ret = get_buffer(s->pb, pkt->data + 769, frame_size);
        if(ret != frame_size)
            return AVERROR(EIO);
        pkt->stream_index = smk->videoindex;
        pkt->size = ret + 769;
        smk->cur_frame++;
        smk->nextpos = url_ftell(s->pb);
    } else {
/* Store one interleaved AMR RTP payload and emit the oldest due frame.
 *
 * Each payload carries one immediate frame plus data belonging to later
 * positions of the interleaving group (stashed in data->group).  Handles
 * group-size changes, missed packets (zeroed group slots), and wraparound
 * where the previous group must be drained before the new packet is
 * processed (the packet is stashed in next_data and replayed via
 * return_stored_frame).
 *
 * Returns 0 when done, 1 when more stored frames remain, or a negative
 * AVERROR on malformed input. */
static int store_packet(AVFormatContext *ctx, PayloadContext *data,
                        AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                        const uint8_t *buf, int len)
{
    int interleave_size, interleave_index;
    int frame_size, ret;
    InterleavePacket* ip;

    if (len < 2)
        return AVERROR_INVALIDDATA;

    /* first byte: interleave group size (high bits) and our index (low) */
    interleave_size  = buf[0] >> 3 & 7;
    interleave_index = buf[0]      & 7;

    if (interleave_size > 5) {
        av_log(ctx, AV_LOG_ERROR, "Invalid interleave size %d\n",
                                  interleave_size);
        return AVERROR_INVALIDDATA;
    }
    if (interleave_index > interleave_size) {
        av_log(ctx, AV_LOG_ERROR, "Invalid interleave index %d/%d\n",
                                  interleave_index, interleave_size);
        return AVERROR_INVALIDDATA;
    }
    if (interleave_size != data->interleave_size) {
        int i;
        /* First packet, or changed interleave size */
        data->interleave_size = interleave_size;
        data->interleave_index = 0;
        for (i = 0; i < 6; i++)
            data->group[i].size = 0;
    }

    if (interleave_index < data->interleave_index) {
        /* Wrapped around - missed the last packet of the previous group. */
        if (data->group_finished) {
            /* No more data in the packets in this interleaving group, just
             * start processing the next one */
            data->interleave_index = 0;
        } else {
            /* Stash away the current packet, emit everything we have of the
             * previous group. */
            for (; data->interleave_index <= interleave_size;
                 data->interleave_index++)
                data->group[data->interleave_index].size = 0;

            if (len > sizeof(data->next_data))
                return AVERROR_INVALIDDATA;
            memcpy(data->next_data, buf, len);
            data->next_size = len;
            data->next_timestamp = *timestamp;
            *timestamp = RTP_NOTS_VALUE;

            data->interleave_index = 0;
            return return_stored_frame(ctx, data, st, pkt, timestamp, buf,
                                       len);
        }
    }
    if (interleave_index > data->interleave_index) {
        /* We missed a packet */
        for (; data->interleave_index < interleave_index;
             data->interleave_index++)
            data->group[data->interleave_index].size = 0;
    }
    data->interleave_index = interleave_index;

    /* second byte indexes the fixed per-mode frame size table */
    if (buf[1] >= FF_ARRAY_ELEMS(frame_sizes))
        return AVERROR_INVALIDDATA;
    frame_size = frame_sizes[buf[1]];
    if (1 + frame_size > len)
        return AVERROR_INVALIDDATA;

    if (len - 1 - frame_size > sizeof(data->group[0].data))
        return AVERROR_INVALIDDATA;

    /* emit the immediate frame ... */
    if ((ret = av_new_packet(pkt, frame_size)) < 0)
        return ret;
    memcpy(pkt->data, &buf[1], frame_size);
    pkt->stream_index = st->index;

    /* ... and stash the trailing data for this group slot */
    ip = &data->group[data->interleave_index];
    ip->size = len - 1 - frame_size;
    ip->pos = 0;
    memcpy(ip->data, &buf[1 + frame_size], ip->size);
    /* Each packet must contain the same number of frames according to the
     * RFC. If there's no data left in this packet, there shouldn't be any
     * in any of the other frames in the interleaving group either. */
    data->group_finished = ip->size == 0;

    if (interleave_index == interleave_size) {
        data->interleave_index = 0;
        return !data->group_finished;
    } else {
        data->interleave_index++;
        return 0;
    }
}
/* Reassemble one RealMedia video frame from (possibly sliced) packet data.
 *
 * Reads a slice/frame header from pb and either:
 *  - emits a complete frame into *pkt (header type bit 0 set), or
 *  - accumulates a slice into vst->pkt, emitting the assembled frame once
 *    the last slice arrives.
 *
 * The emitted packet is prefixed with a slice table: byte 0 = slice count
 * minus one, followed by 8 bytes (flag + offset) per slice, which is why
 * the buffer is sized len + 8*slices + 1 and written with the
 * AV_WL32(... - 7/- 3 + 8*cur_slice ...) expressions below.
 *
 * Returns 0 when *pkt holds a finished frame, 1 when more slices are
 * needed (or the slice was dropped), negative AVERROR on error.
 */
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq,
                                   int64_t *timestamp)
{
    int hdr;
    int seq = 0, pic_num = 0, len2 = 0, pos = 0; // init to silence compiler warning
    int type;
    int ret;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;   // 0/2: slice, 1: whole frame, 3: frame as part of packet

    if(type != 3){  // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    /* get_num()/avio_r8() decremented len as they consumed bytes; a
     * negative len means the header overran the packet. */
    if(len<0) {
        av_log(s, AV_LOG_ERROR, "Insufficient data\n");
        return -1;
    }
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3){  // frame as a part of packet
            len= len2;
            *timestamp = pos;
        }
        if(rm->remaining_len < len) {
            av_log(s, AV_LOG_ERROR, "Insufficient remaining len\n");
            return -1;
        }
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        /* Single-slice table: count byte + one 8-byte entry. */
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        if ((ret = avio_read(pb, pkt->data + 9, len)) != len) {
            av_free_packet(pkt);
            av_log(s, AV_LOG_ERROR, "Failed to read %d bytes\n", len);
            return ret < 0 ? ret : AVERROR(EIO);
        }
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    /* New picture: (re)allocate the assembly buffer. */
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        /* Guard against declared sizes larger than the input can supply. */
        if (len2 > ffio_limit(pb, len2)) {
            av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n");
            return AVERROR_INVALIDDATA;
        }
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        memset(vst->pkt.data, 0, vst->pkt.size);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices) {
        av_log(s, AV_LOG_ERROR, "cur slice %d, too large\n", vst->cur_slice);
        return 1;
    }
    if(!vst->pkt.data)
        return AVERROR(ENOMEM);
    /* Record this slice in the slice table at the front of the buffer. */
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice,
            vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize) {
        av_log(s, AV_LOG_ERROR, "outside videobufsize\n");
        return 1;
    }
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    /* Last slice of the picture (type 2) or buffer exactly full:
     * hand ownership of the assembled packet to the caller. */
    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        vst->pkt.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        /* Fewer slices arrived than the table reserved space for: close
         * the gap between the table and the payload. */
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }

    return 1;
}
/* Emit the next pending Interplay MVE packet (audio first, then video).
 *
 * The chunk parser records pending audio/video chunk offsets and sizes in
 * *s; this function turns exactly one of them into an AVPacket:
 *  - pending audio (s->audio_chunk_offset != 0): read it, stamp the pts
 *    from the running audio frame count, and clear the pending offset;
 *  - pending video (s->decode_map_chunk_offset != 0): concatenate the
 *    decode map and the video data into one packet, attach a palette as
 *    side data if one is queued;
 *  - otherwise seek to the next chunk so the caller can keep parsing.
 *
 * Returns CHUNK_VIDEO on a produced packet (audio packets also report
 * CHUNK_VIDEO — note for review), CHUNK_DONE when nothing was pending,
 * CHUNK_EOF on short reads, CHUNK_NOMEM on allocation failure.
 */
static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
                               AVPacket *pkt)
{
    int chunk_type;

    if (s->audio_chunk_offset) {
        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        avio_seek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;  /* consumed: clear the pending marker */

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = s->audio_frame_count;

        /* audio frame maintenance */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
            (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            /* DPCM: 6-byte header, then one byte per sample per channel. */
            s->audio_frame_count +=
                (s->audio_chunk_size - 6) / s->audio_channels;

        av_dlog(NULL, "sending audio frame with pts %"PRId64" (%d audio frames)\n",
                pkt->pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together */

        if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        if (s->has_palette) {
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (pal) {
                memcpy(pal, s->palette, AVPALETTE_SIZE);
                s->has_palette = 0;
            }
        }

        pkt->pos= s->decode_map_chunk_offset;
        avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;  /* consumed */

        if (avio_read(pb, pkt->data, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        avio_seek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;  /* consumed */

        /* Video data follows the decode map inside the same packet. */
        if (avio_read(pb, pkt->data + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        av_dlog(NULL, "sending video frame with pts %"PRId64"\n", pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        /* Nothing pending: position the stream at the next chunk. */
        avio_seek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;
    }

    return chunk_type;
}
/* Follows RFC 3640 */
/* Depacketize one RTP AAC (MPEG-4 generic) payload.
 *
 * Three modes, in order of appearance below:
 *  - buf == NULL: drain mode — emit the next AU previously stored in
 *    data->buf (multiple AU headers in an earlier packet);
 *  - one AU header whose AU is larger than the payload: fragment mode —
 *    accumulate fragments in data->buf until the RTP marker bit, then
 *    emit the reassembled AU;
 *  - otherwise: emit the first AU directly and, if more AUs follow,
 *    stash the remainder for drain mode.
 *
 * Returns 0 when *pkt is complete and no more AUs are pending, 1 when
 * the caller should call again (buf=NULL) to drain, AVERROR(EAGAIN)
 * while awaiting more fragments, or a negative error on bad data.
 */
static int aac_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                            AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                            const uint8_t *buf, int len, uint16_t seq,
                            int flags)
{
    int ret;

    if (!buf) {
        /* Drain mode: hand out the next stored AU. */
        if (data->cur_au_index > data->nb_au_headers) {
            av_log(ctx, AV_LOG_ERROR, "Invalid parser state\n");
            return AVERROR_INVALIDDATA;
        }
        if (data->buf_size - data->buf_pos < data->au_headers[data->cur_au_index].size) {
            av_log(ctx, AV_LOG_ERROR, "Invalid AU size\n");
            return AVERROR_INVALIDDATA;
        }
        if ((ret = av_new_packet(pkt, data->au_headers[data->cur_au_index].size)) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
            return ret;
        }
        memcpy(pkt->data, &data->buf[data->buf_pos], data->au_headers[data->cur_au_index].size);
        data->buf_pos += data->au_headers[data->cur_au_index].size;
        pkt->stream_index = st->index;
        data->cur_au_index++;

        if (data->cur_au_index == data->nb_au_headers) {
            /* Last stored AU consumed: reset and report done. */
            data->buf_pos = 0;
            return 0;
        }

        return 1;
    }

    if (rtp_parse_mp4_au(data, buf, len)) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing AU headers\n");
        return -1;
    }
    /* Skip the 2-byte AU-headers-length field plus the headers themselves. */
    buf += data->au_headers_length_bytes + 2;
    len -= data->au_headers_length_bytes + 2;
    if (data->nb_au_headers == 1 && len < data->au_headers[0].size) {
        /* Packet is fragmented */

        if (!data->buf_pos) {
            /* First fragment: remember the target size and timestamp. */
            if (data->au_headers[0].size > MAX_AAC_HBR_FRAME_SIZE) {
                av_log(ctx, AV_LOG_ERROR, "Invalid AU size\n");
                return AVERROR_INVALIDDATA;
            }

            data->buf_size = data->au_headers[0].size;
            data->timestamp = *timestamp;
        }

        /* All fragments of one AU must agree on timestamp and size. */
        if (data->timestamp != *timestamp ||
            data->au_headers[0].size != data->buf_size ||
            data->buf_pos + len > MAX_AAC_HBR_FRAME_SIZE) {
            data->buf_pos = 0;
            data->buf_size = 0;
            av_log(ctx, AV_LOG_ERROR, "Invalid packet received\n");
            return AVERROR_INVALIDDATA;
        }

        memcpy(&data->buf[data->buf_pos], buf, len);
        data->buf_pos += len;

        /* The RTP marker bit flags the last fragment of the AU. */
        if (!(flags & RTP_FLAG_MARKER))
            return AVERROR(EAGAIN);

        if (data->buf_pos != data->buf_size) {
            data->buf_pos = 0;
            av_log(ctx, AV_LOG_ERROR, "Missed some packets, discarding frame\n");
            return AVERROR_INVALIDDATA;
        }

        data->buf_pos = 0;
        ret = av_new_packet(pkt, data->buf_size);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
            return ret;
        }
        pkt->stream_index = st->index;

        memcpy(pkt->data, data->buf, data->buf_size);

        return 0;
    }

    if (len < data->au_headers[0].size) {
        av_log(ctx, AV_LOG_ERROR, "First AU larger than packet size\n");
        return AVERROR_INVALIDDATA;
    }
    if ((ret = av_new_packet(pkt, data->au_headers[0].size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
        return ret;
    }
    memcpy(pkt->data, buf, data->au_headers[0].size);
    len -= data->au_headers[0].size;
    buf += data->au_headers[0].size;
    pkt->stream_index = st->index;

    if (len > 0 && data->nb_au_headers > 1) {
        /* Stash trailing AUs for drain mode (buf == NULL calls). */
        data->buf_size = FFMIN(len, sizeof(data->buf));
        memcpy(data->buf, buf, data->buf_size);
        data->cur_au_index = 1;
        data->buf_pos = 0;
        return 1;
    }

    return 0;
}
/* Encode a raw YUV420P file ("src01_480x272.yuv") to H.264 ("src01.h264").
 *
 * Tutorial-style encoder using the legacy FFmpeg API (av_new_stream,
 * avcodec_encode_video2, avpicture_*).  Reads framenum frames, encodes
 * each, writes them through the muxer, flushes, and writes the trailer.
 *
 * Returns 0 on success, -1 on any failure (tutorial-style: intermediate
 * resources are not released on the error paths).
 *
 * Bug fixed vs. the original: fread() returns size_t, so the old
 * "fread(...) < 0" error check could never trigger, and a short final
 * read was fed to the encoder before the feof() test.  We now test for a
 * short read and distinguish a stream error from EOF.
 */
int _tmain(int argc, _TCHAR* argv[])
{
    AVFormatContext* pFormatCtx;
    AVOutputFormat* fmt;
    AVStream* video_st;
    AVCodecContext* pCodecCtx;
    AVCodec* pCodec;

    uint8_t* picture_buf;
    AVFrame* picture;
    int size;

    FILE *in_file = fopen("src01_480x272.yuv", "rb");  /* source YUV file */
    int in_w = 480, in_h = 272;                        /* frame dimensions */
    int framenum = 50;                                 /* frames to encode */
    const char* out_file = "src01.h264";               /* output path */

    av_register_all();

    /* Option 1: assemble the output context by hand. */
    pFormatCtx = avformat_alloc_context();
    /* Guess the muxer from the output file name. */
    fmt = av_guess_format(NULL, out_file, NULL);
    pFormatCtx->oformat = fmt;

    /* Option 2 (more automatic):
     * avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
     * fmt = pFormatCtx->oformat; */

    /* Open the output file. */
    if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
        printf("输出文件打开失败");
        return -1;
    }

    video_st = av_new_stream(pFormatCtx, 0);
    if (video_st == NULL) {
        return -1;
    }

    /* Encoder parameters. */
    pCodecCtx = video_st->codec;
    pCodecCtx->codec_id = fmt->video_codec;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = in_w;
    pCodecCtx->height = in_h;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->gop_size = 250;
    /* H264-specific tuning */
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //pCodecCtx->qcompress = 0.6;

    /* Dump the output format description. */
    av_dump_format(pFormatCtx, 0, out_file, 1);

    pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
    if (!pCodec) {
        printf("没有找到合适的编码器!\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("编码器打开失败!\n");
        return -1;
    }

    picture = avcodec_alloc_frame();
    size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width,
                              pCodecCtx->height);
    picture_buf = (uint8_t *)av_malloc(size);
    avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt,
                   pCodecCtx->width, pCodecCtx->height);

    /* Write the container header. */
    avformat_write_header(pFormatCtx, NULL);

    AVPacket pkt;
    int y_size = pCodecCtx->width * pCodecCtx->height;
    av_new_packet(&pkt, y_size * 3);

    for (int i = 0; i < framenum; i++) {
        /* Read one YUV420P frame: Y plane (y_size) + U + V (y_size/4 each).
         * fread() returns size_t (never negative); check for a short read
         * and use ferror() to tell a real error from EOF. */
        size_t frame_bytes = (size_t)y_size * 3 / 2;
        if (fread(picture_buf, 1, frame_bytes, in_file) != frame_bytes) {
            if (ferror(in_file)) {
                printf("文件读取错误\n");
                return -1;
            }
            break;  /* EOF: no more complete frames */
        }
        picture->data[0] = picture_buf;                  /* Y */
        picture->data[1] = picture_buf + y_size;         /* U */
        picture->data[2] = picture_buf + y_size * 5 / 4; /* V */
        /* PTS in time_base units (1/25 s per frame). */
        picture->pts = i;
        int got_picture = 0;
        /* Encode one frame. */
        int ret = avcodec_encode_video2(pCodecCtx, &pkt, picture, &got_picture);
        if (ret < 0) {
            printf("编码错误!\n");
            return -1;
        }
        if (got_picture == 1) {
            printf("编码成功1帧!\n");
            pkt.stream_index = video_st->index;
            ret = av_write_frame(pFormatCtx, &pkt);
            av_free_packet(&pkt);
        }
    }

    /* Flush delayed frames out of the encoder. */
    int ret = flush_encoder(pFormatCtx, 0);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }

    /* Write the container trailer. */
    av_write_trailer(pFormatCtx);

    /* Cleanup. */
    if (video_st) {
        avcodec_close(video_st->codec);
        av_free(picture);
        av_free(picture_buf);
    }
    avio_close(pFormatCtx->pb);
    avformat_free_context(pFormatCtx);

    fclose(in_file);

    return 0;
}