/**
 * Decode one AVPacket worth of Dirac data with libschroedinger.
 *
 * The input buffer may contain several Dirac parse units; each is pushed
 * into the decoder tagged with the packet pts so the pts can be recovered
 * when the corresponding picture is pulled out.  Decoded pictures are kept
 * in an internal FIFO (dec_frame_queue) and returned one per call.
 *
 * An empty packet (buf_size == 0) signals end of stream once.
 *
 * @return number of bytes consumed (buf_size) on success,
 *         a negative AVERROR code or -1 on failure.
 */
static int libschroedinger_decode_frame(AVCodecContext *avctx, void *data,
                                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int64_t pts        = avpkt->pts;
    SchroTag *tag;
    SchroDecoderParams *p_schro_params = avctx->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;
    SchroBuffer *enc_buf;
    SchroFrame *frame;
    AVFrame *avframe = data;
    int state;
    int go    = 1;
    int outer = 1;
    SchroParseUnitContext parse_ctx;
    LibSchroFrameContext *framewithpts = NULL;
    int ret;

    *got_frame = 0;

    parse_context_init(&parse_ctx, buf, buf_size);
    if (!buf_size) {
        /* Flush request: signal end of stream exactly once. */
        if (!p_schro_params->eos_signalled) {
            state = schro_decoder_push_end_of_stream(decoder);
            p_schro_params->eos_signalled = 1;
        }
    }

    /* Loop through all the individual parse units in the input buffer. */
    do {
        if ((enc_buf = find_next_parse_unit(&parse_ctx))) {
            /* Tag the buffer with the pts so it can be recovered after
             * decoding.  NOTE(review): assumes schro_tag_new() itself never
             * returns NULL - only the av_malloc()ed value is checked. */
            enc_buf->tag = schro_tag_new(av_malloc(sizeof(int64_t)), av_free);
            if (!enc_buf->tag->value) {
                av_log(avctx, AV_LOG_ERROR, "Unable to allocate SchroTag\n");
                return AVERROR(ENOMEM);
            }
            AV_WN(64, enc_buf->tag->value, pts);

            /* A picture that uses references implies reordering delay. */
            if (SCHRO_PARSE_CODE_IS_PICTURE(enc_buf->data[4]) &&
                SCHRO_PARSE_CODE_NUM_REFS(enc_buf->data[4]) > 0)
                avctx->has_b_frames = 1;

            /* Push buffer into decoder (decoder takes ownership of enc_buf). */
            state = schro_decoder_push(decoder, enc_buf);
            if (state == SCHRO_DECODER_FIRST_ACCESS_UNIT)
                libschroedinger_handle_first_access_unit(avctx);
            go = 1;
        } else
            outer = 0;

        while (go) {
            /* Parse data and process result. */
            state = schro_decoder_wait(decoder);
            switch (state) {
            case SCHRO_DECODER_FIRST_ACCESS_UNIT:
                libschroedinger_handle_first_access_unit(avctx);
                break;

            case SCHRO_DECODER_NEED_BITS:
                /* Need more input data - stop iterating over what we have. */
                go = 0;
                break;

            case SCHRO_DECODER_NEED_FRAME:
                /* Decoder needs a frame - create one and push it in. */
                frame = ff_create_schro_frame(avctx,
                                              p_schro_params->frame_format);
                if (!frame)
                    return AVERROR(ENOMEM);
                schro_decoder_add_output_picture(decoder, frame);
                break;

            case SCHRO_DECODER_OK:
                /* Pull a frame out of the decoder and queue it together
                 * with the pts recovered from its tag. */
                tag   = schro_decoder_get_picture_tag(decoder);
                frame = schro_decoder_pull(decoder);
                if (frame) {
                    framewithpts = av_malloc(sizeof(LibSchroFrameContext));
                    if (!framewithpts) {
                        av_log(avctx, AV_LOG_ERROR,
                               "Unable to allocate FrameWithPts\n");
                        return AVERROR(ENOMEM);
                    }
                    framewithpts->frame = frame;
                    framewithpts->pts   = AV_RN64(tag->value);
                    ff_schro_queue_push_back(&p_schro_params->dec_frame_queue,
                                             framewithpts);
                }
                break;

            case SCHRO_DECODER_EOS:
                go = 0;
                p_schro_params->eos_pulled = 1;
                schro_decoder_reset(decoder);
                outer = 0;
                break;

            case SCHRO_DECODER_ERROR:
                return -1;
            }
        }
    } while (outer);

    /* Grab next frame to be returned from the top of the queue. */
    framewithpts = ff_schro_queue_pop(&p_schro_params->dec_frame_queue);

    if (framewithpts && framewithpts->frame &&
        framewithpts->frame->components[0].stride) {
        if ((ret = ff_get_buffer(avctx, avframe, 0)) < 0)
            goto end;

        /* Fill frame with current buffer data from Schroedinger. */
        memcpy(avframe->data[0], framewithpts->frame->components[0].data,
               framewithpts->frame->components[0].length);
        memcpy(avframe->data[1], framewithpts->frame->components[1].data,
               framewithpts->frame->components[1].length);
        memcpy(avframe->data[2], framewithpts->frame->components[2].data,
               framewithpts->frame->components[2].length);

        avframe->pkt_pts     = framewithpts->pts;
        avframe->linesize[0] = framewithpts->frame->components[0].stride;
        avframe->linesize[1] = framewithpts->frame->components[1].stride;
        avframe->linesize[2] = framewithpts->frame->components[2].stride;

        *got_frame = 1;
    } else {
        /* No decoded picture available yet.  (The original also assigned
         * data = NULL here - a dead store on a local parameter, removed.) */
        *got_frame = 0;
    }

    ret = buf_size;
end:
    /* Now free the frame resources. */
    if (framewithpts && framewithpts->frame)
        libschroedinger_decode_frame_free(framewithpts->frame);
    av_freep(&framewithpts);
    return ret;
}
//h264数据流序列0x67 0x68 0x65 0x61 0x61...0x67 x68 0x65 0x61 0x61... // SPS PPS I P P ... SPS PPS I P P ... //ph264buf缓冲区中的数据SPS+PPS+I或者P //参数值:decObjIdx--解码对象索引值 // ph264buf--h264缓冲区 // h264buflen--h264缓冲区长度 // isfinished--该帧数据是否解码完成 // pyuvbuf--yuv缓冲区 // yuvbuflen--yuv缓冲区长度 //将多个输入参赛封装进结构中去 int h264dec(int decObjIdx, SH264decParams* pSH264decParams) { printf("h264dec_yuv()--here!\n"); if (decObjIdx < 0 || decObjIdx > max_channel) { return -1; } if (NULL == pSH264decParams->ph264buf || pSH264decParams->h264buflen <= 0 ) { return -2; } uint8_t *p = pSH264decParams->ph264buf; CH264Decode *pCH264Decode = &g_CH264Decodes[decObjIdx]; //如果解码对象未初始化,返回失败 if (pCH264Decode->bInitOK == 0 || pCH264Decode->bUsed == 0) { return -3; } //从h264缓冲区中解析出宽高 if (pCH264Decode->nImageHeight == 0 || pCH264Decode->nImageWidth == 0) { if (1 != h264dec_getInfo(decObjIdx, p, pSH264decParams->h264buflen, &pCH264Decode->nImageWidth, &pCH264Decode->nImageHeight)) return -4; } pSH264decParams->decwidth = pCH264Decode->nImageWidth; pSH264decParams->decheight = pCH264Decode->nImageHeight; if (pSH264decParams->pyuvbuf == NULL && pSH264decParams->prgbbuf == NULL) { return 1; } pCH264Decode->packet.data = p; pCH264Decode->packet.size = pSH264decParams->h264buflen; //解码 int ret = avcodec_decode_video2(pCH264Decode->pCodecCtx, pCH264Decode->pFrameYUV, &pSH264decParams->isfinished, &pCH264Decode->packet); if (ret < 0) { //解码失败 return -5; } //未解码完一帧数据 if(pSH264decParams->isfinished == 0) { return -6; } //将解码出的yuv数据拷贝到外部缓冲区 if (pSH264decParams->pyuvbuf != NULL && pSH264decParams->yuvbuflen > 0) { int i = 0; int j = 0; int yuvwidth = 0; int yuvheight = 0; int k = 0; for (; i < 3; ++i) { if (i == 0)//Y { yuvwidth = pCH264Decode->pFrameYUV->width; yuvheight = pCH264Decode->pFrameYUV->height; } else//U V { yuvwidth = pCH264Decode->pFrameYUV->width/2; yuvheight = pCH264Decode->pFrameYUV->height/2; } //从data[i]中以固定长度linesize为间隔拷贝有效长度为yuvwidth的数据到pyuvbuf中 int blocksize = yuvheight * 
pCH264Decode->pFrameYUV->linesize[i]; for (k = 0;k < blocksize;) { memcpy(&pSH264decParams->pyuvbuf[j], &pCH264Decode->pFrameYUV->data[i][k], yuvwidth); k+=pCH264Decode->pFrameYUV->linesize[i]; j+=yuvwidth; } } pSH264decParams->yuvbuflen = j; } if (pSH264decParams->prgbbuf == NULL || pSH264decParams->rgbbuflen <= 0) { return 2; } int src_w, src_h, dst_w, dst_h; src_w = dst_w = pCH264Decode->nImageWidth; src_h = dst_h = pCH264Decode->nImageHeight; if (NULL == pCH264Decode->prgbBuf || 0 == pCH264Decode->rgbbuflen) { pCH264Decode->rgbbuflen = avpicture_get_size(AV_PIX_FMT_BGR24, pCH264Decode->pCodecCtx->width, pCH264Decode->pCodecCtx->height); pCH264Decode->prgbBuf = av_malloc(pCH264Decode->rgbbuflen); ret = avpicture_fill((AVPicture *)(pCH264Decode->pFrameRGB), pCH264Decode->prgbBuf, AV_PIX_FMT_BGR24, pCH264Decode->pCodecCtx->width, pCH264Decode->pCodecCtx->height); } /* create scaling context */ if (NULL == pCH264Decode->sws_ctx) { pCH264Decode->sws_ctx = sws_getCachedContext(pCH264Decode->sws_ctx, src_w, src_h, pCH264Decode->pCodecCtx->pix_fmt, dst_w, dst_h, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL); if (NULL == pCH264Decode->sws_ctx) { printf("sws_getCachedContext failed!\n"); return -7; } } //避免rgb倒置,变换yuv数据的起始地址 pCH264Decode->pFrameYUV->data[0] += pCH264Decode->pFrameYUV->linesize[0] * (pCH264Decode->pCodecCtx->height-1); pCH264Decode->pFrameYUV->linesize[0] *= -1; pCH264Decode->pFrameYUV->data[1] += pCH264Decode->pFrameYUV->linesize[1] * (pCH264Decode->pCodecCtx->height/2 - 1);; pCH264Decode->pFrameYUV->linesize[1] *= -1; pCH264Decode->pFrameYUV->data[2] += pCH264Decode->pFrameYUV->linesize[2] * (pCH264Decode->pCodecCtx->height/2 - 1);; pCH264Decode->pFrameYUV->linesize[2] *= -1; /* convert to destination format */ sws_scale(pCH264Decode->sws_ctx, (const uint8_t * const*)pCH264Decode->pFrameYUV->data, pCH264Decode->pFrameYUV->linesize, 0, src_h, pCH264Decode->pFrameRGB->data, pCH264Decode->pFrameRGB->linesize); //拷贝rbg缓冲区 memcpy(pSH264decParams->prgbbuf, 
pCH264Decode->prgbBuf, pCH264Decode->rgbbuflen); pSH264decParams->rgbbuflen = pCH264Decode->rgbbuflen; return 3; }
/* Set up a new reference to the data described by src.
 *
 * Copies the frame properties and either references src's buffers
 * (refcounted case) or allocates new buffers and copies the data
 * (non-refcounted case).  On any failure after the first buffer ref,
 * dst is unwound with av_frame_unref() - the statement order in the
 * fail path matters, do not reorder.
 *
 * NOTE(review): assumes dst is clean/unreferenced on entry - TODO confirm
 * against the caller's contract.
 * Returns 0 on success, a negative AVERROR on failure. */
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    /* copy the basic frame properties */
    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        if (src->nb_samples) {
            /* audio: nb_samples set means audio data */
            int ch = av_get_channel_layout_nb_channels(src->channel_layout);
            av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                            dst->nb_samples, ch, dst->format);
        } else {
            /* video */
            av_image_copy(dst->data, dst->linesize, src->data, src->linesize,
                          dst->format, dst->width, dst->height);
        }
        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf) && src->buf[i]; i++) {
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* ref any extra planes beyond AV_NUM_DATA_POINTERS */
    if (src->extended_buf) {
        dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;
        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = av_get_channel_layout_nb_channels(src->channel_layout);
        /* a separate extended_data array only occurs for audio, so a
         * zero channel count here is invalid input */
        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data,
               sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    /* finally mirror the data pointers and linesizes */
    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    /* release everything referenced so far */
    av_frame_unref(dst);
    return ret;
}
/*
 * Read one Bethsoft VID video frame into pkt.
 *
 * Creates the video stream lazily on the first video packet, then reads
 * the RLE/plain-coded frame data into a growable buffer until npixels
 * pixels have been produced, and wraps it in an AVPacket (palette is
 * attached as side data when pending).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s)
{
    uint8_t *vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, duration, npixels;
    unsigned int vidbuf_capacity;
    int ret = 0;
    AVStream *st;

    if (vid->video_index < 0) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vid->video_index = st->index;
        if (vid->audio_index < 0) {
            av_log_ask_for_sample(s, "No audio packet before first video "
                                  "packet. Using default video time base.\n");
        }
        avpriv_set_pts_info(st, 64, 185, vid->sample_rate);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = AV_CODEC_ID_BETHSOFTVID;
        st->codec->width      = vid->width;
        st->codec->height     = vid->height;
    }
    st      = s->streams[vid->video_index];
    npixels = st->codec->width * st->codec->height;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if (!vidbuf_start)
        return AVERROR(ENOMEM);

    /* save the file position for the packet, include block type */
    position = avio_tell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    /* get the current packet duration */
    duration = vid->bethsoft_global_delay + avio_rl16(pb);

    /* set the y offset if it exists (decoder header data should be in data section) */
    if (block_type == VIDEO_YOFF_P_FRAME) {
        if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) {
            ret = AVERROR(EIO);
            goto fail;
        }
        vidbuf_nbytes += 2;
    }

    do {
        /* Grow through a temporary: av_fast_realloc() returns NULL on
         * failure without freeing the old buffer, so assigning straight
         * back to vidbuf_start would leak it (and the old code also
         * returned without the fail-path cleanup). */
        uint8_t *tmp = av_fast_realloc(vidbuf_start, &vidbuf_capacity,
                                       vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if (!tmp) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        vidbuf_start = tmp;

        code = avio_r8(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if (code >= 0x80) {         /* rle sequence */
            if (block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = avio_r8(pb);
        } else if (code) {          /* plain sequence */
            if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) {
                ret = AVERROR(EIO);
                goto fail;
            }
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if (bytes_copied == npixels) {
            /* sometimes no stop character is given, need to keep track of
             * bytes copied; may contain a 0 byte even if read all pixels */
            if (avio_r8(pb))
                avio_seek(pb, -1, SEEK_CUR);
            break;
        }
        if (bytes_copied > npixels) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
    } while (code);

    /* copy data into packet */
    if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);
    vidbuf_start = NULL;

    pkt->pos          = position;
    pkt->stream_index = vid->video_index;
    pkt->duration     = duration;
    if (block_type == VIDEO_I_FRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* if there is a new palette available, add it to packet side data */
    if (vid->palette) {
        uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                 BVID_PALETTE_SIZE);
        if (!pdata)                 /* was unchecked: NULL deref in memcpy */
            return AVERROR(ENOMEM);
        memcpy(pdata, vid->palette, BVID_PALETTE_SIZE);
        av_freep(&vid->palette);
    }

    vid->nframes--;  /* used to check if all the frames were read */

    return 0;
fail:
    av_free(vidbuf_start);
    return ret;
}
//初始化音频解码器与播放器 int init_audio(JNIEnv * env, jclass jthiz){ //获取音频流索引位置 int i; for(i=0; i < pFormatCtx->nb_streams;i++){ if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){ audio_stream_index = i; break; } } //获取音频解码器 audioCodecCtx = pFormatCtx->streams[audio_stream_index]->codec; AVCodec *codec = avcodec_find_decoder(audioCodecCtx->codec_id); if(codec == NULL){ LOGI("%s","无法获取音频解码器"); return -1; } //打开音频解码器 if(avcodec_open2(audioCodecCtx,codec,NULL) < 0){ LOGI("%s","无法打开音频解码器"); return -1; } //frame->16bit 44100 PCM 统一音频采样格式与采样率 audio_swr_ctx = swr_alloc(); //输入的采样格式 enum AVSampleFormat in_sample_fmt = audioCodecCtx->sample_fmt; //输出采样格式16bit PCM out_sample_fmt = AV_SAMPLE_FMT_S16; //输入采样率 int in_sample_rate = audioCodecCtx->sample_rate; //输出采样率 int out_sample_rate = in_sample_rate; //声道布局(2个声道,默认立体声stereo) uint64_t in_ch_layout = audioCodecCtx->channel_layout; //输出的声道布局(立体声) uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO; swr_alloc_set_opts(audio_swr_ctx, out_ch_layout,out_sample_fmt,out_sample_rate, in_ch_layout,in_sample_fmt,in_sample_rate, 0, NULL); swr_init(audio_swr_ctx); //输出的声道个数 out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout); jclass player_class = (*env)->GetObjectClass(env,jthiz); if(!player_class){ LOGE("player_class not found..."); return -1; } //AudioTrack对象 jmethodID audio_track_method = (*env)->GetMethodID(env,player_class,"createAudioTrack","(II)Landroid/media/AudioTrack;"); if(!audio_track_method){ LOGE("audio_track_method not found..."); return -1; } audio_track = (*env)->CallObjectMethod(env,jthiz,audio_track_method,out_sample_rate,out_channel_nb); //调用play方法 jclass audio_track_class = (*env)->GetObjectClass(env,audio_track); jmethodID audio_track_play_mid = (*env)->GetMethodID(env,audio_track_class,"play","()V"); (*env)->CallVoidMethod(env,audio_track,audio_track_play_mid); //获取write()方法 audio_track_write_mid = (*env)->GetMethodID(env,audio_track_class,"write","([BII)I"); //16bit 44100 PCM 数据 out_buffer = 
(uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE); return 0; }
static void * vencoder_threadproc(void *arg) { // arg is pointer to source pipename int iid, outputW, outputH; pooldata_t *data = NULL; vsource_frame_t *frame = NULL; char *pipename = (char*) arg; pipeline *pipe = pipeline::lookup(pipename); AVCodecContext *encoder = NULL; // AVFrame *pic_in = NULL; unsigned char *pic_in_buf = NULL; int pic_in_size; unsigned char *nalbuf = NULL, *nalbuf_a = NULL; int nalbuf_size = 0, nalign = 0; long long basePts = -1LL, newpts = 0LL, pts = -1LL, ptsSync = 0LL; pthread_mutex_t condMutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; // int video_written = 0; // if(pipe == NULL) { ga_error("video encoder: invalid pipeline specified (%s).\n", pipename); goto video_quit; } // rtspconf = rtspconf_global(); // init variables iid = ((vsource_t*) pipe->get_privdata())->channel; encoder = vencoder[iid]; // outputW = video_source_out_width(iid); outputH = video_source_out_height(iid); // nalbuf_size = 100000+12 * outputW * outputH; if(ga_malloc(nalbuf_size, (void**) &nalbuf, &nalign) < 0) { ga_error("video encoder: buffer allocation failed, terminated.\n"); goto video_quit; } nalbuf_a = nalbuf + nalign; // if((pic_in = av_frame_alloc()) == NULL) { ga_error("video encoder: picture allocation failed, terminated.\n"); goto video_quit; } pic_in_size = avpicture_get_size(PIX_FMT_YUV420P, outputW, outputH); if((pic_in_buf = (unsigned char*) av_malloc(pic_in_size)) == NULL) { ga_error("video encoder: picture buffer allocation failed, terminated.\n"); goto video_quit; } avpicture_fill((AVPicture*) pic_in, pic_in_buf, PIX_FMT_YUV420P, outputW, outputH); //ga_error("video encoder: linesize = %d|%d|%d\n", pic_in->linesize[0], pic_in->linesize[1], pic_in->linesize[2]); // start encoding ga_error("video encoding started: tid=%ld %dx%d@%dfps, nalbuf_size=%d, pic_in_size=%d.\n", ga_gettid(), outputW, outputH, rtspconf->video_fps, nalbuf_size, pic_in_size); // pipe->client_register(ga_gettid(), &cond); // 
while(vencoder_started != 0 && encoder_running() > 0) { AVPacket pkt; int got_packet = 0; // wait for notification data = pipe->load_data(); if(data == NULL) { int err; struct timeval tv; struct timespec to; gettimeofday(&tv, NULL); to.tv_sec = tv.tv_sec+1; to.tv_nsec = tv.tv_usec * 1000; // if((err = pipe->timedwait(&cond, &condMutex, &to)) != 0) { ga_error("viedo encoder: image source timed out.\n"); continue; } data = pipe->load_data(); if(data == NULL) { ga_error("viedo encoder: unexpected NULL frame received (from '%s', data=%d, buf=%d).\n", pipe->name(), pipe->data_count(), pipe->buf_count()); continue; } } frame = (vsource_frame_t*) data->ptr; // handle pts if(basePts == -1LL) { basePts = frame->imgpts; ptsSync = encoder_pts_sync(rtspconf->video_fps); newpts = ptsSync; } else { newpts = ptsSync + frame->imgpts - basePts; } // XXX: assume always YUV420P if(pic_in->linesize[0] == frame->linesize[0] && pic_in->linesize[1] == frame->linesize[1] && pic_in->linesize[2] == frame->linesize[2]) { bcopy(frame->imgbuf, pic_in_buf, pic_in_size); } else { ga_error("video encoder: YUV mode failed - mismatched linesize(s) (src:%d,%d,%d; dst:%d,%d,%d)\n", frame->linesize[0], frame->linesize[1], frame->linesize[2], pic_in->linesize[0], pic_in->linesize[1], pic_in->linesize[2]); pipe->release_data(data); goto video_quit; } pipe->release_data(data); // pts must be monotonically increasing if(newpts > pts) { pts = newpts; } else { pts++; } // encode pic_in->pts = pts; av_init_packet(&pkt); pkt.data = nalbuf_a; pkt.size = nalbuf_size; if(avcodec_encode_video2(encoder, &pkt, pic_in, &got_packet) < 0) { ga_error("video encoder: encode failed, terminated.\n"); goto video_quit; } if(got_packet) { if(pkt.pts == (int64_t) AV_NOPTS_VALUE) { pkt.pts = pts; } pkt.stream_index = 0; // send the packet if(encoder_send_packet_all("video-encoder", iid/*rtspconf->video_id*/, &pkt, pkt.pts, NULL) < 0) { goto video_quit; } // free unused side-data if(pkt.side_data_elems > 0) { int i; for (i = 0; 
i < pkt.side_data_elems; i++) av_free(pkt.side_data[i].data); av_freep(&pkt.side_data); pkt.side_data_elems = 0; } // if(video_written == 0) { video_written = 1; ga_error("first video frame written (pts=%lld)\n", pts); } } } // video_quit: if(pipe) { pipe->client_unregister(ga_gettid()); pipe = NULL; } // if(pic_in_buf) av_free(pic_in_buf); if(pic_in) av_free(pic_in); if(nalbuf) free(nalbuf); // ga_error("video encoder: thread terminated (tid=%ld).\n", ga_gettid()); // return NULL; }
/*
 * Read the 128-byte FLIC header, create the video stream (plus an audio
 * stream for TFTD files), and derive the timebase from the FLIC variant:
 * TFTD (audio-chunk preamble), Magic Carpet (chunk magic at 0x10), or a
 * regular FLIC file (speed field).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int flic_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_FLIC;
    st->codec->codec_tag  = 0;  /* no fourcc */
    st->codec->width      = AV_RL16(&header[0x08]);
    st->codec->height     = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata = (uint8_t *)av_malloc(FLIC_HEADER_SIZE);
    if (!st->codec->extradata)      /* was unchecked: NULL deref in memcpy */
        return AVERROR(ENOMEM);
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }
    avio_seek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file. */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = CODEC_ID_PCM_U8;
        ast->codec->codec_tag   = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels    = 1;
        ast->codec->sample_fmt  = AV_SAMPLE_FMT_U8;
        /* was st->codec->sample_rate: that is the *video* stream, whose
         * sample_rate is 0 here, yielding a zero bit_rate */
        ast->codec->bit_rate    = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        avpriv_set_pts_info(st, 64, ast->codec->block_align,
                            FLIC_TFTD_SAMPLE_RATE);
        avpriv_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        avpriv_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        avio_seek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata = (uint8_t *)av_malloc(12);
        if (!st->codec->extradata) {    /* was unchecked */
            st->codec->extradata_size = 0;
            return AVERROR(ENOMEM);
        }
        st->codec->extradata_size = 12;
        memcpy(st->codec->extradata, header, 12);
    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        avpriv_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        avpriv_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
int ReadFunc2(void *opaque, uint8_t *buf, int size) { DPRINTF(E_DBG, L_XCODE, "requested a read of %d bytes\n", size); struct icy_ctx *s = (struct icy_ctx *) opaque; URLContext *context = s->context; if ( !s->icy_metaint) return url_read_complete( context, buf, size ); int len = url_read_complete( context, buf, size ); int ldata = s->icy_metaint - s->data_pos; DPRINTF(E_DBG, L_XCODE, "current data pos %d\n", s->data_pos); /* pull out the Icy Metadata */ if ( s->data_pos + len > s->icy_metaint ) { DPRINTF(E_DBG, L_XCODE, "current packet has metadata. ldata %d len %d data_pos %d\n", ldata, len, s->data_pos); while ( ldata < len ) { int md_size = *(buf + ldata) * 16; /* determine how much data to copy initially */ DPRINTF(E_DBG, L_XCODE, "found a metatag of %d bytes\n", md_size); int md_len = (ldata + md_size + 1 > len) ? len - ldata - 1: md_size; if (md_size > 0) { /* parse out the metadata */ uint8_t *md = (uint8_t*)av_malloc(sizeof(uint8_t) * (md_size + 1)); if (md != NULL) { int md_total = md_len; /* copy in whatever metadata we already have */ memcpy(md, buf + ldata + 1, md_len); /* get the rest of the metadata */ if (md_total < md_size) { do { md_total += url_read(context, md + md_total, md_size - md_total); } while (md_total < md_size); } } md[md_size] = 0; http_parse_metadata(s, md, md_size); av_free(md); } s->data_pos = 0; if (ldata + md_len + 1 < len) { /* copy any leftover data */ int ridx = ldata + md_len + 1; int rlen = len - ridx; memcpy(buf + ldata, buf + ridx, rlen); s->data_pos = rlen; } ldata+= s->icy_metaint; /* zero length returns cause problems (metadata only bufs) */ if ((len -= (md_len + 1)) == 0) { len = url_read(context, buf, size); } } } else if (s->icy_metaint) { s->data_pos += len; } DPRINTF(E_DBG, L_XCODE, "returning %d\n",len); return len; }
/*
 * Parse the 4xm HEAD LIST chunk: scan it for std_ (fps), vtrk (video)
 * and strk (audio) sub-chunks and create the corresponding AVStreams,
 * then position the demuxer at the LIST-MOVI chunk.
 *
 * Returns 0 on success or a negative error code.
 */
static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks      = NULL;
    fourxm->fps         = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size) {
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size       = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width  = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avpriv_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id   = CODEC_ID_4XM;
            st->codec->extradata  = av_malloc(4);
            if (!st->codec->extradata) {
                /* was unchecked: NULL deref in AV_WL32 below */
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->codec->extradata_size = 4;
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width  = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;

            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            if ((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1) {
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret = -1;
                goto fail;
            }
            if (current_track + 1 > fourxm->track_count) {
                fourxm->tracks = av_realloc_f(fourxm->tracks,
                                              sizeof(AudioTrack),
                                              current_track + 1);
                if (!fourxm->tracks) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                /* zero the newly appended track slots */
                memset(&fourxm->tracks[fourxm->track_count], 0,
                       sizeof(AudioTrack) *
                       (current_track + 1 - fourxm->track_count));
                fourxm->track_count = current_track + 1;
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            if (fourxm->tracks[current_track].channels    <= 0 ||
                fourxm->tracks[current_track].sample_rate <= 0 ||
                fourxm->tracks[current_track].bits        <  0) {
                av_log(s, AV_LOG_ERROR, "audio header invalid\n");
                ret = -1;
                goto fail;
            }
            i += 8 + size;

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = current_track;
            avpriv_set_pts_info(st, 60, 1,
                                fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag   = 0;
            st->codec->channels    = fourxm->tracks[current_track].channels;
            st->codec->sample_rate = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample =
                fourxm->tracks[current_track].bits;
            st->codec->bit_rate    = st->codec->channels *
                                     st->codec->sample_rate *
                                     st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels *
                                     st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm) {
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            } else if (st->codec->bits_per_coded_sample == 8) {
                st->codec->codec_id = CODEC_ID_PCM_U8;
            } else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream should be */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);

    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */

    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
/*
 * Open the libavcodec audio decoder named by sh_audio->codec->dll,
 * feed it the codec parameters from the WAVEFORMATEX header (and any
 * extradata / QDM2 codecdata), then prime it by decoding at least one
 * byte so the header fields get filled in.
 *
 * Returns 1 on success, 0 on failure (mplayer ad_functions convention).
 */
static int init(sh_audio_t *sh_audio)
{
    int tries = 0;
    int x;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;
    AVDictionary *opts = NULL;
    char tmpstr[50];

    mp_msg(MSGT_DECAUDIO,MSGL_V,"FFmpeg's libavcodec audio codec\n");
    init_avcodec();

    lavc_codec = avcodec_find_decoder_by_name(sh_audio->codec->dll);
    if (!lavc_codec) {
        mp_msg(MSGT_DECAUDIO,MSGL_ERR,MSGTR_MissingLAVCcodec,
               sh_audio->codec->dll);
        return 0;
    }

    lavc_context = avcodec_alloc_context3(lavc_codec);
    sh_audio->context = lavc_context;

    snprintf(tmpstr, sizeof(tmpstr), "%f", drc_level);
    av_dict_set(&opts, "drc_scale", tmpstr, 0);

    lavc_context->sample_rate = sh_audio->samplerate;
    lavc_context->bit_rate    = sh_audio->i_bps * 8;
    if (sh_audio->wf) {
        /* prefer the values from the WAVEFORMATEX header */
        lavc_context->channels              = sh_audio->wf->nChannels;
        lavc_context->sample_rate           = sh_audio->wf->nSamplesPerSec;
        lavc_context->bit_rate              = sh_audio->wf->nAvgBytesPerSec * 8;
        lavc_context->block_align           = sh_audio->wf->nBlockAlign;
        lavc_context->bits_per_coded_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->request_channels = audio_output_channels;
    lavc_context->codec_tag        = sh_audio->format; //FOURCC
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize +
                                             FF_INPUT_BUFFER_PADDING_SIZE);
        if (!lavc_context->extradata) {     /* was unchecked */
            av_dict_free(&opts);
            return 0;
        }
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, sh_audio->wf + 1,
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata &&
        !lavc_context->extradata) {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len);
        if (!lavc_context->extradata) {     /* was unchecked */
            av_dict_free(&opts);
            return 0;
        }
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata,
               lavc_context->extradata_size);
    }

    /* open it */
    if (avcodec_open2(lavc_context, lavc_codec, &opts) < 0) {
        mp_msg(MSGT_DECAUDIO,MSGL_ERR, MSGTR_CantOpenCodec);
        av_dict_free(&opts);    /* was leaked on this error path */
        return 0;
    }
    av_dict_free(&opts);
    mp_msg(MSGT_DECAUDIO,MSGL_V,"INFO: libavcodec \"%s\" init OK!\n",
           lavc_codec->name);

    // printf("\nFOURCC: 0x%X\n",sh_audio->format);
    if (sh_audio->format == 0x3343414D) {
        // MACE 3:1
        sh_audio->ds->ss_div = 2*3; // 1 samples/packet
        sh_audio->ds->ss_mul = sh_audio->wf ? 2*sh_audio->wf->nChannels : 2; // 1 byte*ch/packet
    } else if (sh_audio->format == 0x3643414D) {
        // MACE 6:1
        sh_audio->ds->ss_div = 2*6; // 1 samples/packet
        sh_audio->ds->ss_mul = sh_audio->wf ? 2*sh_audio->wf->nChannels : 2; // 1 byte*ch/packet
    }

    // Decode at least 1 byte: (to get header filled)
    do {
        x = decode_audio(sh_audio, sh_audio->a_buffer, 1,
                         sh_audio->a_buffer_size);
    } while (x <= 0 && tries++ < 5);
    if (x > 0)
        sh_audio->a_buffer_len = x;

    sh_audio->i_bps = lavc_context->bit_rate / 8;
    if (sh_audio->wf && sh_audio->wf->nAvgBytesPerSec)
        sh_audio->i_bps = sh_audio->wf->nAvgBytesPerSec;

    /* only accept sample formats the rest of the audio chain can handle */
    switch (lavc_context->sample_fmt) {
    case AV_SAMPLE_FMT_U8:
    case AV_SAMPLE_FMT_U8P:
    case AV_SAMPLE_FMT_S16:
    case AV_SAMPLE_FMT_S16P:
    case AV_SAMPLE_FMT_S32:
    case AV_SAMPLE_FMT_S32P:
    case AV_SAMPLE_FMT_FLT:
    case AV_SAMPLE_FMT_FLTP:
        break;
    default:
        return 0;
    }
    return 1;
}
/*
 * Read the header of an IFF (8SVX audio / ILBM-PBM video) file.
 *
 * Walks the chunk list, collecting format parameters (VHDR/BMHD/CMAP/
 * CAMG/CHAN), the BODY position/size, and text metadata chunks, then
 * finalizes one audio or video stream based on what was found.
 *
 * Returns 0 on success, a negative AVERROR / -1 on failure.
 */
static int iff_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id, data_size;
    uint32_t screenmode = 0;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    avio_skip(pb, 8); /* skip FORM id and total size */
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);

    /* Iterate over all chunks until EOF; each chunk is id + BE32 size. */
    while (!url_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        chunk_id = avio_rl32(pb);
        data_size = avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch (chunk_id) {
        case ID_VHDR: /* 8SVX voice header: sample rate, compression */
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_BODY: /* remember payload location; actual data read later */
            iff->body_pos = avio_tell(pb);
            iff->body_size = data_size;
            break;

        case ID_CHAN: /* channel mask: values below 6 mean mono */
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->channels = (avio_rb32(pb) < 6) ? 1 : 2;
            break;

        case ID_CAMG: /* Amiga display mode flags (HAM / EHB bits) */
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode = avio_rb32(pb);
            break;

        case ID_CMAP:
            /* Palette is stored after IFF_EXTRA_VIDEO_SIZE header bytes in
             * extradata; the header part is written below in the video case. */
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD: /* ILBM bitmap header: dimensions, depth, compression */
            iff->bitmap_compression = -1;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width  = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            /* remaining fields are optional depending on chunk size */
            if (data_size >= 10)
                masking = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num = avio_r8(pb);
                st->sample_aspect_ratio.den = avio_r8(pb);
            }
            break;

        /* text chunks mapped onto standard metadata keys */
        case ID_ANNO:
        case ID_TEXT:
            metadata_tag = "comment";
            break;

        case ID_AUTH:
            metadata_tag = "artist";
            break;

        case ID_COPYRIGHT:
            metadata_tag = "copyright";
            break;

        case ID_NAME:
            metadata_tag = "title";
            break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!", metadata_tag);
                return res;
            }
        }
        /* skip any unread remainder; chunks are padded to even size */
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    /* rewind to the payload recorded while scanning */
    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        av_set_pts_info(st, 32, 1, st->codec->sample_rate);

        switch (iff->svx8_compression) {
        case COMP_NONE:
            st->codec->codec_id = CODEC_ID_8SVX_RAW;
            break;
        case COMP_FIB:
            st->codec->codec_id = CODEC_ID_8SVX_FIB;
            break;
        case COMP_EXP:
            st->codec->codec_id = CODEC_ID_8SVX_EXP;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Unknown SVX8 compression method '%d'\n", iff->svx8_compression);
            return -1;
        }

        /* fibonacci/exponential delta coding stores 4 bits per sample */
        st->codec->bits_per_coded_sample = iff->svx8_compression == COMP_NONE ? 8 : 4;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking = masking;
        iff->transparency = transparency;

        /* no CMAP chunk seen: allocate extradata for the header alone */
        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        /* serialize the display parameters for the decoder */
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);

        switch (iff->bitmap_compression) {
        case BITMAP_RAW:
            st->codec->codec_id = CODEC_ID_IFF_ILBM;
            break;
        case BITMAP_BYTERUN1:
            st->codec->codec_id = CODEC_ID_IFF_BYTERUN1;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Unknown bitmap compression method '%d'\n", iff->bitmap_compression);
            return AVERROR_INVALIDDATA;
        }
        break;

    default:
        return -1;
    }

    return 0;
}
/*
 * Demux/decode loop: reads packets from m_pFormatCtx, decodes video frames
 * (converted to RGB24 via swscale and pushed to m_videoBuff) and audio
 * frames (resampled to S16 mono via swresample and pushed to m_audioBuff),
 * until EOF or m_eState == State_Stop.
 *
 * NOTE(review): pushed buffers are cast to int (`(int) tmp`) — this
 * truncates pointers on 64-bit targets; the queue element type should be
 * intptr_t or a pointer. Confirm against the declarations of m_videoBuff /
 * m_audioBuff.
 * NOTE(review): `packet` is freed once after the loop; av_read_frame()
 * allocates payload per call, so each iteration's payload appears to leak —
 * verify whether av_free_packet() per iteration is needed here.
 * NOTE(review): the early `return` on video decode error leaks every
 * resource allocated above the loop.
 */
void VideoPlay::Decode() {
    AVFrame *pFrameRGB = av_frame_alloc();
    AVFrame *pFrame = av_frame_alloc();
    AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));
    int viedeoBuffer_size = avpicture_get_size(PIX_FMT_RGB24, m_nWidth, m_height);
    uint8_t * viedeoBuffer = (uint8_t *) av_malloc(viedeoBuffer_size);
    /* pFrameRGB's data pointers alias viedeoBuffer */
    avpicture_fill((AVPicture *) pFrameRGB, viedeoBuffer, PIX_FMT_RGB24, m_nWidth, m_height);
    SwsContext* img_convert_ctx = sws_getContext(m_nWidth, m_height,
            m_pVideoCodecCtx->pix_fmt, m_nWidth, m_height, PIX_FMT_RGB24,
            SWS_BICUBIC, NULL, NULL, NULL);
    int ret = 0;
    LOGI("pCodecCtx->width=%d,pCodecCtx->height=%d", m_nWidth, m_height);
    LOGI("m_audioindex=%d,m_videoindex=%d", m_audioindex, m_videoindex);
    LOGI("m_audioCodeID=%d,m_videoCodeID=%d", m_audioCodeID, m_videoCodeID);
    //Out Audio Param
    uint64_t out_channel_layout = AV_CH_FRONT_CENTER;
    //nb_samples: AAC-1024 MP3-1152
    int out_nb_samples = m_pAudioCodecCtx->frame_size;
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = m_pAudioCodecCtx->sample_rate;
    int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    //Out Buffer Size
    int Audiobuffer_size = av_samples_get_buffer_size(NULL, out_channels,
            out_nb_samples, out_sample_fmt, 1);
    uint8_t*Audiobuffer = (uint8_t *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    //FIX:Some Codec's Context Information is missing
    int64_t in_channel_layout;
    struct SwrContext *au_convert_ctx;
    in_channel_layout = av_get_default_channel_layout(
            m_pAudioCodecCtx->channels);
    //Swr
    au_convert_ctx = swr_alloc();
    au_convert_ctx = swr_alloc_set_opts(au_convert_ctx, out_channel_layout,
            out_sample_fmt, out_sample_rate, in_channel_layout,
            m_pAudioCodecCtx->sample_fmt, m_pAudioCodecCtx->sample_rate, 0,
            NULL);
    swr_init(au_convert_ctx);
    AVFrame *pAudioFrame;
    pAudioFrame = av_frame_alloc();
    m_audioLen = Audiobuffer_size;
    LOGI("m_pFormatCtx->duration=%lld", m_pFormatCtx->duration);
    LOGI("den=%d,num=%d", m_pVideoCodecCtx->time_base.den,
            m_pVideoCodecCtx->time_base.num);
    /* fixed 40 ms per frame; the commented expression derived it from the
       codec time base instead */
    m_ptm = 40000; //av_q2d(m_pVideoCodecCtx->time_base)*1000000;
    int flag_start = 0;
    LOGI(" bit_rate = %d ", m_pAudioCodecCtx->bit_rate);
    LOGI(" sample_rate = %d ", m_pAudioCodecCtx->sample_rate);
    LOGI(" channels = %d ", m_pAudioCodecCtx->channels);
    LOGI(" code_name = %s ", m_pAudioCodecCtx->codec->name);
    LOGI(" block_align = %d", m_pAudioCodecCtx->block_align);
    m_audioPlay.init();
    m_audioPlay.createBufferQueueAudioPlayer(out_sample_rate, out_channels,
            SL_PCMSAMPLEFORMAT_FIXED_16, bqPlayerCallback);
    while (av_read_frame(m_pFormatCtx, packet) >= 0) {
        if (m_eState == State_Stop) {
            LOGI("STOP");
            break;
        }
        int got_picture = 0;
        if (packet->stream_index == m_videoindex) {
            ret = avcodec_decode_video2(m_pVideoCodecCtx, pFrame, &got_picture,
                    packet);
            if (ret < 0) {
                LOGE("Decode Error.\n");
                return;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx,
                        (const uint8_t* const *) pFrame->data, pFrame->linesize,
                        0, m_height, pFrameRGB->data, pFrameRGB->linesize);
                /* hand the RGB frame to the consumer thread; the semaphores
                   bound the queue depth, the mutex guards the queue itself */
                sem_wait(&semVideoEmpty);
                pthread_mutex_lock(&mutexVideo);
                unsigned char* tmp = new unsigned char[m_nWidth * m_height * 3];
                memcpy(tmp, pFrameRGB->data[0], m_nWidth * m_height * 3);
                m_videoBuff.push((int) tmp);
                pthread_mutex_unlock(&mutexVideo);
                sem_post(&semVideoFull);
            }
        }
        int AudioFinished = 0;
        if (packet->stream_index == m_audioindex) {
            ret = avcodec_decode_audio4(m_pAudioCodecCtx, pAudioFrame,
                    &AudioFinished, packet);
            if (ret > 0 && AudioFinished) {
                swr_convert(au_convert_ctx, &Audiobuffer, MAX_AUDIO_FRAME_SIZE,
                        (const uint8_t **) pAudioFrame->data,
                        pAudioFrame->nb_samples);
                /* first decoded buffer kicks off OpenSL playback */
                if (flag_start == 0) {
                    flag_start = 1;
                    m_audioPlay.PlayBuff(Audiobuffer, Audiobuffer_size);
                }
                uint8_t *tmp = (uint8_t *) av_malloc(
                        MAX_AUDIO_FRAME_SIZE * 2);
                memcpy(tmp, Audiobuffer, Audiobuffer_size);
                m_audioBuff.push((int) tmp);
                tmp = NULL;
            }
        }
    }
    /* teardown of everything allocated above */
    av_free(viedeoBuffer);
    av_free(Audiobuffer);
    sws_freeContext(img_convert_ctx);
    swr_free(&au_convert_ctx);
    av_frame_free(&pAudioFrame);
    av_frame_free(&pFrameRGB);
    av_frame_free(&pFrame);
    av_free_packet(packet);
    m_bDecodeFinish = true;
    LOGI("Decode File Finish!");
}
/*
 * Set up an MP4 capture session for frames of the given surface size:
 * creates the output format context and a single video stream, opens the
 * encoder, allocates the YUV420 encode frame and the packed output buffer,
 * creates the BGRA->YUV420 swscale context, opens the output file and
 * writes the container header.
 *
 * On any failure the process exits (exit(1) / check_return) — this
 * constructor does not report errors to the caller.
 *
 * NOTE(review): the error string "Could not open audio codec" accompanies
 * opening the *video* codec — message looks copy-pasted; confirm intent.
 */
CaptureData(const char* filename, surface surf) {
    av_register_all();
    AVOutputFormat * fmt = av_guess_format("mp4", NULL, NULL);
    if (fmt == nullptr) {
        fprintf(stderr, "Failed to select mp4 format.\n");
        exit(1);
    }
    oc = avformat_alloc_context();
    if (oc == nullptr) {
        fprintf(stderr, "Failed to allocate format context.\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    // add video stream
    video_st = avformat_new_stream(oc, 0);
    if (video_st == nullptr) {
        fprintf(stderr, "Failed to create video stream.\n");
        exit(1);
    }
    c = video_st->codec;
    c->codec_id = fmt->video_codec;
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->bit_rate = 4000000;
    c->width = surf.width;
    c->height = surf.height;
    c->gop_size = 25;
    c->pix_fmt = PIX_FMT_YUV420P;
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    /* 10 fps output: time base 1/10 shared by stream and codec context */
    c->time_base.den = video_st->time_base.den = 10;
    c->time_base.num = video_st->time_base.num = 1;

    av_dump_format(oc, 0, oc->filename, 1);

    /* now that all the parameters are set, we can open the
       video codec and allocate the necessary encode buffers */
    /* find the video encoder */
    AVCodec *codec = avcodec_find_encoder(c->codec_id);
    if (codec == nullptr) {
        fprintf(stderr, "Failed to find video codec.\n");
        exit(1);
    }
    /* open the codec */
    int ret = avcodec_open2(c, codec, nullptr);
    check_return(ret, "Could not open audio codec");

    /* allocate output buffer */
    video_outbuf_size = 1000000;
    video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);

    /* allocate the encoded raw picture */
    picture = av_frame_alloc();
    picture->format = c->pix_fmt;
    picture->width  = c->width;
    picture->height = c->height;
    /* single allocation split into Y, U, V planes (4:2:0 layout) */
    int size = c->width * c->height;
    picture->data[0] = (uint8_t*)av_malloc((size * 3) / 2); /* size for YUV 420 */
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = c->width;
    picture->linesize[1] = c->width / 2;
    picture->linesize[2] = c->width / 2;

    /* converter from the capture surface's BGRA to the encoder's format */
    img_convert_ctx = sws_getContext(
        c->width, c->height, PIX_FMT_BGRA,  // <- from
        c->width, c->height, c->pix_fmt,    // <- to
        SWS_BICUBIC, NULL, NULL, NULL);
    if (img_convert_ctx == NULL) {
        fprintf(stderr, "capture.cpp:Cannot initialize the conversion context\n");
        exit(1);
    }
    buffer = (uint8_t*)surf.data;

    /* open the output file, if needed */
#ifndef AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE 2
#endif
    ret = avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE);
    check_return(ret, "Could not open file");

    /* write the stream header, if any */
    ret = avformat_write_header(oc, nullptr);
    check_return(ret, "Failed to write header");
    frame_count = 0;
    printf("capture.cpp: Movie capture started: %s\n", filename);
    pkt.data = video_outbuf;
    pkt.size = video_outbuf_size;
}
/*
 * Parse the SMJPEG file header: version, duration, then a sequence of
 * typed sub-headers (_TXT comment, _SND audio parameters, _VID video
 * parameters) terminated by HEND.
 *
 * Returns 0 once HEND is reached, a negative AVERROR on malformed input
 * or EOF before HEND.
 */
static int smjpeg_read_header(AVFormatContext *s)
{
    SMJPEGContext *sc = s->priv_data;
    AVStream *ast = NULL, *vst = NULL;
    AVIOContext *pb = s->pb;
    uint32_t version, htype, hlength, duration;
    char *comment;

    avio_skip(pb, 8); // magic
    version = avio_rb32(pb);
    if (version)
        avpriv_request_sample(s, "Unknown version %"PRIu32, version);

    duration = avio_rb32(pb); // in msec

    while (!avio_feof(pb)) {
        htype = avio_rl32(pb);
        switch (htype) {
        case SMJPEG_TXT:
            hlength = avio_rb32(pb);
            if (!hlength || hlength > 512)
                return AVERROR_INVALIDDATA;
            comment = av_malloc(hlength + 1);
            if (!comment)
                return AVERROR(ENOMEM);
            if (avio_read(pb, comment, hlength) != hlength) {
                av_freep(&comment);
                av_log(s, AV_LOG_ERROR, "error when reading comment\n");
                return AVERROR_INVALIDDATA;
            }
            comment[hlength] = 0;
            /* DONT_STRDUP_VAL: the dictionary takes ownership of comment */
            av_dict_set(&s->metadata, "comment", comment,
                        AV_DICT_DONT_STRDUP_VAL);
            break;
        case SMJPEG_SND:
            if (ast) {
                avpriv_request_sample(s, "Multiple audio streams");
                return AVERROR_PATCHWELCOME;
            }
            hlength = avio_rb32(pb);
            if (hlength < 8)
                return AVERROR_INVALIDDATA;
            ast = avformat_new_stream(s, 0);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
            ast->codecpar->sample_rate = avio_rb16(pb);
            ast->codecpar->bits_per_coded_sample = avio_r8(pb);
            ast->codecpar->channels    = avio_r8(pb);
            ast->codecpar->codec_tag   = avio_rl32(pb);
            ast->codecpar->codec_id    = ff_codec_get_id(ff_codec_smjpeg_audio_tags,
                                                         ast->codecpar->codec_tag);
            ast->duration              = duration;
            sc->audio_stream_index = ast->index;
            /* timestamps are in milliseconds */
            avpriv_set_pts_info(ast, 32, 1, 1000);
            avio_skip(pb, hlength - 8); /* 8 bytes of fields consumed above */
            break;
        case SMJPEG_VID:
            if (vst) {
                avpriv_request_sample(s, "Multiple video streams");
                return AVERROR_INVALIDDATA;
            }
            hlength = avio_rb32(pb);
            if (hlength < 12)
                return AVERROR_INVALIDDATA;
            vst = avformat_new_stream(s, 0);
            if (!vst)
                return AVERROR(ENOMEM);
            vst->nb_frames            = avio_rb32(pb);
            vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
            vst->codecpar->width      = avio_rb16(pb);
            vst->codecpar->height     = avio_rb16(pb);
            vst->codecpar->codec_tag  = avio_rl32(pb);
            vst->codecpar->codec_id   = ff_codec_get_id(ff_codec_smjpeg_video_tags,
                                                        vst->codecpar->codec_tag);
            vst->duration             = duration;
            sc->video_stream_index = vst->index;
            avpriv_set_pts_info(vst, 32, 1, 1000);
            avio_skip(pb, hlength - 12); /* 12 bytes of fields consumed above */
            break;
        case SMJPEG_HEND:
            /* end of header section: success */
            return 0;
        default:
            av_log(s, AV_LOG_ERROR, "unknown header %"PRIx32"\n", htype);
            return AVERROR_INVALIDDATA;
        }
    }

    return AVERROR_EOF;
}
/*
 * Parse a Sega FILM/CPK header: the main FILM block, the FDSC chunk
 * (audio/video parameters) and the STAB sample table, creating up to one
 * video and one audio stream and filling film->sample_table with per-sample
 * offset/size/pts entries used by film_read_packet().
 *
 * Returns 0 on success, a negative AVERROR / -1 on failure.
 */
static int film_read_header(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (avio_read(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = AV_CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (avio_read(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        /* compression byte 2 selects Sega ADX ADPCM; otherwise plain PCM
           with the sample width deciding signedness/endianness */
        if (scratch[23] == 2 && film->audio_channels > 0)
            film->audio_type = AV_CODEC_ID_ADPCM_ADX;
        else if (film->audio_channels > 0) {
            if (film->audio_bits == 8)
                film->audio_type = AV_CODEC_ID_PCM_S8;
            else if (film->audio_bits == 16)
                film->audio_type = AV_CODEC_ID_PCM_S16BE;
            else
                film->audio_type = AV_CODEC_ID_NONE;
        } else
            film->audio_type = AV_CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = AV_CODEC_ID_CINEPAK;
    } else if (AV_RB32(&scratch[8]) == RAW_TAG) {
        film->video_type = AV_CODEC_ID_RAWVIDEO;
    } else {
        film->video_type = AV_CODEC_ID_NONE;
    }

    /* initialize the decoder streams */
    if (film->video_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        /* FDSC stores height at offset 12, width at offset 16 */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);

        if (film->video_type == AV_CODEC_ID_RAWVIDEO) {
            if (scratch[20] == 24) {
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            } else {
                av_log(s, AV_LOG_ERROR, "raw video is using unhandled %dbpp\n", scratch[20]);
                return -1;
            }
        }
    }

    if (film->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->sample_rate = film->audio_samplerate;

        if (film->audio_type == AV_CODEC_ID_ADPCM_ADX) {
            /* ADX frames: 18 bytes encode 32 samples */
            st->codec->bits_per_coded_sample = 18 * 8 / 32;
            st->codec->block_align = st->codec->channels * 18;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else {
            st->codec->bits_per_coded_sample = film->audio_bits;
            st->codec->block_align = st->codec->channels *
                st->codec->bits_per_coded_sample / 8;
        }

        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
    }

    /* load the sample table */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    /* overflow guard for the multiplication below */
    if (film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            avpriv_set_pts_info(st, 33, 1, film->base_clock);
        else
            avpriv_set_pts_info(st, 64, 1, film->audio_samplerate);
    }

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (avio_read(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            /* 0xFFFFFFFF pts field marks an audio sample; pts counts
               decoded audio frames */
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;

            if (film->audio_type == AV_CODEC_ID_ADPCM_ADX)
                audio_frame_counter += (film->sample_table[i].sample_size * 32 /
                    (18 * film->audio_channels));
            else if (film->audio_type != AV_CODEC_ID_NONE)
                audio_frame_counter += (film->sample_table[i].sample_size /
                    (film->audio_channels * film->audio_bits / 8));
        } else {
            /* video sample; high bit of the pts flags a non-keyframe */
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
/* return non zero if error */
/*
 * Open a TCP connection (or, with ?listen in the URL, accept one inbound
 * connection) for a "tcp://host:port" URL, trying each resolved address
 * until one succeeds. On success stores a TCPContext in h->priv_data.
 *
 * NOTE(review): in the listen path the results of bind(), listen() and
 * accept() are not checked (ret still holds bind()'s value when tested
 * below) — confirm whether failures here should abort instead of falling
 * through.
 */
static int tcp_open(URLContext *h, const char *uri, int flags)
{
    struct addrinfo hints, *ai, *cur_ai;
    int port, fd = -1;
    TCPContext *s = NULL;
    int listen_socket = 0;
    const char *p;
    char buf[256];
    int ret;
    socklen_t optlen;
    char hostname[1024],proto[1024],path[1024];
    char portstr[10];

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname),
                 &port, path, sizeof(path), uri);
    if (strcmp(proto, "tcp") || port <= 0 || port >= 65536)
        return AVERROR(EINVAL);

    /* URL options after '?': only "listen" is recognized */
    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "listen", p))
            listen_socket = 1;
    }
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;      /* IPv4 or IPv6 */
    hints.ai_socktype = SOCK_STREAM;
    snprintf(portstr, sizeof(portstr), "%d", port);
    ret = getaddrinfo(hostname, portstr, &hints, &ai);
    if (ret) {
        av_log(h, AV_LOG_ERROR,
               "Failed to resolve hostname %s: %s\n",
               hostname, gai_strerror(ret));
        return AVERROR(EIO);
    }

    cur_ai = ai;

 restart:
    fd = socket(cur_ai->ai_family, cur_ai->ai_socktype, cur_ai->ai_protocol);
    if (fd < 0)
        goto fail;

    if (listen_socket) {
        /* accept a single inbound connection, then close the listener */
        int fd1;
        ret = bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
        listen(fd, 1);
        fd1 = accept(fd, NULL, NULL);
        closesocket(fd);
        fd = fd1;
    } else {
 redo:
        ret = connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
    }

    ff_socket_nonblock(fd, 1);

    if (ret < 0) {
        struct pollfd p = {fd, POLLOUT, 0};
        /* EINTR: retry the connect unless the user aborted */
        if (ff_neterrno() == AVERROR(EINTR)) {
            if (url_interrupt_cb()) {
                ret = AVERROR_EXIT;
                goto fail1;
            }
            goto redo;
        }
        if (ff_neterrno() != AVERROR(EINPROGRESS) &&
            ff_neterrno() != AVERROR(EAGAIN))
            goto fail;

        /* wait until we are connected or until abort */
        for(;;) {
            if (url_interrupt_cb()) {
                ret = AVERROR_EXIT;
                goto fail1;
            }
            ret = poll(&p, 1, 100);
            if (ret > 0)
                break;
        }

        /* test error: non-blocking connect reports completion via SO_ERROR */
        optlen = sizeof(ret);
        getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen);
        if (ret != 0) {
            av_log(h, AV_LOG_ERROR,
                   "TCP connection to %s:%d failed: %s\n",
                   hostname, port, strerror(ret));
            goto fail;
        }
    }
    s = av_malloc(sizeof(TCPContext));
    if (!s) {
        freeaddrinfo(ai);
        return AVERROR(ENOMEM);
    }
    h->priv_data = s;
    h->is_streamed = 1;
    s->fd = fd;
    freeaddrinfo(ai);
    return 0;

 fail:
    if (cur_ai->ai_next) {
        /* Retry with the next sockaddr */
        cur_ai = cur_ai->ai_next;
        if (fd >= 0)
            closesocket(fd);
        goto restart;
    }
    ret = AVERROR(EIO);
 fail1:
    if (fd >= 0)
        closesocket(fd);
    freeaddrinfo(ai);
    return ret;
}
/*
 * boxblur filter: configure for the input link's dimensions and pixel
 * format. Allocates the two scratch line buffers, evaluates the per-plane
 * radius expressions (which may reference w/h/cw/ch/hsub/vsub) and
 * validates the resulting radii against the plane sizes.
 *
 * Returns 0 on success, a negative AVERROR on allocation, expression or
 * range failure.
 */
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    int w = inlink->w, h = inlink->h;
    int cw, ch;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    /* temp buffers hold one row or column, whichever is longer */
    av_freep(&boxblur->temp[0]);
    av_freep(&boxblur->temp[1]);
    if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))))
        return AVERROR(ENOMEM);
    if (!(boxblur->temp[1] = av_malloc(FFMAX(w, h)))) {
        av_freep(&boxblur->temp[0]);
        return AVERROR(ENOMEM);
    }

    boxblur->hsub = desc->log2_chroma_w;
    boxblur->vsub = desc->log2_chroma_h;

    /* variables available to the user's radius expressions */
    var_values[VAR_W]       = inlink->w;
    var_values[VAR_H]       = inlink->h;
    var_values[VAR_CW] = cw = w>>boxblur->hsub;
    var_values[VAR_CH] = ch = h>>boxblur->vsub;
    var_values[VAR_HSUB]    = 1<<boxblur->hsub;
    var_values[VAR_VSUB]    = 1<<boxblur->vsub;

#define EVAL_RADIUS_EXPR(comp)                                          \
    expr = boxblur->comp##_radius_expr;                                 \
    ret = av_expr_parse_and_eval(&res, expr, var_names, var_values,     \
                                 NULL, NULL, NULL, NULL, NULL, 0, ctx); \
    boxblur->comp##_param.radius = res;                                 \
    if (ret < 0) {                                                      \
        av_log(NULL, AV_LOG_ERROR,                                      \
               "Error when evaluating " #comp " radius expression '%s'\n", expr); \
        return ret;                                                     \
    }
    EVAL_RADIUS_EXPR(luma);
    EVAL_RADIUS_EXPR(chroma);
    EVAL_RADIUS_EXPR(alpha);

    av_log(ctx, AV_LOG_DEBUG,
           "luma_radius:%d luma_power:%d "
           "chroma_radius:%d chroma_power:%d "
           "alpha_radius:%d alpha_power:%d "
           "w:%d chroma_w:%d h:%d chroma_h:%d\n",
           boxblur->luma_param  .radius, boxblur->luma_param  .power,
           boxblur->chroma_param.radius, boxblur->chroma_param.power,
           boxblur->alpha_param .radius, boxblur->alpha_param .power,
           w, cw, h, ch);

/* a radius may not exceed half the smaller plane dimension */
#define CHECK_RADIUS_VAL(w_, h_, comp)                                  \
    if (boxblur->comp##_param.radius < 0 ||                             \
        2*boxblur->comp##_param.radius > FFMIN(w_, h_)) {               \
        av_log(ctx, AV_LOG_ERROR,                                       \
               "Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
               boxblur->comp##_param.radius, FFMIN(w_, h_)/2);          \
        return AVERROR(EINVAL);                                         \
    }
    CHECK_RADIUS_VAL(w,  h,  luma);
    CHECK_RADIUS_VAL(cw, ch, chroma);
    CHECK_RADIUS_VAL(w,  h,  alpha);

    boxblur->radius[Y] = boxblur->luma_param.radius;
    boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius;
    boxblur->radius[A] = boxblur->alpha_param.radius;

    boxblur->power[Y] = boxblur->luma_param.power;
    boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power;
    boxblur->power[A] = boxblur->alpha_param.power;

    return 0;
}
/*
 * Read the id CIN (Quake II cinematic) header: five LE32 parameters
 * (width, height, sample rate, bytes per sample, channels), then the
 * Huffman tables which become the video stream's extradata. Creates the
 * video stream and, when sample_rate is non-zero, the audio stream.
 *
 * Returns 0 on success, a negative AVERROR on failure.
 *
 * Fixes vs. previous revision:
 *  - the no-audio branch erroneously set audio_present = 1 (it claimed
 *    audio was present exactly when there is none);
 *  - the extradata allocation result was passed to avio_read() unchecked.
 */
static int idcin_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    IdcinDemuxContext *idcin = s->priv_data;
    AVStream *st;
    unsigned int width, height;
    unsigned int sample_rate, bytes_per_sample, channels;

    /* get the 5 header parameters */
    width = avio_rl32(pb);
    height = avio_rl32(pb);
    sample_rate = avio_rl32(pb);
    bytes_per_sample = avio_rl32(pb);
    channels = avio_rl32(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
    idcin->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_IDCIN;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = width;
    st->codec->height = height;

    /* load up the Huffman tables into extradata */
    st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
    st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
    if (!st->codec->extradata)          /* FIX: allocation was unchecked */
        return AVERROR(ENOMEM);
    if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
        HUFFMAN_TABLE_SIZE)
        return AVERROR(EIO);

    /* if sample rate is 0, assume no audio */
    if (sample_rate) {
        idcin->audio_present = 1;
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
        idcin->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->sample_rate = sample_rate;
        st->codec->bits_per_coded_sample = bytes_per_sample * 8;
        st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
        st->codec->block_align = bytes_per_sample * channels;
        if (bytes_per_sample == 1)
            st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        else
            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;

        /* audio is delivered at 14 chunks/second; when the rate does not
         * divide evenly, chunk sizes alternate between the two values */
        if (sample_rate % 14 != 0) {
            idcin->audio_chunk_size1 = (sample_rate / 14) *
                bytes_per_sample * channels;
            idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
                bytes_per_sample * channels;
        } else {
            idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
                (sample_rate / 14) * bytes_per_sample * channels;
        }
        idcin->current_audio_chunk = 0;
    } else
        idcin->audio_present = 0;       /* FIX: was erroneously set to 1 */

    idcin->next_chunk_is_video = 1;
    idcin->pts = 0;

    return 0;
}
/*
 * Parse the optional NSVf file header of a Nullsoft Video stream:
 * chunk/file sizes, total duration, the "key='value'" info-string block
 * (stored as metadata), and the seek index (file offsets plus an optional
 * TOC2 timestamp table). Leaves the stream positioned at the end of the
 * NSVf chunk and sets nsv->state to NSV_HAS_READ_NSVF.
 *
 * Returns 0 on success, -1 or a negative AVERROR on failure.
 *
 * NOTE(review): the DEBUG_DUMP_INDEX block uses `i`, which is declared
 * inside the `if (table_entries_used > 0)` scope above — it would fail to
 * compile if DEBUG_DUMP_INDEX were defined; verify before enabling.
 */
static int nsv_parse_NSVf_header(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int file_size;
    unsigned int size;
    int64_t duration;
    int strings_size;
    int table_entries;
    int table_entries_used;

    av_dlog(s, "%s()\n", __FUNCTION__);

    nsv->state = NSV_UNSYNC; /* in case we fail */

    size = avio_rl32(pb);
    if (size < 28)  /* minimum size of the fixed NSVf fields */
        return -1;
    nsv->NSVf_end = size;

    //s->file_size = (uint32_t)avio_rl32(pb);
    file_size = (uint32_t)avio_rl32(pb);
    av_dlog(s, "NSV NSVf chunk_size %u\n", size);
    av_dlog(s, "NSV NSVf file_size %u\n", file_size);

    nsv->duration = duration = avio_rl32(pb); /* in ms */
    av_dlog(s, "NSV NSVf duration %"PRId64" ms\n", duration);
    // XXX: store it in AVStreams

    strings_size = avio_rl32(pb);
    table_entries = avio_rl32(pb);
    table_entries_used = avio_rl32(pb);
    av_dlog(s, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
            strings_size, table_entries, table_entries_used);
    if (pb->eof_reached)
        return -1;

    av_dlog(s, "NSV got header; filepos %"PRId64"\n", avio_tell(pb));

    /* parse the " key='value'" info strings into metadata */
    if (strings_size > 0) {
        char *strings; /* last byte will be '\0' to play safe with str*() */
        char *p, *endp;
        char *token, *value;
        char quote;

        p = strings = av_mallocz((size_t)strings_size + 1);
        if (!p)
            return AVERROR(ENOMEM);
        endp = strings + strings_size;
        avio_read(pb, strings, strings_size);
        while (p < endp) {
            while (*p == ' ')
                p++; /* strip out spaces */
            if (p >= endp-2)
                break;
            token = p;
            p = strchr(p, '=');
            if (!p || p >= endp-2)
                break;
            *p++ = '\0';
            quote = *p++;  /* value is wrapped in this quote character */
            value = p;
            p = strchr(p, quote);
            if (!p || p >= endp)
                break;
            *p++ = '\0';
            av_dlog(s, "NSV NSVf INFO: %s='%s'\n", token, value);
            av_dict_set(&s->metadata, token, value, 0);
        }
        av_free(strings);
    }
    if (pb->eof_reached)
        return -1;

    av_dlog(s, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb));

    if (table_entries_used > 0) {
        int i;
        nsv->index_entries = table_entries_used;
        /* overflow guard for the allocation below */
        if ((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t))
            return -1;
        nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t));
        if (!nsv->nsvs_file_offset)
            return AVERROR(ENOMEM);

        /* index offsets are relative to the end of the NSVf chunk */
        for(i=0;i<table_entries_used;i++)
            nsv->nsvs_file_offset[i] = avio_rl32(pb) + size;

        /* optional TOC2 table carries one timestamp per index entry */
        if(table_entries > table_entries_used &&
           avio_rl32(pb) == MKTAG('T','O','C','2')) {
            nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t));
            if (!nsv->nsvs_timestamps)
                return AVERROR(ENOMEM);
            for(i=0;i<table_entries_used;i++) {
                nsv->nsvs_timestamps[i] = avio_rl32(pb);
            }
        }
    }

    av_dlog(s, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));

#ifdef DEBUG_DUMP_INDEX
#define V(v) ((v<0x20 || v > 127)?'.':v)
    /* dump index */
    av_dlog(s, "NSV %d INDEX ENTRIES:\n", table_entries);
    av_dlog(s, "NSV [dataoffset][fileoffset]\n", table_entries);
    for (i = 0; i < table_entries; i++) {
        unsigned char b[8];
        avio_seek(pb, size + nsv->nsvs_file_offset[i], SEEK_SET);
        avio_read(pb, b, 8);
        av_dlog(s, "NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
           "%c%c%c%c%c%c%c%c\n",
           nsv->nsvs_file_offset[i], size + nsv->nsvs_file_offset[i],
           b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
           V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) );
    }
    //avio_seek(pb, size, SEEK_SET); /* go back to end of header */
#undef V
#endif

    avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */

    if (pb->eof_reached)
        return -1;
    nsv->state = NSV_HAS_READ_NSVF;

    return 0;
}
/*
 * Read the next sample from a Sega FILM/CPK file according to the sample
 * table built in film_read_header(). Cinepak video samples have non-spec
 * extra bytes stripped after the 10-byte chunk preamble; stereo PCM is
 * read into a scratch buffer and interleaved L/R into the packet.
 *
 * Returns the number of bytes delivered, or a negative error code.
 *
 * Fixes vs. previous revision: the av_malloc() of the stereo interleave
 * buffer was unchecked (NULL dereference in the interleave loop on OOM),
 * and stereo_buffer_size was updated before the allocation succeeded,
 * leaving a stale size if it failed.
 */
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    film_sample_t *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR_IO;

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    url_fseek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == CODEC_ID_CINEPAK)) {
        if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
            return AVERROR_NOMEM;
        if (pkt->size < 10)
            return -1;
        pkt->pos = url_ftell(pb);
        ret = get_buffer(pb, pkt->data, 10);
        /* skip the non-spec CVID bytes */
        url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
        ret += get_buffer(pb, pkt->data + 10,
                          sample->sample_size - 10 - film->cvid_extra_bytes);
        if (ret != sample->sample_size - film->cvid_extra_bytes)
            ret = AVERROR_IO;
    } else if ((sample->stream == film->audio_stream_index) &&
               (film->audio_channels == 2)) {
        /* stereo PCM needs to be interleaved */
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR_NOMEM;

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer = av_malloc(sample->sample_size);
            if (!film->stereo_buffer) {            /* FIX: was unchecked */
                film->stereo_buffer_size = 0;
                av_free_packet(pkt);
                return AVERROR_NOMEM;
            }
            film->stereo_buffer_size = sample->sample_size;
        }

        pkt->pos = url_ftell(pb);
        ret = get_buffer(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR_IO;

        /* source stores all left samples, then all right samples;
         * interleave them pairwise into the packet */
        left = 0;
        right = sample->sample_size / 2;
        for (i = 0; i < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret = av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR_IO;
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
/* Read the next block from a Beam Software SIFF/VID (.vid) file.  The first
 * byte of each block is a type tag: palette blocks update demuxer state and
 * recurse for a real packet, audio/video blocks become AVPackets. */
static int vid_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;

    if (vid->is_finished || pb->eof_reached)
        return AVERROR(EIO);

    block_type = avio_r8(pb);
    switch (block_type) {
    case PALETTE_BLOCK:
        if (vid->palette) {
            av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
            av_freep(&vid->palette);
        }
        vid->palette = av_malloc(BVID_PALETTE_SIZE);
        if (!vid->palette)
            return AVERROR(ENOMEM);
        if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
            av_freep(&vid->palette);
            return AVERROR(EIO);
        }
        /* palette consumed; recurse once so the caller still gets a packet */
        return vid_read_packet(s, pkt);

    case FIRST_AUDIO_BLOCK:
        avio_rl16(pb);
        // soundblaster DAC used for sample rate, as on specification page (link above)
        vid->sample_rate = 1000000 / (256 - avio_r8(pb));
        /* fall through: the first audio block also carries audio data */
    case AUDIO_BLOCK:
        if (vid->audio_index < 0) {
            /* lazily create the audio stream on the first audio block */
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            vid->audio_index                 = st->index;
            st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id              = AV_CODEC_ID_PCM_U8;
            st->codec->channels              = 1;
            st->codec->channel_layout        = AV_CH_LAYOUT_MONO;
            st->codec->bits_per_coded_sample = 8;
            st->codec->sample_rate           = vid->sample_rate;
            st->codec->bit_rate              = 8 * st->codec->sample_rate;
            st->start_time                   = 0;
            avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
        }
        audio_length = avio_rl16(pb);
        if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
            if (ret_value < 0)
                return ret_value;
            av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
            return AVERROR(EIO);
        }
        pkt->stream_index = vid->audio_index;
        /* one byte per sample (mono PCM_U8), so byte count == duration */
        pkt->duration     = audio_length;
        pkt->flags |= AV_PKT_FLAG_KEY;
        return 0;

    case VIDEO_P_FRAME:
    case VIDEO_YOFF_P_FRAME:
    case VIDEO_I_FRAME:
        return read_frame(vid, pb, pkt, block_type, s);

    case EOF_BLOCK:
        if (vid->nframes != 0)
            av_log(s, AV_LOG_VERBOSE,
                   "reached terminating character but not all frames read.\n");
        vid->is_finished = 1;
        return AVERROR(EIO);

    default:
        av_log(s, AV_LOG_ERROR,
               "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
               block_type, block_type, block_type);
        return AVERROR_INVALIDDATA;
    }
}
/* Parse the FILM / FDSC / STAB headers of a Sega FILM (.cpk) file, create
 * the audio/video streams and build the sample (chunk) table consumed by
 * film_read_packet(). */
static int film_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR_IO;
    data_offset = BE_32(&scratch[4]);
    film->version = BE_32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (get_buffer(pb, scratch, 20) != 20)
            return AVERROR_IO;

        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (get_buffer(pb, scratch, 32) != 32)
            return AVERROR_IO;
        film->audio_samplerate = BE_16(&scratch[24]); /* stray ';;' removed */
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = 0;
    }

    if (BE_32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    film->cvid_extra_bytes = 0;
    if (BE_32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
        if (film->version)
            film->cvid_extra_bytes = 2;
        else
            film->cvid_extra_bytes = 6; /* Lemmings 3DO case */
    } else
        film->video_type = 0;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR_NOMEM;
        film->video_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0; /* no fourcc */
        st->codec->width = BE_32(&scratch[16]);
        st->codec->height = BE_32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR_NOMEM;
        film->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_sample / 8;
    }

    /* load the sample table */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR_IO;
    if (BE_32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = BE_32(&scratch[8]);
    film->sample_count = BE_32(&scratch[12]);
    if (film->sample_count >= UINT_MAX / sizeof(film_sample_t))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
    if (!film->sample_table)
        /* NULL check was missing; the fill loop below dereferenced the table */
        return AVERROR_NOMEM;

    for (i = 0; i < s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (get_buffer(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR_IO;
        }
        film->sample_table[i].sample_offset =
            data_offset + BE_32(&scratch[0]);
        film->sample_table[i].sample_size = BE_32(&scratch[4]);
        if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
            /* an all-ones timestamp marks an audio chunk; derive its pts
             * from the running sample count */
            int bytes_per_frame =
                film->audio_channels * film->audio_bits / 8;
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;
            /* guard the division: a corrupt header with 0 channels/bits
             * would otherwise divide by zero */
            if (bytes_per_frame > 0)
                audio_frame_counter +=
                    film->sample_table[i].sample_size / bytes_per_frame;
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
//初始化视频解码器与播放器 int open_input(JNIEnv * env, const char* file_name, jobject surface){ LOGI("open file:%s\n", file_name); //注册所有组件 av_register_all(); //分配上下文 pFormatCtx = avformat_alloc_context(); //打开视频文件 if(avformat_open_input(&pFormatCtx, file_name, NULL, NULL)!=0) { LOGE("Couldn't open file:%s\n", file_name); return -1; } //检索多媒体流信息 if(avformat_find_stream_info(pFormatCtx, NULL)<0) { LOGE("Couldn't find stream information."); return -1; } //寻找视频流的第一帧 int i; for (i = 0; i < pFormatCtx->nb_streams; i++) { if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_stream_index < 0) { video_stream_index = i; } } if(video_stream_index == -1) { LOGE("couldn't find a video stream."); return -1; } //获取codec上下文指针 pCodecCtx = pFormatCtx->streams[video_stream_index]->codec; //寻找视频流的解码器 AVCodec * pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { LOGE("couldn't find Codec."); return -1; } if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { LOGE("Couldn't open codec."); return -1; } // 获取native window nativeWindow = ANativeWindow_fromSurface(env, surface); // 设置native window的buffer大小,可自动拉伸 ANativeWindow_setBuffersGeometry(nativeWindow, pCodecCtx->width, pCodecCtx->height, WINDOW_FORMAT_RGBA_8888); //申请内存 pFrame = av_frame_alloc(); pFrameRGBA = av_frame_alloc(); if(pFrameRGBA == NULL || pFrame == NULL) { LOGE("Couldn't allocate video frame."); return -1; } // buffer中数据用于渲染,且格式为RGBA int numBytes=av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height, 1); buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer, AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height, 1); // 由于解码出来的帧格式不是RGBA的,在渲染之前需要进行格式转换 sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL); return 0; }
/* Initialize the RTP muxer for the (single) stream of s1: pick/validate the
 * payload type, seed timestamps/SSRC/sequence number, size the packet
 * buffer, and apply per-codec packetization setup. */
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int n, ret = AVERROR(EINVAL);
    AVStream *st;

    if (s1->nb_streams != 1) {
        av_log(s1, AV_LOG_ERROR, "Only one stream supported in the RTP muxer\n");
        return AVERROR(EINVAL);
    }
    st = s1->streams[0];
    if (!is_supported(st->codecpar->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n",
               avcodec_get_name(st->codecpar->codec_id));
        return -1;
    }

    if (s->payload_type < 0) {
        /* Re-validate non-dynamic payload types */
        if (st->id < RTP_PT_PRIVATE)
            st->id = ff_rtp_get_payload_type(s1, st->codecpar, -1);
        s->payload_type = st->id;
    } else {
        /* private option takes priority */
        st->id = s->payload_type;
    }

    /* random base timestamp / SSRC so streams are not trivially correlated */
    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    if (!s->ssrc)
        s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime != 0 && s1->start_time_realtime != AV_NOPTS_VALUE)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;
    // Pick a random sequence start number, but in the lower end of the
    // available range, so that any wraparound doesn't happen immediately.
    // (Immediate wraparound would be an issue for SRTP.)
    if (s->seq < 0) {
        if (s1->flags & AVFMT_FLAG_BITEXACT) {
            s->seq = 0;
        } else
            s->seq = av_get_random_seed() & 0x0fff;
    } else
        s->seq &= 0xffff; // Use the given parameter, wrapped to the right interval

    /* clamp the packet size to what the underlying protocol can carry */
    if (s1->packet_size) {
        if (s1->pb->max_packet_size)
            s1->packet_size = FFMIN(s1->packet_size, s1->pb->max_packet_size);
    } else
        s1->packet_size = s1->pb->max_packet_size;
    /* 12 bytes is the fixed RTP header; anything <= that leaves no payload */
    if (s1->packet_size <= 12) {
        av_log(s1, AV_LOG_ERROR, "Max packet size %u too low\n", s1->packet_size);
        return AVERROR(EIO);
    }
    s->buf = av_malloc(s1->packet_size);
    if (!s->buf) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = s1->packet_size - 12;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        avpriv_set_pts_info(st, 32, 1, st->codecpar->sample_rate);
    } else {
        avpriv_set_pts_info(st, 32, 1, 90000);
    }
    s->buf_ptr = s->buf;
    /* per-codec packetization quirks; NOTE(review): some av_log calls below
     * pass s (RTPMuxContext) instead of s1 — presumably RTPMuxContext starts
     * with an AVClass pointer, making this valid; confirm in the header */
    switch (st->codecpar->codec_id) {
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        /* reserve 4 bytes for the RFC 2250 MPEG audio header */
        s->buf_ptr = s->buf + 4;
        avpriv_set_pts_info(st, 32, 1, 90000);
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        break;
    case AV_CODEC_ID_MPEG2TS:
        /* payload must hold a whole number of TS packets */
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        break;
    case AV_CODEC_ID_DIRAC:
        if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(s, AV_LOG_ERROR,
                   "Packetizing VC-2 is experimental and does not use all values "
                   "of the specification "
                   "(even though most receivers may handle it just fine). "
                   "Please set -strict experimental in order to enable it.\n");
            ret = AVERROR_EXPERIMENTAL;
            goto fail;
        }
        break;
    case AV_CODEC_ID_H261:
        if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(s, AV_LOG_ERROR,
                   "Packetizing H.261 is experimental and produces incorrect "
                   "packetization for cases where GOBs don't fit into packets "
                   "(even though most receivers may handle it just fine. "
                   "Please set -f_strict experimental in order to enable it.\n");
            ret = AVERROR_EXPERIMENTAL;
            goto fail;
        }
        break;
    case AV_CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
        if (st->codecpar->extradata_size > 4 && st->codecpar->extradata[0] == 1) {
            s->nal_length_size = (st->codecpar->extradata[4] & 0x03) + 1;
        }
        break;
    case AV_CODEC_ID_HEVC:
        /* Only check for the standardized hvcC version of extradata, keeping
         * things simple and similar to the avcC/H.264 case above, instead
         * of trying to handle the pre-standardization versions (as in
         * libavcodec/hevc.c). */
        if (st->codecpar->extradata_size > 21 && st->codecpar->extradata[0] == 1) {
            s->nal_length_size = (st->codecpar->extradata[21] & 0x03) + 1;
        }
        break;
    case AV_CODEC_ID_VP9:
        if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(s, AV_LOG_ERROR,
                   "Packetizing VP9 is experimental and its specification is "
                   "still in draft state. "
                   "Please set -strict experimental in order to enable it.\n");
            ret = AVERROR_EXPERIMENTAL;
            goto fail;
        }
        break;
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
        s->max_frames_per_packet = 15;
        break;
    case AV_CODEC_ID_ADPCM_G722:
        /* Due to a historical error, the clock rate for G722 in RTP is
         * 8000, even if the sample rate is 16000. See RFC 3551. */
        avpriv_set_pts_info(st, 32, 1, 8000);
        break;
    case AV_CODEC_ID_OPUS:
        if (st->codecpar->channels > 2) {
            av_log(s1, AV_LOG_ERROR, "Multistream opus not supported in RTP\n");
            goto fail;
        }
        /* The opus RTP RFC says that all opus streams should use 48000 Hz
         * as clock rate, since all opus sample rates can be expressed in
         * this clock rate, and sample rate changes on the fly are supported. */
        avpriv_set_pts_info(st, 32, 1, 48000);
        break;
    case AV_CODEC_ID_ILBC:
        if (st->codecpar->block_align != 38 && st->codecpar->block_align != 50) {
            av_log(s1, AV_LOG_ERROR, "Incorrect iLBC block size specified\n");
            goto fail;
        }
        s->max_frames_per_packet = s->max_payload_size /
                                   st->codecpar->block_align;
        break;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
        s->max_frames_per_packet = 50;
        if (st->codecpar->codec_id == AV_CODEC_ID_AMR_NB)
            n = 31;
        else
            n = 61;
        /* max_header_toc_size + the largest AMR payload must fit */
        if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
            goto fail;
        }
        if (st->codecpar->channels != 1) {
            av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
            goto fail;
        }
        break;
    case AV_CODEC_ID_AAC:
        s->max_frames_per_packet = 50;
        break;
    default:
        break;
    }

    return 0;

fail:
    /* release the packet buffer allocated above */
    av_freep(&s->buf);
    return ret;
}
/* Decode a Dirac frame with libdirac.  Feeds the packet into the parser and
 * loops on parser states until a picture is available, more bits are needed,
 * or an error occurs.  Returns the number of bytes consumed or negative on
 * error; *data_size is set when a picture is emitted. */
static int libdirac_decode_frame(AVCodecContext *avccontext, void *data,
                                 int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;

    FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
    AVPicture *picture = data;
    AVPicture pic;
    int pict_size;
    unsigned char *buffer[3];

    *data_size = 0;

    if (buf_size > 0) {
        /* set data to decode into buffer */
        dirac_buffer(p_dirac_params->p_decoder, buf, buf + buf_size);

        /* the parse-code byte lives at offset 4; the original read buf[4]
         * even for packets shorter than 5 bytes (out-of-bounds read) */
        if (buf_size >= 5 && (buf[4] & 0x08) == 0x08 && (buf[4] & 0x03))
            avccontext->has_b_frames = 1;
    }
    while (1) {
        /* parse data and process result */
        DecoderState state = dirac_parse(p_dirac_params->p_decoder);
        switch (state) {
        case STATE_BUFFER:
            /* parser wants more input */
            return buf_size;

        case STATE_SEQUENCE:
        {
            /* tell FFmpeg about sequence details */
            dirac_sourceparams_t *src_params =
                &p_dirac_params->p_decoder->src_params;

            if (av_image_check_size(src_params->width, src_params->height,
                                    0, avccontext) < 0) {
                av_log(avccontext, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n",
                       src_params->width, src_params->height);
                avccontext->height = avccontext->width = 0;
                return -1;
            }

            avccontext->height = src_params->height;
            avccontext->width  = src_params->width;

            avccontext->pix_fmt = GetFfmpegChromaFormat(src_params->chroma);
            if (avccontext->pix_fmt == PIX_FMT_NONE) {
                av_log(avccontext, AV_LOG_ERROR,
                       "Dirac chroma format %d not supported currently\n",
                       src_params->chroma);
                return -1;
            }
            avccontext->time_base.den = src_params->frame_rate.numerator;
            avccontext->time_base.num = src_params->frame_rate.denominator;

            /* calculate output dimensions (NULL fill only sets linesizes) */
            avpicture_fill(&pic, NULL, avccontext->pix_fmt,
                           avccontext->width, avccontext->height);

            pict_size = avpicture_get_size(avccontext->pix_fmt,
                                           avccontext->width,
                                           avccontext->height);

            /* allocate output buffer */
            if (!p_dirac_params->p_out_frame_buf)
                p_dirac_params->p_out_frame_buf = av_malloc(pict_size);
            if (!p_dirac_params->p_out_frame_buf)
                /* was unchecked: buffer[] setup below would use NULL */
                return AVERROR(ENOMEM);
            buffer[0] = p_dirac_params->p_out_frame_buf;
            buffer[1] = p_dirac_params->p_out_frame_buf +
                        pic.linesize[0] * avccontext->height;
            buffer[2] = buffer[1] +
                        pic.linesize[1] * src_params->chroma_height;

            /* tell Dirac about output destination */
            dirac_set_buf(p_dirac_params->p_decoder, buffer, NULL);
            break;
        }
        case STATE_SEQUENCE_END:
            break;

        case STATE_PICTURE_AVAIL:
            /* fill picture with current buffer data from Dirac */
            avpicture_fill(picture, p_dirac_params->p_out_frame_buf,
                           avccontext->pix_fmt,
                           avccontext->width, avccontext->height);
            *data_size = sizeof(AVPicture);
            return buf_size;

        case STATE_INVALID:
            return -1;

        default:
            break;
        }
    }

    return buf_size;
}
/* Video decoding thread: pops compressed packets off the video queue,
 * decodes them to YUV, converts to RGB, and pushes the result (with its
 * pts in seconds) onto the picture queue consumed by the display side. */
void VideoThread::run(){
    /* allocate the YUV and RGB frames */
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();

    /* from this point on the window is allowed to refresh */
    _is->window->startdisplay();

    // Calculate the size in bytes that a picture of the given width and height
    // would occupy if stored in the given picture format.
    bytes = avpicture_get_size(CONV_FORMAT, _is->video_st->codec->width,
                               _is->video_st->codec->height);
    uint8_t *video_buffer = (uint8_t*)av_malloc( bytes * sizeof(uint8_t) );

    avpicture_fill((AVPicture *)pFrameRGB, video_buffer, CONV_FORMAT,
                   _is->video_st->codec->width, _is->video_st->codec->height);

    /* main loop:
       - pop a packet from the packet queue
       - decode the YUV frame
       - convert the frame to RGB
       - push the RGB frame onto the picture queue */
    while(1) {
        if(_is->ut.getPauseValue() && !_is->ut.getStopValue()){
            /* NOTE(review): busy-waits while paused; the sleep below was left
               commented out — confirm the spin is intentional */
            continue;
            //this->usleep(10000);
        };

        // pop packets from the queue
        if(_is->videoq.Get(packet, 1) < 0){
            // means we quit getting packets
            break;
        }

        // check whether we received a FLUSH packet (issued after a seek)
        if(packet->data == _is->flush_pkt->data){
            avcodec_flush_buffers(_is->video_st->codec);
            _is->pictq.Flush();
            _is->frame_last_pts = AV_NOPTS_VALUE;
            _is->frame_last_delay = 0;
            _is->frame_timer = (double)av_gettime() / 1000000.0;
            continue;
        }

        pts = 0; // reset pts to 0, meaning "not found yet"

        //Save global pts to be stored in pFrame in first call
        _is->global_video_pkt_pts = packet->pts;

        // Decode video frame
        avcodec_decode_video2(_is->video_st->codec, pFrame, &frameFinished,
                              packet);

        // note: 'opaque' is a user field of pFrame; our buffer-allocation
        // callback uses it to stash the pts of the first packet of the frame
        /* case: no usable DTS, but the allocation callback stored a pts */
        if (packet->dts == (int64_t)AV_NOPTS_VALUE && pFrame->opaque
            && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *) pFrame->opaque;
        }
        /* case: the DTS is available — use it as the pts estimate */
        else if (packet->dts != (int64_t)AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        /* PTS = PTS * (time_base as double): yields the pts in seconds */
        pts *= av_q2d(_is->video_st->time_base);

        // Did we get a video frame?
        if(frameFinished) {
            synchronize_video(); // pts synchronization

            /* convert pFrame (YUV) -> pFrameRGB */
            sws_scale(_is->sws_ctx, (uint8_t const * const *)pFrame->data,
                      pFrame->linesize, 0, _is->video_st->codec->height,
                      pFrameRGB->data, pFrameRGB->linesize);

            /* throttle while the picture queue is full and we are not stopping */
            while(_is->pictq.getSize() > VIDEO_PICTURE_QUEUE_SIZE &&
                  (_is->ut.getStopValue() == false)){
                this->usleep(1000);
            }

            /* hand the RGB frame to the picture queue */
            if(_is->pictq.Put(pFrameRGB, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    av_free(pFrameRGB);
    return;
}
/* The encoder doesn't know anything about interlacing, the halve height * needs to be passed and the double rowstride. Which field gets encoded * is decided by what buffers are passed to mjpeg_encode_frame */ jpeg_enc_t *jpeg_enc_init(int w, int h, int y_psize, int y_rsize, int u_psize, int u_rsize, int v_psize, int v_rsize, int cu, int q, int b) { jpeg_enc_t *j; int i = 0; mp_msg(MSGT_VO, MSGL_V, "JPEnc init: %dx%d %d %d %d %d %d %d\n", w, h, y_psize, y_rsize, u_psize, u_rsize, v_psize, v_rsize); j = av_malloc(sizeof(jpeg_enc_t)); if (j == NULL) return NULL; j->s = av_malloc(sizeof(MpegEncContext)); memset(j->s,0x00,sizeof(MpegEncContext)); if (j->s == NULL) { av_free(j); return NULL; } /* info on how to access the pixels */ j->y_ps = y_psize; j->u_ps = u_psize; j->v_ps = v_psize; j->y_rs = y_rsize; j->u_rs = u_rsize; j->v_rs = v_rsize; j->s->width = w; j->s->height = h; j->s->qscale = q; j->s->out_format = FMT_MJPEG; j->s->intra_only = 1; j->s->encoding = 1; j->s->pict_type = I_TYPE; j->s->y_dc_scale = 8; j->s->c_dc_scale = 8; //FIXME j->s->mjpeg_write_tables = 1; j->s->mjpeg_vsample[0] = 1; j->s->mjpeg_vsample[1] = 1; j->s->mjpeg_vsample[2] = 1; j->s->mjpeg_hsample[0] = 2; j->s->mjpeg_hsample[1] = 1; j->s->mjpeg_hsample[2] = 1; j->cheap_upsample = cu; j->bw = b; /* if libavcodec is used by the decoder then we must not * initialize again, but if it is not initialized then we must * initialize it here. 
*/ if (!avcodec_inited) { /* we need to initialize libavcodec */ avcodec_init(); avcodec_register_all(); avcodec_inited=1; } if (ff_mjpeg_encode_init(j->s) < 0) { av_free(j->s); av_free(j); return NULL; } /* alloc bogus avctx to keep MPV_common_init from segfaulting */ j->s->avctx = calloc(sizeof(*j->s->avctx), 1); /* Set up to encode mjpeg */ j->s->avctx->codec_id = CODEC_ID_MJPEG; /* make MPV_common_init allocate important buffers, like s->block */ j->s->avctx->thread_count = 1; if (MPV_common_init(j->s) < 0) { av_free(j->s); av_free(j); return NULL; } /* correct the value for sc->mb_height */ j->s->mb_height = j->s->height/8; j->s->mb_intra = 1; j->s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; for (i = 1; i < 64; i++) j->s->intra_matrix[i] = av_clip_uint8( (ff_mpeg1_default_intra_matrix[i]*j->s->qscale) >> 3); convert_matrix(j->s, j->s->q_intra_matrix, j->s->q_intra_matrix16, j->s->intra_matrix, j->s->intra_quant_bias, 8, 8); return j; }
/* Read one sample from a Sega FILM (.cpk) file.  Stereo non-ADX PCM is
 * stored planar (left half, then right half) and interleaved here before
 * being returned. */
static int film_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    film_sample *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR_EOF;

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    avio_seek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == AV_CODEC_ID_CINEPAK)) {
        pkt->pos = avio_tell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        ret = avio_read(pb, pkt->data, sample->sample_size);
        if (ret != sample->sample_size)
            /* the return value was discarded in the original, so a short
             * read handed uninitialized packet data to the decoder */
            ret = AVERROR(EIO);
    } else if ((sample->stream == film->audio_stream_index) &&
               (film->audio_channels == 2) &&
               (film->audio_type != AV_CODEC_ID_ADPCM_ADX)) {
        /* stereo PCM needs to be interleaved */
        if (ffio_limit(pb, sample->sample_size) != sample->sample_size)
            return AVERROR(EIO);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer_size = sample->sample_size;
            film->stereo_buffer = av_malloc(film->stereo_buffer_size);
            if (!film->stereo_buffer) {
                film->stereo_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }

        pkt->pos = avio_tell(pb);
        ret = avio_read(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);

        /* interleave the planar halves; the bound keeps the 16-bit case
         * from reading past the end of an odd-sized chunk */
        left  = 0;
        right = sample->sample_size / 2;
        for (i = 0; i + 1 + 2 * (film->audio_bits != 8) < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret = av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name) { AVFilterContext *ret; if (!filter) return NULL; ret = av_mallocz(sizeof(AVFilterContext)); if (!ret) return NULL; ret->av_class = &avfilter_class; ret->filter = filter; ret->name = inst_name ? av_strdup(inst_name) : NULL; if (filter->priv_size) { ret->priv = av_mallocz(filter->priv_size); if (!ret->priv) goto err; } av_opt_set_defaults(ret); if (filter->priv_class) { *(const AVClass**)ret->priv = filter->priv_class; av_opt_set_defaults(ret->priv); } ret->internal = av_mallocz(sizeof(*ret->internal)); if (!ret->internal) goto err; ret->internal->execute = default_execute; ret->nb_inputs = avfilter_pad_count(filter->inputs); if (ret->nb_inputs ) { ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs); if (!ret->input_pads) goto err; memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs); ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs); if (!ret->inputs) goto err; } ret->nb_outputs = avfilter_pad_count(filter->outputs); if (ret->nb_outputs) { ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs); if (!ret->output_pads) goto err; memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs); ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs); if (!ret->outputs) goto err; } #if FF_API_FOO_COUNT ret->output_count = ret->nb_outputs; ret->input_count = ret->nb_inputs; #endif return ret; err: av_freep(&ret->inputs); av_freep(&ret->input_pads); ret->nb_inputs = 0; av_freep(&ret->outputs); av_freep(&ret->output_pads); ret->nb_outputs = 0; av_freep(&ret->priv); av_freep(&ret->internal); av_free(ret); return NULL; }
/* Open the encoder codec, allocate the raw and (if needed) conversion
 * pictures, open the output file and write the container header.  All
 * failures go through Fatal()/Panic(), which do not return. */
void VideoStream::OpenStream( )
{
    int avRet;

    /* now that all the parameters are set, we can open the
       video codecs and allocate the necessary encode buffers */
    if ( ost )
    {
        AVCodecContext *c = ost->codec;

        /* open the codec */
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
        if ( (avRet = avcodec_open( c, codec )) < 0 )
#else
        if ( (avRet = avcodec_open2( c, codec, 0 )) < 0 )
#endif
        {
            Fatal( "Could not open codec. Error code %d \"%s\"", avRet,
                   av_err2str( avRet ) );
        }

        Debug( 1, "Opened codec" );

        /* allocate the encoded raw picture */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
        opicture = av_frame_alloc( );
#else
        opicture = avcodec_alloc_frame( );
#endif
        if ( !opicture )
        {
            Panic( "Could not allocate opicture" );
        }
        int size = avpicture_get_size( c->pix_fmt, c->width, c->height );
        uint8_t *opicture_buf = (uint8_t *)av_malloc( size );
        if ( !opicture_buf )
        {
            av_free( opicture );
            Panic( "Could not allocate opicture_buf" );
        }
        avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt,
                        c->width, c->height );

        /* if the output format is not identical to the input format, then a
           temporary picture is needed too.  It is then converted to the
           required output format */
        tmp_opicture = NULL;
        if ( c->pix_fmt != pf )
        {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
            tmp_opicture = av_frame_alloc( );
#else
            tmp_opicture = avcodec_alloc_frame( );
#endif
            if ( !tmp_opicture )
            {
                Panic( "Could not allocate tmp_opicture" );
            }
            int size = avpicture_get_size( pf, c->width, c->height );
            uint8_t *tmp_opicture_buf = (uint8_t *)av_malloc( size );
            if ( !tmp_opicture_buf )
            {
                av_free( tmp_opicture );
                Panic( "Could not allocate tmp_opicture_buf" );
            }
            avpicture_fill( (AVPicture *)tmp_opicture, tmp_opicture_buf,
                            pf, c->width, c->height );
        }
    }

    /* open the output file, if needed */
    if ( !(of->flags & AVFMT_NOFILE) )
    {
        int ret;
#if LIBAVFORMAT_VERSION_CHECK(53, 15, 0, 21, 0)
        ret = avio_open2( &ofc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL );
#elif LIBAVFORMAT_VERSION_CHECK(52, 102, 0, 102, 0)
        ret = avio_open( &ofc->pb, filename, AVIO_FLAG_WRITE );
#else
        ret = url_fopen( &ofc->pb, filename, AVIO_FLAG_WRITE );
#endif
        if ( ret < 0 )
        {
            Fatal( "Could not open '%s'", filename );
        }
        Debug( 1, "Opened output \"%s\"", filename );
    }
    else
    {
        Fatal( "of->flags & AVFMT_NOFILE" );
    }

    video_outbuf = NULL;
    if ( !(of->flags & AVFMT_RAWPICTURE) )
    {
        /* allocate output buffer */
        /* XXX: API change will be done */
        // TODO: Make buffer dynamic.
        video_outbuf_size = 4000000;
        /* NOTE(review): allocated with plain malloc(); confirm the matching
           release path uses free(), not av_free() */
        video_outbuf = (uint8_t *)malloc( video_outbuf_size );
        if ( video_outbuf == NULL )
        {
            Fatal("Unable to malloc memory for outbuf");
        }
    }

#if LIBAVFORMAT_VERSION_CHECK(52, 101, 0, 101, 0)
    av_dump_format(ofc, 0, filename, 1);
#else
    dump_format(ofc, 0, filename, 1);
#endif

#if !LIBAVFORMAT_VERSION_CHECK(53, 2, 0, 4, 0)
    int ret = av_write_header( ofc );
#else
    int ret = avformat_write_header( ofc, NULL );
#endif
    if ( ret < 0 )
    {
        Fatal( "?_write_header failed with error %d \"%s\"", ret,
               av_err2str( ret ) );
    }
}