/*
 * Pull the next audio packet for this stream and append its payload to the
 * decode buffer v->data, growing the buffer on demand.  At most one packet
 * is consumed per call (the loop breaks after the first successful copy).
 */
static void nextPacketAudio( Vineo *v )
{
    AVPacket pkt;

    while( nextPacket( v, v->idx_audio, &pkt ) )
    {
        size_t idx = v->data_size;

        /* Grow the input data buffer as needed to hold the new packet's
         * data.  Some ffmpeg codecs overread, so keep an extra
         * FF_INPUT_BUFFER_PADDING_SIZE bytes of slack allocated. */
        if( idx + pkt.size > v->data_size_max )
        {
            void *tmp = av_realloc( v->data, idx + pkt.size + FF_INPUT_BUFFER_PADDING_SIZE );
            if( !tmp )
            {
                /* FIX: release the packet before bailing out; it was
                 * previously leaked on allocation failure. */
                av_free_packet( &pkt );
                break;
            }
            v->data = tmp;
            v->data_size_max = idx + pkt.size;
        }

        /* Copy the packet payload and release the packet. */
        memcpy( &v->data[idx], pkt.data, pkt.size );
        v->data_size += pkt.size;
        av_free_packet( &pkt );
        break;
    }
}
int CAEEncoderFFmpeg::Encode(float *data, unsigned int frames) { int got_output; AVFrame *frame; const uint8_t *input = (const uint8_t*) data; if (!m_CodecCtx || frames < m_NeededFrames) return 0; /* size of the buffer sent to the encoder: either from the input data or after * conversion, in all cases it is in the m_CodecCtx->sample_fmt format */ int buf_size = av_samples_get_buffer_size(NULL, m_CodecCtx->channels, frames, m_CodecCtx->sample_fmt, 0); assert(buf_size>0); /* allocate the input frame * sadly, we have to alloc/dealloc it everytime since we have no guarantee the * data argument will be constant over iterated calls and the frame needs to * setup pointers inside data */ frame = av_frame_alloc(); if (!frame) return 0; frame->nb_samples = m_CodecCtx->frame_size; frame->format = m_CodecCtx->sample_fmt; frame->channel_layout = m_CodecCtx->channel_layout; if (m_NeedConversion) { if (!m_ResampBuffer || buf_size > m_ResampBufferSize) { m_ResampBuffer = (uint8_t*)av_realloc(m_ResampBuffer, buf_size); if (!m_ResampBuffer) { CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Encode - Failed to allocate %i bytes buffer for resampling", buf_size); av_frame_free(&frame); return 0; } m_ResampBufferSize = buf_size; } avcodec_fill_audio_frame(frame, m_CodecCtx->channels, m_CodecCtx->sample_fmt, m_ResampBuffer, buf_size, 0); /* important note: the '&input' here works because we convert from a packed * format (ie, interleaved). If it were to be used to convert from planar * formats (ie, non-interleaved, which is not currently supported by AE), * we would need to adapt it or it would segfault. 
*/ if (swr_convert(m_SwrCtx, frame->extended_data, frames, &input, frames) < 0) { CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Encode - Resampling failed"); av_frame_free(&frame); return 0; } } else avcodec_fill_audio_frame(frame, m_CodecCtx->channels, m_CodecCtx->sample_fmt, input, buf_size, 0); /* initialize the output packet */ av_init_packet(&m_Pkt); m_Pkt.size = sizeof(m_Buffer) - IEC61937_DATA_OFFSET; m_Pkt.data = m_Buffer + IEC61937_DATA_OFFSET; /* encode it */ int ret = avcodec_encode_audio2(m_CodecCtx, &m_Pkt, frame, &got_output); /* free temporary data */ av_frame_free(&frame); if (ret < 0 || !got_output) { CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Encode - Encoding failed"); return 0; } /* pack it into an IEC958 frame */ m_BufferSize = m_PackFunc(NULL, m_Pkt.size, m_Buffer); if (m_BufferSize != m_OutputSize) { m_OutputSize = m_BufferSize; m_OutputRatio = (double)m_NeededFrames / m_OutputSize; } /* free the packet */ av_free_packet(&m_Pkt); /* return the number of frames used */ return m_NeededFrames; }
/*
 * Encode one frame (or flush at end-of-stream when frame == NULL) with the
 * Schroedinger (Dirac) encoder.  Non-picture data produced by the encoder is
 * buffered and prepended to the next picture so the pts can be set on it.
 * Returns 0 (with *got_packet set when a packet was produced) or a negative
 * error code.
 */
static int libschroedinger_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                        const AVFrame *frame, int *got_packet)
{
    int enc_size = 0;
    SchroEncoderParams *p_schro_params = avctx->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FFSchroEncodedFrame *p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;
    int pkt_size, ret;

    if (!frame) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avctx, frame);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        SchroStateEnum state;
        void *dst;

        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            av_assert0(enc_buf->length > 0);
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame.
             * FIX: av_realloc() was used unchecked; on failure it would
             * have been dereferenced (and the old buffer leaked). */
            dst = av_realloc(p_schro_params->enc_buf,
                             p_schro_params->enc_buf_size + enc_buf->length);
            if (!dst) {
                schro_buffer_unref(enc_buf);
                return AVERROR(ENOMEM);
            }
            p_schro_params->enc_buf = dst;
            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;

            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame.  FIX: check the allocation. */
            p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame));
            if (!p_frame_output) {
                schro_buffer_unref(enc_buf);
                return AVERROR(ENOMEM);
            }

            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                     p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);
            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;
        case SCHRO_STATE_AGAIN:
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Copy 'next' frame in queue. */
    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue);
    if (!p_frame_output)
        return 0;

    pkt_size = p_frame_output->size;
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
        pkt_size += p_schro_params->enc_buf_size;
    if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
        goto error;

    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);

    avctx->coded_frame->key_frame = p_frame_output->key_frame;

    /* Use the frame number of the encoded frame as the pts. It is OK to
     * do so since Dirac is a constant frame rate codec. It expects input
     * to be of constant frame rate. */
    pkt->pts = avctx->coded_frame->pts = p_frame_output->frame_num;
    pkt->dts = p_schro_params->dts++;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    if (p_frame_output->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

error:
    /* free frame */
    libschroedinger_free_frame(p_frame_output);
    return ret;
}
/*
 * Flush the per-channel peak values collected for the current frame into the
 * peak output buffer, growing the buffer when needed, and keep track of the
 * overall peak-of-peaks position.
 */
static void peak_write_frame(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    int peak_of_peaks;
    int c;

    if (!wav->peak_output)
        return;

    for (c = 0; c < enc->channels; c++) {
        wav->peak_maxneg[c] = -wav->peak_maxneg[c];

        /* 16-bit samples but 8-bit peak format: scale the peaks down. */
        if (wav->peak_bps == 2 && wav->peak_format == PEAK_FORMAT_UINT8) {
            wav->peak_maxpos[c] = wav->peak_maxpos[c] / 256;
            wav->peak_maxneg[c] = wav->peak_maxneg[c] / 256;
        }

        /* One peak value per vertical unit: keep only the larger magnitude. */
        if (wav->peak_ppv == 1)
            wav->peak_maxpos[c] =
                FFMAX(wav->peak_maxpos[c], wav->peak_maxneg[c]);

        peak_of_peaks = FFMAX3(wav->peak_maxpos[c], wav->peak_maxneg[c],
                               wav->peak_pop);
        if (peak_of_peaks > wav->peak_pop)
            wav->peak_pos_pop = wav->peak_num_frames;
        wav->peak_pop = peak_of_peaks;

        if (wav->peak_outbuf_size - wav->peak_outbuf_bytes <
            wav->peak_format * wav->peak_ppv) {
            /* FIX: realloc into a temporary — assigning av_realloc() straight
             * back to peak_output leaked the old buffer on failure; also only
             * bump peak_outbuf_size once the allocation has succeeded. */
            uint8_t *tmp = av_realloc(wav->peak_output,
                                      wav->peak_outbuf_size + PEAK_BUFFER_SIZE);
            if (!tmp) {
                av_log(s, AV_LOG_ERROR, "No memory for peak data\n");
                return;
            }
            wav->peak_output       = tmp;
            wav->peak_outbuf_size += PEAK_BUFFER_SIZE;
        }

        if (wav->peak_format == PEAK_FORMAT_UINT8) {
            wav->peak_output[wav->peak_outbuf_bytes++] = wav->peak_maxpos[c];
            if (wav->peak_ppv == 2) {
                wav->peak_output[wav->peak_outbuf_bytes++] = wav->peak_maxneg[c];
            }
        } else {
            AV_WL16(wav->peak_output + wav->peak_outbuf_bytes,
                    wav->peak_maxpos[c]);
            wav->peak_outbuf_bytes += 2;
            if (wav->peak_ppv == 2) {
                AV_WL16(wav->peak_output + wav->peak_outbuf_bytes,
                        wav->peak_maxneg[c]);
                wav->peak_outbuf_bytes += 2;
            }
        }
        wav->peak_maxpos[c] = 0;
        wav->peak_maxneg[c] = 0;
    }
    wav->peak_num_frames++;
}
/*
 * Write one packet to the AVI file: pad missing CBR audio chunks with empty
 * packets, roll over to a new OpenDML "AVIX" RIFF once the size limit is
 * reached, record an index entry and emit the chunk (word-aligned).
 * Returns 0 on success, -1 on allocation failure.
 */
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

    /* For streams without a fixed block size the chunk count must track the
     * dts, so emit empty chunks until this packet's dts is reached. */
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avi->packet_count[stream_index]){
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
    }
    avi->packet_count[stream_index]++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (!url_is_streamed(pb) &&
        (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
        avi_write_ix(s);
        end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
    if(pkt->flags&PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        avi->audio_strm_length[stream_index] += size;
    }

    if (!url_is_streamed(&s->pb)) {
        AVIIndex* idx = &avi->indexes[stream_index];
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            /* FIX: realloc into a temporary — assigning av_realloc() straight
             * back to idx->cluster leaked the old cluster table on failure. */
            void *tmp = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
            if (!tmp)
                return -1;
            idx->cluster = tmp;
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    put_buffer(pb, tag, 4);
    put_le32(pb, size);
    put_buffer(pb, pkt->data, size);
    if (size & 1)
        put_byte(pb, 0);   /* AVI chunks are word-aligned */

    put_flush_packet(pb);
    return 0;
}
/*
 * Convert H.264 from MP4 ("avcC", length-prefixed NAL units) to Annex-B
 * (start-code-prefixed) form.  On the first call the SPS/PPS units stored in
 * the avcC extradata are converted to an Annex-B blob that replaces
 * avctx->extradata; on every call each length-prefixed NAL unit in 'buf' is
 * re-emitted with a start code, with the extradata prepended once before the
 * first IDR slice of each IDR picture.
 * Returns 1 with a newly allocated *poutbuf on success, 0 for pass-through,
 * negative AVERROR on failure.
 */
static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
                                   AVCodecContext *avctx, const char *args,
                                   uint8_t **poutbuf, int *poutbuf_size,
                                   const uint8_t *buf, int buf_size,
                                   int keyframe)
{
    H264BSFContext *ctx = bsfc->priv_data;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size = 0;
    const uint8_t *buf_end = buf + buf_size;

    /* nothing to filter (no avcC extradata — input is assumed Annex-B) */
    if (!avctx->extradata || avctx->extradata_size < 6) {
        *poutbuf = (uint8_t *) buf;
        *poutbuf_size = buf_size;
        return 0;
    }

    /* retrieve sps and pps NAL units from extradata */
    if (!ctx->extradata_parsed) {
        uint16_t unit_size;
        uint64_t total_size = 0;
        uint8_t *out = NULL, unit_nb, sps_done = 0,
                sps_seen = 0, pps_seen = 0;
        /* skip the 4 fixed header bytes of the avcC box */
        const uint8_t *extradata = avctx->extradata + 4;
        static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };

        /* retrieve length coded size (1, 2 or 4 bytes; 3 is invalid) */
        ctx->length_size = (*extradata++ & 0x3) + 1;
        if (ctx->length_size == 3)
            return AVERROR(EINVAL);

        /* retrieve sps and pps unit(s) */
        unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
        if (!unit_nb) {
            goto pps;
        } else {
            sps_seen = 1;
        }

        /* NOTE: the loop is entered a second time via the 'pps:' label once
         * all SPS units are copied, to process the PPS units the same way */
        while (unit_nb--) {
            void *tmp;

            unit_size = AV_RB16(extradata);
            total_size += unit_size + 4;
            /* bounds check both the running total and the extradata read */
            if (total_size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE ||
                extradata + 2 + unit_size > avctx->extradata + avctx->extradata_size) {
                av_free(out);
                return AVERROR(EINVAL);
            }
            tmp = av_realloc(out, total_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!tmp) {
                av_free(out);
                return AVERROR(ENOMEM);
            }
            out = tmp;
            /* prepend a start code, then copy the unit payload */
            memcpy(out + total_size - unit_size - 4, nalu_header, 4);
            memcpy(out + total_size - unit_size, extradata + 2, unit_size);
            extradata += 2 + unit_size;
pps:
            if (!unit_nb && !sps_done++) {
                unit_nb = *extradata++; /* number of pps unit(s) */
                if (unit_nb)
                    pps_seen = 1;
            }
        }

        if (out)
            memset(out + total_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

        if (!sps_seen)
            av_log(avctx, AV_LOG_WARNING, "Warning: SPS NALU missing or invalid. The resulting stream may not play.\n");
        if (!pps_seen)
            av_log(avctx, AV_LOG_WARNING, "Warning: PPS NALU missing or invalid. The resulting stream may not play.\n");

        /* replace the avcC extradata with the converted Annex-B blob */
        av_free(avctx->extradata);
        avctx->extradata = out;
        avctx->extradata_size = total_size;
        ctx->first_idr = 1;
        ctx->extradata_parsed = 1;
    }

    *poutbuf_size = 0;
    *poutbuf = NULL;
    /* walk the length-prefixed NAL units of this packet */
    do {
        if (buf + ctx->length_size > buf_end)
            goto fail;
        if (ctx->length_size == 1) {
            nal_size = buf[0];
        } else if (ctx->length_size == 2) {
            nal_size = AV_RB16(buf);
        } else
            nal_size = AV_RB32(buf);

        buf += ctx->length_size;
        unit_type = *buf & 0x1f;

        if (buf + nal_size > buf_end || nal_size < 0)
            goto fail;

        /* prepend only to the first type 5 NAL unit of an IDR picture */
        if (ctx->first_idr && unit_type == 5) {
            if (alloc_and_copy(poutbuf, poutbuf_size,
                               avctx->extradata, avctx->extradata_size,
                               buf, nal_size) < 0)
                goto fail;
            ctx->first_idr = 0;
        } else {
            if (alloc_and_copy(poutbuf, poutbuf_size, NULL, 0, buf, nal_size) < 0)
                goto fail;
            /* a non-IDR slice re-arms the prepend for the next IDR */
            if (!ctx->first_idr && unit_type == 1)
                ctx->first_idr = 1;
        }

        buf += nal_size;
        cumul_size += nal_size + ctx->length_size;
    } while (cumul_size < buf_size);

    return 1;

fail:
    av_freep(poutbuf);
    *poutbuf_size = 0;
    return AVERROR(EINVAL);
}
/*
 * Read header for the image2 (de)muxer family: set up the single stream,
 * resolve the file pattern (sequence, glob, glob_sequence or pipe), compute
 * the image index range and duration, and determine the codec — by explicit
 * id, by probing the data, or from the file extension.
 * Returns 0 on success or a negative AVERROR code.
 */
int ff_img_read_header(AVFormatContext *s1)
{
    VideoDemuxData *s = s1->priv_data;
    int first_index = 1, last_index = 1;
    AVStream *st;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;

    s1->ctx_flags |= AVFMTCTX_NOHEADER;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    if (s->pixel_format &&
        (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n",
               s->pixel_format);
        return AVERROR(EINVAL);
    }

    av_strlcpy(s->path, s1->filename, sizeof(s->path));
    s->img_number = 0;
    s->img_count  = 0;

    /* find format */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else {
        s->is_pipe       = 1;
        st->need_parsing = AVSTREAM_PARSE_FULL;
    }

    /* ts_from_file == 2 requests nanosecond mtime timestamps */
    if (s->ts_from_file == 2) {
#if !HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC
        av_log(s1, AV_LOG_ERROR, "POSIX.1-2008 not supported, nanosecond file timestamps unavailable\n");
        return AVERROR(ENOSYS);
#endif
        avpriv_set_pts_info(st, 64, 1, 1000000000);
    } else if (s->ts_from_file)
        avpriv_set_pts_info(st, 64, 1, 1);
    else
        avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num);

    if (s->width && s->height) {
        st->codec->width  = s->width;
        st->codec->height = s->height;
    }

    if (!s->is_pipe) {
        /* PT_DEFAULT: pattern-less when custom I/O is supplied, otherwise
         * fall back to the legacy glob_sequence behaviour */
        if (s->pattern_type == PT_DEFAULT) {
            if (s1->pb) {
                s->pattern_type = PT_NONE;
            } else
                s->pattern_type = PT_GLOB_SEQUENCE;
        }

        if (s->pattern_type == PT_GLOB_SEQUENCE) {
            s->use_glob = is_glob(s->path);
            if (s->use_glob) {
#if HAVE_GLOB
                char *p = s->path, *q, *dup;
                int gerr;
#endif
                av_log(s1, AV_LOG_WARNING, "Pattern type 'glob_sequence' is deprecated: "
                       "use pattern_type 'glob' instead\n");
#if HAVE_GLOB
                /* Rewrite the pattern in place: '%'-escaped glob chars are
                 * unescaped, bare glob chars get a backslash escape */
                dup = q = av_strdup(p);
                while (*q) {
                    /* Do we have room for the next char and a \ insertion? */
                    if ((p - s->path) >= (sizeof(s->path) - 2))
                        break;
                    if (*q == '%' && strspn(q + 1, "%*?[]{}"))
                        ++q;
                    else if (strspn(q, "\\*?[]{}"))
                        *p++ = '\\';
                    *p++ = *q++;
                }
                *p = 0;
                av_free(dup);

                gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
                if (gerr != 0) {
                    return AVERROR(ENOENT);
                }
                first_index = 0;
                last_index = s->globstate.gl_pathc - 1;
#endif
            }
        }
        if ((s->pattern_type == PT_GLOB_SEQUENCE && !s->use_glob) || s->pattern_type == PT_SEQUENCE) {
            /* printf-style "%d" sequence: scan for the first/last index */
            if (find_image_range(s1->pb, &first_index, &last_index, s->path,
                                 s->start_number, s->start_number_range) < 0) {
                av_log(s1, AV_LOG_ERROR,
                       "Could find no file with path '%s' and index in the range %d-%d\n",
                       s->path, s->start_number, s->start_number + s->start_number_range - 1);
                return AVERROR(ENOENT);
            }
        } else if (s->pattern_type == PT_GLOB) {
#if HAVE_GLOB
            int gerr;
            gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
            if (gerr != 0) {
                return AVERROR(ENOENT);
            }
            first_index = 0;
            last_index = s->globstate.gl_pathc - 1;
            s->use_glob = 1;
#else
            av_log(s1, AV_LOG_ERROR,
                   "Pattern type 'glob' was selected but globbing "
                   "is not supported by this libavformat build\n");
            return AVERROR(ENOSYS);
#endif
        } else if (s->pattern_type != PT_GLOB_SEQUENCE && s->pattern_type != PT_NONE) {
            av_log(s1, AV_LOG_ERROR,
                   "Unknown value '%d' for pattern_type option\n", s->pattern_type);
            return AVERROR(EINVAL);
        }
        s->img_first  = first_index;
        s->img_last   = last_index;
        s->img_number = first_index;
        /* compute duration */
        if (!s->ts_from_file) {
            st->start_time = 0;
            st->duration   = last_index - first_index + 1;
        }
    }

    /* codec selection: explicit override > demuxer raw id > probe/extension */
    if (s1->video_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = s1->video_codec_id;
    } else if (s1->audio_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = s1->audio_codec_id;
    } else if (s1->iformat->raw_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = s1->iformat->raw_codec_id;
    } else {
        const char *str = strrchr(s->path, '.');
        /* ".y" files carry each plane in a separate file */
        s->split_planes       = str && !av_strcasecmp(str + 1, "y");
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        if (s1->pb) {
            /* probe the start of the data against all image2-based demuxers */
            int probe_buffer_size = 2048;
            uint8_t *probe_buffer = av_realloc(NULL, probe_buffer_size + AVPROBE_PADDING_SIZE);
            AVInputFormat *fmt = NULL;
            AVProbeData pd = { 0 };

            if (!probe_buffer)
                return AVERROR(ENOMEM);

            probe_buffer_size = avio_read(s1->pb, probe_buffer, probe_buffer_size);
            if (probe_buffer_size < 0) {
                av_free(probe_buffer);
                return probe_buffer_size;
            }
            memset(probe_buffer + probe_buffer_size, 0, AVPROBE_PADDING_SIZE);

            pd.buf      = probe_buffer;
            pd.buf_size = probe_buffer_size;
            pd.filename = s1->filename;

            while ((fmt = av_iformat_next(fmt))) {
                if (fmt->read_header != ff_img_read_header ||
                    !fmt->read_probe ||
                    (fmt->flags & AVFMT_NOFILE) ||
                    !fmt->raw_codec_id)
                    continue;
                if (fmt->read_probe(&pd) > 0) {
                    st->codec->codec_id = fmt->raw_codec_id;
                    break;
                }
            }
            /* put the probed bytes back so decoding starts at offset 0 */
            if (s1->flags & AVFMT_FLAG_CUSTOM_IO) {
                avio_seek(s1->pb, 0, SEEK_SET);
            } else
                ffio_rewind_with_probe_data(s1->pb, &probe_buffer, probe_buffer_size);
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            st->codec->codec_id = ff_guess_image2_codec(s->path);
        if (st->codec->codec_id == AV_CODEC_ID_LJPEG)
            st->codec->codec_id = AV_CODEC_ID_MJPEG;
        if (st->codec->codec_id == AV_CODEC_ID_ALIAS_PIX) // we cannot distinguish this from BRENDER_PIX
            st->codec->codec_id = AV_CODEC_ID_NONE;
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        pix_fmt != AV_PIX_FMT_NONE)
        st->codec->pix_fmt = pix_fmt;

    return 0;
}
/*
 * Convert/resample captured s16 audio before encoding: optionally convert to
 * float, run the rate converter, and convert back to s16 when the encoder
 * takes integer samples.  Scratch buffers are grown lazily as frame counts
 * increase.
 */
static void ffmpeg_audio_resample(ffmpeg_t *handle,
      struct ffemu_audio_data *aud)
{
   if (!handle->audio.use_float && !handle->audio.resampler)
      return;

   if (aud->frames > handle->audio.float_conv_frames)
   {
      /* FIX: realloc into temporaries — assigning av_realloc() straight back
       * to the handle fields leaked the old buffers when an allocation
       * failed, and left stale sizes behind.  Each size field is now only
       * updated after its allocation succeeded. */
      float *conv = (float*)av_realloc(handle->audio.float_conv,
            aud->frames * handle->params.channels * sizeof(float));
      if (!conv)
         return;
      handle->audio.float_conv        = conv;
      handle->audio.float_conv_frames = aud->frames;

      /* To make sure we don't accidentially overflow. */
      handle->audio.resample_out_frames = aud->frames * handle->audio.ratio + 16;

      float *out = (float*)av_realloc(handle->audio.resample_out,
            handle->audio.resample_out_frames *
            handle->params.channels * sizeof(float));
      if (!out)
         return;
      handle->audio.resample_out = out;

      handle->audio.fixed_conv_frames = MAX(handle->audio.resample_out_frames,
            handle->audio.float_conv_frames);
      int16_t *fixed = (int16_t*)av_realloc(handle->audio.fixed_conv,
            handle->audio.fixed_conv_frames *
            handle->params.channels * sizeof(int16_t));
      if (!fixed)
         return;
      handle->audio.fixed_conv = fixed;
   }

   if (handle->audio.use_float || handle->audio.resampler)
   {
      audio_convert_s16_to_float(handle->audio.float_conv,
            (const int16_t*)aud->data, aud->frames * handle->params.channels, 1.0);
      aud->data = handle->audio.float_conv;
   }

   if (handle->audio.resampler)
   {
      /* It's always two channels ... */
      struct resampler_data info = {0};
      info.data_in      = (const float*)aud->data;
      info.data_out     = handle->audio.resample_out;
      info.input_frames = aud->frames;
      info.ratio        = handle->audio.ratio;

      rarch_resampler_process(handle->audio.resampler,
            handle->audio.resampler_data, &info);

      aud->data   = handle->audio.resample_out;
      aud->frames = info.output_frames;

      if (!handle->audio.use_float)
      {
         audio_convert_float_to_s16(handle->audio.fixed_conv,
               handle->audio.resample_out,
               aud->frames * handle->params.channels);
         aud->data = handle->audio.fixed_conv;
      }
   }
}
static int dtext_prepare_text(AVFilterContext *ctx) { DrawTextContext *s = ctx->priv; uint32_t code = 0, prev_code = 0; int x = 0, y = 0, i = 0, ret; int text_height, baseline; char *text = s->text; uint8_t *p; int str_w = 0, len; int y_min = 32000, y_max = -32000; FT_Vector delta; Glyph *glyph = NULL, *prev_glyph = NULL; Glyph dummy = { 0 }; int width = ctx->inputs[0]->w; int height = ctx->inputs[0]->h; time_t now = time(0); struct tm ltime; uint8_t *buf = s->expanded_text; int buf_size = s->expanded_text_size; if (!buf) buf_size = 2*strlen(s->text)+1; localtime_r(&now, <ime); while ((buf = av_realloc(buf, buf_size))) { *buf = 1; if (strftime(buf, buf_size, s->text, <ime) != 0 || *buf == 0) break; buf_size *= 2; } if (!buf) return AVERROR(ENOMEM); text = s->expanded_text = buf; s->expanded_text_size = buf_size; if ((len = strlen(text)) > s->nb_positions) { FT_Vector *p = av_realloc(s->positions, len * sizeof(*s->positions)); if (!p) { av_freep(s->positions); s->nb_positions = 0; return AVERROR(ENOMEM); } else { s->positions = p; s->nb_positions = len; } } /* load and cache glyphs */ for (i = 0, p = text; *p; i++) { GET_UTF8(code, *p++, continue;); /* get glyph */ dummy.code = code; glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL); if (!glyph) { ret = load_glyph(ctx, &glyph, code); if (ret) return ret; } y_min = FFMIN(glyph->bbox.yMin, y_min); y_max = FFMAX(glyph->bbox.yMax, y_max); }
int opt_default(const char *opt, const char *arg){ int type; int ret= 0; const AVOption *o= NULL; int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0}; AVCodec *p = NULL; AVOutputFormat *oformat = NULL; AVInputFormat *iformat = NULL; while ((p = av_codec_next(p))) { AVClass *c = p->priv_class; if (c && av_find_opt(&c, opt, NULL, 0, 0)) break; } if (p) goto out; while ((oformat = av_oformat_next(oformat))) { const AVClass *c = oformat->priv_class; if (c && av_find_opt(&c, opt, NULL, 0, 0)) break; } if (oformat) goto out; while ((iformat = av_iformat_next(iformat))) { const AVClass *c = iformat->priv_class; if (c && av_find_opt(&c, opt, NULL, 0, 0)) break; } if (iformat) goto out; for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){ const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]); if(o2) ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o); } if(!o && avformat_opts) ret = av_set_string3(avformat_opts, opt, arg, 1, &o); if(!o && sws_opts) ret = av_set_string3(sws_opts, opt, arg, 1, &o); if(!o){ if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO]) ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o); else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO]) ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o); else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE]) ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o); if (ret >= 0) opt += 1; } if (o && ret < 0) { fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt); exit(1); } if (!o) { fprintf(stderr, "Unrecognized option '%s'\n", opt); exit(1); } out: // av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL)); opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1)); opt_values[opt_name_count] = av_strdup(arg); 
opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1)); opt_names[opt_name_count++] = av_strdup(opt); if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug)) av_log_set_level(AV_LOG_DEBUG); return 0; }
/*
 * Encode one frame (or flush the sequence when data == NULL) with the Dirac
 * reference encoder.  Non-frame data is buffered and prepended to the next
 * encoded picture so the pts can be set on it.  Returns the number of bytes
 * written to 'frame', 0 when no output is ready yet, or -1 on error.
 */
static int libdirac_encode_frame(AVCodecContext *avccontext,
                                 unsigned char *frame,
                                 int buf_size, void *data)
{
    int enc_size = 0;
    dirac_encoder_state_t state;
    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
    FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
    FfmpegDiracSchroEncodedFrame* p_next_output_frame = NULL;
    int go = 1;
    int last_frame_in_sequence = 0;

    if (!data) {
        /* push end of sequence if not already signalled */
        if (!p_dirac_params->eos_signalled) {
            dirac_encoder_end_sequence(p_dirac_params->p_encoder);
            p_dirac_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to Dirac input buffer.
         * Input line size may differ from what the codec supports,
         * especially when transcoding from one format to another.
         * So use avpicture_layout to copy the frame. */
        avpicture_layout((AVPicture *)data, avccontext->pix_fmt,
                         avccontext->width, avccontext->height,
                         p_dirac_params->p_in_frame_buf,
                         p_dirac_params->frame_size);

        /* load next frame */
        if (dirac_encoder_load(p_dirac_params->p_encoder,
                               p_dirac_params->p_in_frame_buf,
                               p_dirac_params->frame_size) < 0) {
            av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
                   " dirac_encoder_load failed...\n");
            return -1;
        }
    }

    if (p_dirac_params->eos_pulled)
        go = 0;

    while (go) {
        void *dst;

        p_dirac_params->p_encoder->enc_buf.buffer = frame;
        p_dirac_params->p_encoder->enc_buf.size   = buf_size;
        /* process frame */
        state = dirac_encoder_output(p_dirac_params->p_encoder);

        switch (state) {
        case ENC_STATE_AVAIL:
        case ENC_STATE_EOS:
            assert(p_dirac_params->p_encoder->enc_buf.size > 0);

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame.
             * FIX: av_realloc() was used unchecked; on failure it would
             * have been dereferenced (and the old buffer leaked). */
            dst = av_realloc(p_dirac_params->enc_buf,
                             p_dirac_params->enc_buf_size +
                             p_dirac_params->p_encoder->enc_buf.size);
            if (!dst)
                return -1;
            p_dirac_params->enc_buf = dst;
            memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size,
                   p_dirac_params->p_encoder->enc_buf.buffer,
                   p_dirac_params->p_encoder->enc_buf.size);
            p_dirac_params->enc_buf_size +=
                p_dirac_params->p_encoder->enc_buf.size;

            if (state == ENC_STATE_EOS) {
                p_dirac_params->eos_pulled = 1;
                go = 0;
            }

            /* If non-frame data, don't output it until it we get an
             * encoded frame back from the encoder. */
            if (p_dirac_params->p_encoder->enc_pparams.pnum == -1)
                break;

            /* create output frame.  FIX: check the allocation. */
            p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame));
            if (!p_frame_output)
                return -1;

            /* set output data */
            p_frame_output->size      = p_dirac_params->enc_buf_size;
            p_frame_output->p_encbuf  = p_dirac_params->enc_buf;
            p_frame_output->frame_num =
                p_dirac_params->p_encoder->enc_pparams.pnum;

            if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
                p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
                p_frame_output->key_frame = 1;

            ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue,
                                           p_frame_output);
            p_dirac_params->enc_buf_size = 0;
            p_dirac_params->enc_buf      = NULL;
            break;

        case ENC_STATE_BUFFER:
            go = 0;
            break;
        case ENC_STATE_INVALID:
            av_log(avccontext, AV_LOG_ERROR,
                   "Unrecoverable Dirac Encoder Error. Quitting...\n");
            return -1;
        default:
            av_log(avccontext, AV_LOG_ERROR, "Unknown Dirac Encoder state\n");
            return -1;
        }
    }

    /* copy 'next' frame in queue */
    if (p_dirac_params->enc_frame_queue.size == 1 &&
        p_dirac_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_next_output_frame =
        ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);
    if (!p_next_output_frame)
        return 0;

    memcpy(frame, p_next_output_frame->p_encbuf, p_next_output_frame->size);
    avccontext->coded_frame->key_frame = p_next_output_frame->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to do
     * so since Dirac is a constant framerate codec. It expects input to be
     * of constant framerate. */
    avccontext->coded_frame->pts = p_next_output_frame->frame_num;
    enc_size = p_next_output_frame->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) {
        memcpy(frame + enc_size, p_dirac_params->enc_buf,
               p_dirac_params->enc_buf_size);
        enc_size += p_dirac_params->enc_buf_size;
        av_freep(&p_dirac_params->enc_buf);
        p_dirac_params->enc_buf_size = 0;
    }

    /* free frame */
    DiracFreeFrame(p_next_output_frame);

    return enc_size;
}
/*
 * Probe fuzz tester: feed pseudo-random buffers of doubling sizes, generated
 * with four different statistical models (plain biased bits, two Markov-style
 * bit generators, and random text), to the format probers and report
 * misdetections.  Usage: probetest [-f <input format>] [<retry_count> [<max_size>]]
 */
int main(int argc, char **argv)
{
    unsigned int p, i, type, size, retry;
    AVProbeData pd = { 0 };
    AVLFG state;
    PutBitContext pb;
    int retry_count= 4097;
    int max_size = 65537;
    int j;

    /* positional args: first number is retry_count, second is max_size */
    for (j = i = 1; i<argc; i++) {
        if (!strcmp(argv[i], "-f") && i+1<argc && !single_format) {
            single_format = argv[++i];
        } else if (read_int(argv[i])>0 && j == 1) {
            retry_count = read_int(argv[i]);
            j++;
        } else if (read_int(argv[i])>0 && j == 2) {
            max_size = read_int(argv[i]);
            j++;
        } else {
            fprintf(stderr, "probetest [-f <input format>] [<retry_count> [<max_size>]]\n");
            return 1;
        }
    }

    if (max_size > 1000000000U/8) {
        fprintf(stderr, "max_size out of bounds\n");
        return 1;
    }

    if (retry_count > 1000000000U) {
        fprintf(stderr, "retry_count out of bounds\n");
        return 1;
    }

    /* fixed seed: runs are reproducible */
    av_lfg_init(&state, 0xdeadbeef);

    pd.buf = NULL;
    for (size = 1; size < max_size; size *= 2) {
        pd.buf_size = size;
        pd.buf      = av_realloc(pd.buf, size + AVPROBE_PADDING_SIZE);
        pd.filename = "";

        if (!pd.buf) {
            fprintf(stderr, "out of memory\n");
            return 1;
        }

        memset(pd.buf, 0, size + AVPROBE_PADDING_SIZE);

        fprintf(stderr, "testing size=%d\n", size);

        for (retry = 0; retry < retry_count; retry += FFMAX(size, 32)) {
            for (type = 0; type < 4; type++) {
                /* p sweeps the generator's bias/selection parameter */
                for (p = 0; p < 4096; p++) {
                    unsigned hist = 0;
                    init_put_bits(&pb, pd.buf, size);
                    switch (type) {
                    case 0:
                        /* independent bits with probability driven by p */
                        for (i = 0; i < size * 8; i++)
                            put_bits(&pb, 1, (av_lfg_get(&state) & 0xFFFFFFFF) > p << 20);
                        break;
                    case 1:
                        /* bits correlated with the previous bit (1-bit history) */
                        for (i = 0; i < size * 8; i++) {
                            unsigned int p2 = hist ? p & 0x3F : (p >> 6);
                            unsigned int v  = (av_lfg_get(&state) & 0xFFFFFFFF) > p2 << 26;
                            put_bits(&pb, 1, v);
                            hist = v;
                        }
                        break;
                    case 2:
                        /* bits driven by a 2-bit history selecting 3-bit fields of p */
                        for (i = 0; i < size * 8; i++) {
                            unsigned int p2 = (p >> (hist * 3)) & 7;
                            unsigned int v  = (av_lfg_get(&state) & 0xFFFFFFFF) > p2 << 29;
                            put_bits(&pb, 1, v);
                            hist = (2 * hist + v) & 3;
                        }
                        break;
                    case 3:
                        /* random "text": bytes restricted to classes chosen by p */
                        for (i = 0; i < size; i++) {
                            int c = 0;
                            while (p & 63) {
                                c = (av_lfg_get(&state) & 0xFFFFFFFF) >> 24;
                                if (c >= 'a' && c <= 'z' && (p & 1))
                                    break;
                                else if (c >= 'A' && c <= 'Z' && (p & 2))
                                    break;
                                else if (c >= '0' && c <= '9' && (p & 4))
                                    break;
                                else if (c == ' ' && (p & 8))
                                    break;
                                else if (c == 0 && (p & 16))
                                    break;
                                else if (c == 1 && (p & 32))
                                    break;
                            }
                            pd.buf[i] = c;
                        }
                    }
                    flush_put_bits(&pb);
                    probe(&pd, type, p, size);
                }
            }
        }
    }
    if(AV_READ_TIME())
        print_times();
    return failures;
}
/**
 * Depacketize ASF-over-RTP: reassemble the ASF packet(s) carried by the RTP
 * payload (handling both offset-based spreading of one ASF packet over many
 * RTP packets and multiple ASF packets in one RTP packet), then let the
 * embedded ASF demuxer pull AVPackets out of the reassembled data.
 *
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be
 *           left;
 *         <0 when not enough data was provided to return a full packet, or
 *            on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, int flags)
{
    AVIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        ffio_init_context(pb, buf, len, 0, NULL, NULL, NULL, NULL);

        /* each iteration parses one payload header + its data */
        while (avio_tell(pb) + 4 < len) {
            int start_off = avio_tell(pb);

            mflags = avio_r8(pb);
            if (mflags & 0x80)
                flags |= RTP_FLAG_KEY;
            len_off = avio_rb24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                avio_skip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                avio_skip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                avio_skip(pb, 4);
            off = avio_tell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                /* offset mismatch: the reassembly buffer is stale, drop it */
                if (asf->pktbuf && len_off != avio_tell(asf->pktbuf)) {
                    uint8_t *p;
                    avio_close_dyn_buf(asf->pktbuf, &p);
                    asf->pktbuf = NULL;
                    av_free(p);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = avio_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                avio_write(asf->pktbuf, buf + off, len - off);
                avio_skip(pb, len - off);
                /* no marker bit: more fragments of this ASF packet follow */
                if (!(flags & RTP_FLAG_MARKER))
                    return -1;
                out_len     = avio_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */
                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                void *newmem;
                out_len += cur_len;
                if (FFMIN(cur_len, len - off) < 0)
                    return -1;
                newmem = av_realloc(asf->buf, out_len);
                if (!newmem)
                    return -1;
                asf->buf = newmem;
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                avio_skip(pb, cur_len);
            }
        }

        /* hand the reassembled data to the embedded ASF demuxer */
        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = ff_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = avio_tell(pb);
        if (res != 0)
            break;
        /* map the ASF stream id back to the RTSP stream index */
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_free_packet(pkt);
    }

    return res == 1 ? -1 : res;
}
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags) { AVDictionary *m = *pm; AVDictionaryEntry *tag = av_dict_get(m, key, NULL, flags); char *oldval = NULL; if (!m) m = *pm = av_mallocz(sizeof(*m)); if (!m) goto err_out; if (tag) { if (flags & AV_DICT_DONT_OVERWRITE) { if (flags & AV_DICT_DONT_STRDUP_KEY) av_free((void*)key); if (flags & AV_DICT_DONT_STRDUP_VAL) av_free((void*)value); return 0; } if (flags & AV_DICT_APPEND) oldval = tag->value; else av_free(tag->value); av_free(tag->key); *tag = m->elems[--m->count]; } else { AVDictionaryEntry *tmp = av_realloc(m->elems, (m->count + 1) * sizeof(*m->elems)); if (!tmp) goto err_out; m->elems = tmp; } if (value) { if (flags & AV_DICT_DONT_STRDUP_KEY) m->elems[m->count].key = (char*)(intptr_t)key; else m->elems[m->count].key = av_strdup(key); if (!m->elems[m->count].key) goto err_out; if (flags & AV_DICT_DONT_STRDUP_VAL) { m->elems[m->count].value = (char*)(intptr_t)value; } else if (oldval && flags & AV_DICT_APPEND) { int len = strlen(oldval) + strlen(value) + 1; char *newval = av_mallocz(len); if (!newval) goto err_out; av_strlcat(newval, oldval, len); av_freep(&oldval); av_strlcat(newval, value, len); m->elems[m->count].value = newval; } else m->elems[m->count].value = av_strdup(value); m->count++; } if (!m->count) { av_freep(&m->elems); av_freep(pm); } return 0; err_out: if (m && !m->count) { av_freep(&m->elems); av_freep(pm); } if (flags & AV_DICT_DONT_STRDUP_KEY) av_free((void*)key); if (flags & AV_DICT_DONT_STRDUP_VAL) av_free((void*)value); return AVERROR(ENOMEM); }
/**
 * Decode one VMware Screen Codec (VMnc) frame.
 *
 * The frame is a sequence of chunks (rectangles); cursor state is kept in
 * the context and the area under the cursor is restored before decoding and
 * re-saved afterwards so the cursor can be composited non-destructively.
 *
 * @return number of bytes consumed (always the full packet) or -1 on error.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmncContext * const c = avctx->priv_data;
    uint8_t *outptr;
    const uint8_t *src = buf;
    int dx, dy, w, h, depth, enc, chunks, res, size_left;

    c->pic.reference = 1;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, &c->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    c->pic.key_frame = 0;
    c->pic.pict_type = FF_P_TYPE;

    //restore screen after cursor
    if(c->screendta) {
        int i;
        /* Clip the saved cursor rectangle against the picture bounds. */
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
        }
    }
    src += 2;
    chunks = AV_RB16(src); src += 2;
    /* Each chunk: 16-bit x, y, w, h followed by a 32-bit encoding type. */
    while(chunks--) {
        dx = AV_RB16(src); src += 2;
        dy = AV_RB16(src); src += 2;
        w  = AV_RB16(src); src += 2;
        h  = AV_RB16(src); src += 2;
        enc = AV_RB32(src); src += 4;
        outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
        size_left = buf_size - (src - buf);
        switch(enc) {
        case MAGIC_WMVd: // cursor
            if(size_left < 2 + w * h * c->bpp2 * 2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
                return -1;
            }
            src += 2;
            c->cur_w = w; c->cur_h = h;
            c->cur_hx = dx; c->cur_hy = dy;
            if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
                av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
                c->cur_hx = c->cur_hy = 0;
            }
            /* NOTE(review): these av_realloc results are not checked; on OOM
             * load_cursor() below would dereference NULL — confirm upstream fix. */
            c->curbits = av_realloc(c->curbits, c->cur_w * c->cur_h * c->bpp2);
            c->curmask = av_realloc(c->curmask, c->cur_w * c->cur_h * c->bpp2);
            c->screendta = av_realloc(c->screendta, c->cur_w * c->cur_h * c->bpp2);
            load_cursor(c, src);
            src += w * h * c->bpp2 * 2;
            break;
        case MAGIC_WMVe: // unknown
            src += 2;
            break;
        case MAGIC_WMVf: // update cursor position
            c->cur_x = dx - c->cur_hx;
            c->cur_y = dy - c->cur_hy;
            break;
        case MAGIC_WMVg: // unknown
            src += 10;
            break;
        case MAGIC_WMVh: // unknown
            src += 4;
            break;
        case MAGIC_WMVi: // ServerInitialization struct
            c->pic.key_frame = 1;
            c->pic.pict_type = FF_I_TYPE;
            depth = *src++;
            if(depth != c->bpp) {
                av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
            }
            src++;
            c->bigendian = *src++;
            /* bigendian must be a 0/1 boolean flag */
            if(c->bigendian & (~1)) {
                av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
                return -1;
            }
            //skip the rest of pixel format data
            src += 13;
            break;
        case MAGIC_WMVj: // unknown
            src += 2;
            break;
        case 0x00000000: // raw rectangle data
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            if(size_left < w * h * c->bpp2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
                return -1;
            }
            paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, c->pic.linesize[0]);
            src += w * h * c->bpp2;
            break;
        case 0x00000005: // HexTile encoded rectangle
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            res = decode_hextile(c, outptr, src, size_left, w, h, c->pic.linesize[0]);
            if(res < 0)
                return -1;
            src += res;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
            chunks = 0; // leave chunks decoding loop
        }
    }
    if(c->screendta){
        int i;
        //save screen data before painting cursor
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
            outptr = c->pic.data[0];
            put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
        }
    }
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
/**
 * Build a DataNode tree from an XML-like stream.
 *
 * Character-at-a-time state machine; attributes are discarded (names are
 * truncated at the first space) and quoted text passes through verbatim.
 * Returns the root node of the parsed tree.
 *
 * NOTE(review): av_malloc/av_realloc results are not checked here, and
 * ctgbuf may still be NULL when memset() is first reached in the
 * "closing parent" branch (length 0 at that point) — confirm intent.
 */
DataNode *ff_datanode_tree_from_xml(ByteIOContext *p)
{
    int c;
    char *s;
    char tag;
    // tag:
    // 0 = awaiting opening tag
    // 1 = either in the opening tag or closing parent
    // 2 = awaiting closing tag
    // 3 = either in the closing tag or opening child
    char ncn; // no closing needed if true
    char quo; // text is in quotes if true
    char ctg; // this is the closing tag if true
    int i, b;
    DataNode *o;
    DataNode *d;
    int ctgidx;    // current index in ctgbuf
    int ctgbuflen; // buffer length of ctgbuf
    char *ctgbuf = 0; // ctgbuf: used to verify that closing tag matches opening tag name
    d = av_malloc(sizeof(*d));
    memset(d, 0, sizeof(*d));
    o = d;
    s = d->name;
    tag = ncn = quo = ctg = ctgidx = ctgbuflen = i = b = 0;
    while (1) {
        c = url_fgetc(p);
        if (c == 0 || c == EOF)
            break;
parsetag:
        if (quo) {
            // we're in quoted text, tag changes don't matter
            if (c == '"') {
                // quote closed
                quo = 0;
            }
            goto writeoutput;
        } else if (c == '"') {
            // quote opened
            quo = 1;
            goto writeoutput;
        }
        /* Whitespace outside quotes is ignored entirely. */
        if (c == '\n' || c == ' ' || c == '\t')
            continue;
        if (tag == 0) {
            // awaiting opening tag
            if (c == '<') {
                // opening tag
                tag = 1;
                d = ff_datanode_mknext(d);
                i = b = 0;
                s = d->name;
                continue;
            }
        } else if (tag == 1) {
            // in the opening tag
            if (c == '>') {
                // opening tag closed
                if (ncn) {
                    // no closing tag needed
                    ncn = 0;
                    tag = 0;
                } else
                    tag = 2;
                i = b = 0;
                s = d->value;
                continue;
            } else if (c == '/') {
                if (d->name) {
                    // tag closed, no closing tag needed
                    ncn = 1;
                    continue;
                } else {
                    // closing parent
                    tag = 3;
                    ctg = 1;
                    ctgidx = 0;
                    memset(ctgbuf, 0, ctgbuflen);
                    d = d->parent;
                    s = d->name;
                    continue;
                }
            } else if (c == ' ') {
                // no longer tag name but attributes
                // ignore attributes by null-terminating string
                c = 0;
            }
        } else if (tag == 2) {
            // awaiting closing tag
            if (c == '<') {
                // closing tag
                tag = 3;
                i = b = 0;
                s = d->name;
                continue;
            }
        } else if (tag == 3) {
            // either in the closing tag or opening a new one
            if (ctg) {
                /* Grow the verification buffer as the closing tag name grows. */
                if (ctgidx >= ctgbuflen-1) {
                    ctgbuflen += DATANODE_STR_BUFSIZE;
                    ctgbuf = av_realloc(ctgbuf, ctgbuflen);
                }
                if (c == '>') {
                    /* Verify the closing tag name matches the opening one. */
                    if (strncmp(d->name, ctgbuf, strlen(d->name)) || strlen(d->name) != ctgidx) {
                        fprintf(stderr, "malformed closing tag for %s\n", d->name);
                        // closing anyways
                    }
                    // closing tag closed
                    ctg = 0;
                    memset(ctgbuf, 0, ctgbuflen);
                    ctgidx = 0;
                    tag = 0;
                } else if (c == ' ') {
                    // ignore spaces and material afterwards
                    ctgbuf[ctgidx++] = 0;
                    ctgbuf[ctgidx] = 0;
                } else {
                    ctgbuf[ctgidx++] = c;
                    ctgbuf[ctgidx] = 0;
                }
                continue;
            }
            if (c == '/') {
                // closing tag
                ctg = 1;
                continue;
            } else {
                // opening child tag
                tag = 1;
                d = ff_datanode_mkchild(d);
                i = b = 0;
                s = d->name;
                goto parsetag;
            }
        }
writeoutput:
        /* Append c to the current target string (name or value), growing the
         * buffer in DATANODE_STR_BUFSIZE steps and keeping it NUL-terminated. */
        if (i >= b-1) {
            b += DATANODE_STR_BUFSIZE;
            if (s == d->name) {
                s = av_realloc(s, b);
                d->name = s;
            } else if (s == d->value) {
                s = av_realloc(s, b);
                d->value = s;
            }
        }
        s[i++] = c;
        s[i] = 0;
    }
    return o;
}
void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref) { *ref = f; f->refs = av_realloc(f->refs, sizeof(AVFilterFormats**) * ++f->refcount); f->refs[f->refcount-1] = ref; }
DataNode *ff_datanode_tree_from_ini(ByteIOContext *p) { int c; char *s; char e; int i, b; DataNode *o; DataNode *d; d = av_malloc(sizeof(*d)); memset(d, 0, sizeof(*d)); o = d; s = d->name; e = 1; i = b = 0; while (1) { c = url_fgetc(p); if (c == 0 || c == EOF) break; if (c == '\n') { d = ff_datanode_mknext(d); i = b = 0; s = d->name; e = 1; continue; } if (!e) { continue; } if (c == '#') { e = 0; continue; } if (c == '[') { if (d->parent) { d = d->parent; } d = ff_datanode_mknext(d); i = b = 0; s = d->name; continue; } if (c == ']') { d = ff_datanode_mkchild(d); i = b = 0; s = d->name; continue; } if (c == '=') { i = b = 0; s = d->value; continue; } if (i >= b-1) { b += DATANODE_STR_BUFSIZE; if (s == d->name) { s = av_realloc(s, b); d->name = s; } else if (s == d->value) { s = av_realloc(s, b); d->value = s; } } s[i++] = c; s[i] = 0; } return o; }
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = buf + avpkt->size; KgvContext * const c = avctx->priv_data; int offsets[7]; uint16_t *out, *prev; int outcnt = 0, maxcnt; int w, h, i; if (avpkt->size < 2) return -1; w = (buf[0] + 1) * 8; h = (buf[1] + 1) * 8; buf += 2; if (av_image_check_size(w, h, 0, avctx)) return -1; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); maxcnt = w * h; out = av_realloc(c->cur, w * h * 2); if (!out) return -1; c->cur = out; prev = av_realloc(c->prev, w * h * 2); if (!prev) return -1; c->prev = prev; for (i = 0; i < 7; i++) offsets[i] = -1; while (outcnt < maxcnt && buf_end - 2 > buf) { int code = AV_RL16(buf); buf += 2; if (!(code & 0x8000)) { out[outcnt++] = code; // rgb555 pixel coded directly } else { int count; uint16_t *inp; if ((code & 0x6000) == 0x6000) { // copy from previous frame int oidx = (code >> 10) & 7; int start; count = (code & 0x3FF) + 3; if (offsets[oidx] < 0) { if (buf_end - 3 < buf) break; offsets[oidx] = AV_RL24(buf); buf += 3; } start = (outcnt + offsets[oidx]) % maxcnt; if (maxcnt - start < count) break; inp = prev + start; } else { // copy from earlier in this frame int offset = (code & 0x1FFF) + 1; if (!(code & 0x6000)) { count = 2; } else if ((code & 0x6000) == 0x2000) { count = 3; } else { if (buf_end - 1 < buf) break; count = 4 + *buf++; } if (outcnt < offset) break; inp = out + outcnt - offset; } if (maxcnt - outcnt < count) break; for (i = 0; i < count; i++) out[outcnt++] = inp[i]; }
static av_cold int concat_open(URLContext *h, const char *uri, int flags) { char *node_uri = NULL; int err = 0; int64_t size; size_t len, i; URLContext *uc; struct concat_data *data = h->priv_data; struct concat_nodes *nodes; if (!av_strstart(uri, "concat:", &uri)) { av_log(h, AV_LOG_ERROR, "URL %s lacks prefix\n", uri); return AVERROR(EINVAL); } for (i = 0, len = 1; uri[i]; i++) { if (uri[i] == *AV_CAT_SEPARATOR) { /* integer overflow */ if (++len == UINT_MAX / sizeof(*nodes)) { av_freep(&h->priv_data); return AVERROR(ENAMETOOLONG); } } } if (!(nodes = av_realloc(NULL, sizeof(*nodes) * len))) return AVERROR(ENOMEM); else data->nodes = nodes; /* handle input */ if (!*uri) err = AVERROR(ENOENT); for (i = 0; *uri; i++) { /* parsing uri */ len = strcspn(uri, AV_CAT_SEPARATOR); if ((err = av_reallocp(&node_uri, len + 1)) < 0) break; av_strlcpy(node_uri, uri, len + 1); uri += len + strspn(uri + len, AV_CAT_SEPARATOR); /* creating URLContext */ err = ffurl_open_whitelist(&uc, node_uri, flags, &h->interrupt_callback, NULL, h->protocol_whitelist); if (err < 0) break; /* creating size */ if ((size = ffurl_size(uc)) < 0) { ffurl_close(uc); err = AVERROR(ENOSYS); break; } /* assembling */ nodes[i].uc = uc; nodes[i].size = size; } av_free(node_uri); data->length = i; if (err < 0) concat_close(h); else if (!(nodes = av_realloc(nodes, data->length * sizeof(*nodes)))) { concat_close(h); err = AVERROR(ENOMEM); } else data->nodes = nodes; return err; }
void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size) { if(min_size < *size) return ptr; *size = FFMAX(17*min_size/16+32, min_size); return av_realloc(ptr, *size); } // av_fast_realloc
/**
 * Hand out decoded audio accumulated in m_pBufferOutput.
 *
 * Converts/copies the pending decoded frame (m_pFrame1) into the packed
 * output buffer in m_desiredSampleFormat, concatenating frames until the
 * buffer reaches the desired size; when it would overflow (or frame size
 * changes), the filled buffer is flushed to the caller first.
 *
 * @return number of bytes placed at *dst (0 while still accumulating).
 */
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);

  /* A mid-stream frame-size change means frames can no longer be
   * concatenated safely; flush on the next opportunity. */
  if (!m_bNoConcatenate && m_iBufferOutputUsed && (int)m_frameSize != outputSize)
  {
    LOG_TRACE_2 << "COMXAudioCodecOMX::GetData Unexpected change of size (" << m_frameSize << " ->" << outputSize << ")";
    m_bNoConcatenate = true;
  }

  // if this buffer won't fit then flush out what we have
  int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4);
  if (m_iBufferOutputUsed && (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate))
  {
     int ret = m_iBufferOutputUsed;
     m_iBufferOutputUsed = 0;
     m_bNoConcatenate = false;
     dts = m_dts;
     pts = m_pts;
     *dst = m_pBufferOutput;
     return ret;
  }
  m_frameSize = outputSize;

  /* Grow the output buffer (with codec padding) as needed.
   * NOTE(review): av_realloc result is unchecked and overwrites
   * m_pBufferOutput directly — OOM would leak the old buffer and crash
   * below; confirm whether an upstream fix exists. */
  if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize)
  {
     m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize;
  }

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    /* Rebuild the resampler if the source format or channel count changed. */
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels),
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels),
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to initialise convert format " << m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat;
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to convert format " << (int)m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat;
      outputSize = 0;
    }
  }
  else
  {
    /* copy to a contiguous buffer */
    uint8_t *out_planes[m_pCodecContext->channels];
    if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
        av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
    {
      outputSize = 0;
    }
  }
  m_bGotFrame = false;

  if (m_bFirstFrame)
  {
    char log_buf[512];
    sprintf(log_buf, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d", inputSize, outputSize, inLineSize, outLineSize, m_pBufferOutput, desired_size);
    LOG_TRACE_2 << log_buf;
    m_bFirstFrame = false;
  }
  m_iBufferOutputUsed += outputSize;
  return 0;
}
/**
 * Demux one Smacker frame: apply any palette-change event, queue the audio
 * chunks into per-stream buffers (delivered on subsequent calls), then emit
 * the video packet with a 1-byte palette-change flag plus the 768-byte
 * palette prepended.
 *
 * NOTE(review): this function appears truncated in this chunk — the
 * else-branch (delivering queued audio packets) is not visible below.
 */
static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmackerContext *smk = s->priv_data;
    int flags;
    int ret;
    int i;
    int frame_size = 0;
    int palchange = 0;
    int pos;

    if (url_feof(s->pb) || smk->cur_frame >= smk->frames)
        return AVERROR_EOF;

    /* if we demuxed all streams, pass another frame */
    if(smk->curstream < 0) {
        avio_seek(s->pb, smk->nextpos, 0);
        frame_size = smk->frm_size[smk->cur_frame] & (~3);
        flags = smk->frm_flags[smk->cur_frame];
        /* handle palette change event */
        pos = avio_tell(s->pb);
        if(flags & SMACKER_PAL){
            int size, sz, t, off, j, pos;
            uint8_t *pal = smk->pal;
            uint8_t oldpal[768];

            memcpy(oldpal, pal, 768);
            size = avio_r8(s->pb);
            size = size * 4 - 1;
            frame_size -= size;
            frame_size--;
            sz = 0;
            pos = avio_tell(s->pb) + size;
            /* RLE-style palette update against the previous palette. */
            while(sz < 256){
                t = avio_r8(s->pb);
                if(t & 0x80){ /* skip palette entries */
                    sz += (t & 0x7F) + 1;
                    pal += ((t & 0x7F) + 1) * 3;
                } else if(t & 0x40){ /* copy with offset */
                    off = avio_r8(s->pb) * 3;
                    j = (t & 0x3F) + 1;
                    while(j-- && sz < 256) {
                        *pal++ = oldpal[off + 0];
                        *pal++ = oldpal[off + 1];
                        *pal++ = oldpal[off + 2];
                        sz++;
                        off += 3;
                    }
                } else { /* new entries */
                    *pal++ = smk_pal[t];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    sz++;
                }
            }
            avio_seek(s->pb, pos, 0);
            palchange |= 1;
        }
        flags >>= 1;
        smk->curstream = -1;
        /* if audio chunks are present, put them to stack and retrieve later */
        for(i = 0; i < 7; i++) {
            if(flags & 1) {
                int size;
                size = avio_rl32(s->pb) - 4;
                frame_size -= size;
                frame_size -= 4;
                smk->curstream++;
                /* NOTE(review): av_realloc result is unchecked here — OOM
                 * would make avio_read() write through NULL. */
                smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size);
                smk->buf_sizes[smk->curstream] = size;
                ret = avio_read(s->pb, smk->bufs[smk->curstream], size);
                if(ret != size)
                    return AVERROR(EIO);
                smk->stream_id[smk->curstream] = smk->indexes[i];
            }
            flags >>= 1;
        }
        /* Video packet layout: [palchange flag][768-byte palette][frame data]. */
        if (av_new_packet(pkt, frame_size + 768))
            return AVERROR(ENOMEM);
        if(smk->frm_size[smk->cur_frame] & 1)
            palchange |= 2;
        pkt->data[0] = palchange;
        memcpy(pkt->data + 1, smk->pal, 768);
        ret = avio_read(s->pb, pkt->data + 769, frame_size);
        if(ret != frame_size)
            return AVERROR(EIO);
        pkt->stream_index = smk->videoindex;
        pkt->size = ret + 769;
        smk->cur_frame++;
        smk->nextpos = avio_tell(s->pb);
    } else {
/**
 * Parse a 4X Movie (4xm) file header: load the LIST-HEAD chunk, scan it for
 * std_ (fps), vtrk (video) and strk (audio) sub-chunks, and create the
 * corresponding AVStreams. Leaves the IO context positioned at the
 * LIST-MOVI chunk that holds the stream data.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size){
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2float(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width  = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            avpriv_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width  = fourxm->width;
            st->codec->height = fourxm->height;

            /* Jump past this chunk's payload. */
            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;
            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            /* Guard the (current_track + 1) * sizeof() multiplication below. */
            if((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1){
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret= -1;
                goto fail;
            }
            if (current_track + 1 > fourxm->track_count) {
                fourxm->tracks = av_realloc(fourxm->tracks, (current_track + 1) * sizeof(AudioTrack));
                if (!fourxm->tracks) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                /* Zero the newly exposed (possibly skipped) track slots. */
                memset(&fourxm->tracks[fourxm->track_count], 0, sizeof(AudioTrack) * (current_track + 1 - fourxm->track_count));
                fourxm->track_count = current_track + 1;
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            if(   fourxm->tracks[current_track].channels    <= 0
               || fourxm->tracks[current_track].sample_rate <= 0
               || fourxm->tracks[current_track].bits        <  0){
                av_log(s, AV_LOG_ERROR, "audio header invalid\n");
                ret= -1;
                goto fail;
            }
            i += 8 + size;

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            st->id = current_track;
            avpriv_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels              = fourxm->tracks[current_track].channels;
            st->codec->sample_rate           = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate              = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm){
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            }else if (st->codec->bits_per_coded_sample == 8){
                st->codec->codec_id = CODEC_ID_PCM_U8;
            }else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream should be */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG){
        ret= AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);
    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */

    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
/**
 * Parse a 4X Movie (4xm) file header (older libavformat API variant of the
 * function above: ByteIOContext, st->codec as a struct, 90kHz pts base).
 *
 * NOTE(review): unlike the newer variant, several error paths here leak
 * `header` (the AVERROR_NOMEM returns after av_new_stream), current_track
 * is not range-checked before the av_realloc sizing, and skipped track
 * slots are not zeroed — confirm against the maintained version.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = &s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = (FourxmDemuxContext *)s->priv_data;
    unsigned char *header;
    int i;
    int current_track = -1;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->selected_track = 0;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    url_fseek(pb, 12, SEEK_CUR);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR_NOMEM;
    if (get_buffer(pb, header, header_size) != header_size)
        return AVERROR_IO;

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = LE_32(&header[i]);
        size = LE_32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = get_le_float(&header[i + 12]);
            fourxm->video_pts_inc = (int)(90000.0 / fourxm->fps);
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            fourxm->width = LE_32(&header[i + 36]);
            fourxm->height = LE_32(&header[i + 40]);
            i += 8 + size;

            /* allocate a new AVStream */
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR_NOMEM;
            av_set_pts_info(st, 33, 1, 90000);

            fourxm->video_stream_index = st->index;

            st->codec.frame_rate = fourxm->fps;
            st->codec.frame_rate_base = 1.0;
            st->codec.codec_type = CODEC_TYPE_VIDEO;
            st->codec.codec_id = CODEC_ID_4XM;
            st->codec.codec_tag = 0;  /* no fourcc */
            st->codec.width = fourxm->width;
            st->codec.height = fourxm->height;
        } else if (fourcc_tag == strk_TAG) {
            /* check that there is enough data */
            if (size != strk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            current_track = LE_32(&header[i + 8]);
            if (current_track + 1 > fourxm->track_count) {
                fourxm->track_count = current_track + 1;
                fourxm->tracks = av_realloc(fourxm->tracks, fourxm->track_count * sizeof(AudioTrack));
                if (!fourxm->tracks) {
                    av_free(header);
                    return AVERROR_NOMEM;
                }
            }
            fourxm->tracks[current_track].adpcm = LE_32(&header[i + 12]);
            fourxm->tracks[current_track].channels = LE_32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = LE_32(&header[i + 40]);
            fourxm->tracks[current_track].bits = LE_32(&header[i + 44]);
            i += 8 + size;

            /* allocate a new AVStream */
            st = av_new_stream(s, current_track);
            if (!st)
                return AVERROR_NOMEM;

            /* set the pts reference (1 pts = 1/90000) */
            av_set_pts_info(st, 33, 1, 90000);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec.codec_type = CODEC_TYPE_AUDIO;
            st->codec.codec_tag = 1;
            st->codec.channels = fourxm->tracks[current_track].channels;
            st->codec.sample_rate = fourxm->tracks[current_track].sample_rate;
            st->codec.bits_per_sample = fourxm->tracks[current_track].bits;
            st->codec.bit_rate = st->codec.channels * st->codec.sample_rate * st->codec.bits_per_sample;
            st->codec.block_align = st->codec.channels * st->codec.bits_per_sample;
            if (fourxm->tracks[current_track].adpcm)
                st->codec.codec_id = CODEC_ID_ADPCM_4XM;
            else if (st->codec.bits_per_sample == 8)
                st->codec.codec_id = CODEC_ID_PCM_U8;
            else
                st->codec.codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    av_free(header);

    /* skip over the LIST-MOVI chunk (which is where the stream should be */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG)
        return AVERROR_INVALIDDATA;

    /* initialize context members */
    fourxm->video_pts = -fourxm->video_pts_inc;  /* first frame will push to 0 */
    fourxm->audio_pts = 0;

    return 0;
}
void *av_realloc_array(void *ptr, size_t nmemb, size_t size) { if (!size || nmemb >= INT_MAX / size) return NULL; return av_realloc(ptr, nmemb * size); }
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *****************************************************************************/
/* Feeds the block's payload to the ffmpeg audio decoder, handles delayed
 * codec open, discontinuities, PTS bookkeeping and output-buffer growth,
 * and returns one aout buffer per call (NULL when no output is ready). */
aout_buffer_t * DecodeAudio ( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int i_used, i_output;
    aout_buffer_t *p_buffer;
    block_t *p_block;
    AVPacket pkt;

    if( !pp_block || !*pp_block ) return NULL;

    p_block = *pp_block;

    /* Open the codec lazily once extradata becomes available. */
    if( !p_sys->p_context->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, p_sys->p_context);
        if( ffmpeg_OpenCodec( p_dec ) )
            msg_Err( p_dec, "Cannot open decoder %s", p_sys->psz_namecodec );
    }

    if( p_sys->b_delayed_open )
    {
        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        block_Release( p_block );
        avcodec_flush_buffers( p_sys->p_context );
        p_sys->i_samples = 0;
        date_Set( &p_sys->end_date, 0 );
        /* MP2/MP3 decoders output garbage right after a flush; mute it. */
        if( p_sys->i_codec_id == CODEC_ID_MP2 || p_sys->i_codec_id == CODEC_ID_MP3 )
            p_sys->i_reject_count = 3;
        return NULL;
    }

    if( p_sys->i_samples > 0 )
    {
        /* More data */
        p_buffer = SplitBuffer( p_dec );
        if( !p_buffer ) block_Release( p_block );
        return p_buffer;
    }

    if( !date_Get( &p_sys->end_date ) && !p_block->i_pts )
    {
        /* We've just started the stream, wait for the first PTS. */
        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_buffer <= 0 )
    {
        block_Release( p_block );
        return NULL;
    }

    /* ffmpeg requires zeroed padding after the payload; do it once. */
    if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
    {
        *pp_block = p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );
        p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
    }

    do
    {
        i_output = __MAX( p_block->i_buffer, p_sys->i_output_max );
        if( i_output > p_sys->i_output_max )
        {
            /* Grow output buffer if necessary (eg. for PCM data) */
            /* NOTE(review): av_realloc result unchecked; OOM would leak the
             * old buffer and crash in the decode call below. */
            p_sys->p_output = av_realloc( p_sys->p_output, i_output );
        }

        av_init_packet( &pkt );
        pkt.data = p_block->p_buffer;
        pkt.size = p_block->i_buffer;
        i_used = avcodec_decode_audio3( p_sys->p_context, (int16_t*)p_sys->p_output, &i_output, &pkt );

        if( i_used < 0 || i_output < 0 )
        {
            if( i_used < 0 )
                msg_Warn( p_dec, "cannot decode one frame (%zu bytes)", p_block->i_buffer );
            block_Release( p_block );
            return NULL;
        }
        else if( (size_t)i_used > p_block->i_buffer )
        {
            i_used = p_block->i_buffer;
        }

        /* Consume the decoded bytes and retry until samples come out. */
        p_block->i_buffer -= i_used;
        p_block->p_buffer += i_used;
    } while( p_block->i_buffer > 0 && i_output <= 0 );

    /* Sanity-check what the decoder reported before trusting it. */
    if( p_sys->p_context->channels <= 0 || p_sys->p_context->channels > 8 || p_sys->p_context->sample_rate <= 0 )
    {
        msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d", p_sys->p_context->channels, p_sys->p_context->sample_rate );
        block_Release( p_block );
        return NULL;
    }

    if( p_dec->fmt_out.audio.i_rate != (unsigned int)p_sys->p_context->sample_rate )
    {
        date_Init( &p_sys->end_date, p_sys->p_context->sample_rate, 1 );
        date_Set( &p_sys->end_date, p_block->i_pts );
    }

    /* **** Set audio output parameters **** */
    SetupOutputFormat( p_dec, true );

    if( p_block->i_pts != 0 && p_block->i_pts != date_Get( &p_sys->end_date ) )
    {
        date_Set( &p_sys->end_date, p_block->i_pts );
    }
    p_block->i_pts = 0;

    /* **** Now we can output these samples **** */
    p_sys->i_samples = i_output / (p_dec->fmt_out.audio.i_bitspersample / 8) / p_sys->p_context->channels;
    p_sys->p_samples = p_sys->p_output;

    /* Silent unwanted samples */
    if( p_sys->i_reject_count > 0 )
    {
        memset( p_sys->p_output, 0, i_output );
        p_sys->i_reject_count--;
    }

    p_buffer = SplitBuffer( p_dec );
    if( !p_buffer ) block_Release( p_block );
    return p_buffer;
}
/* Decode one audio package into pDecoder->decoder.pBuffer.
 * Returns 1 when a frame was produced, 0 on error or when the package is
 * exhausted without output.  Bytes the codec did not consume are stashed in
 * pDecoder->tmp_data and prepended on the next call.
 *
 * Fixes vs. the previous version:
 *  - the prepend/save memcpy calls used &pkt_tmp.data (the address of the
 *    pointer itself) as source/destination instead of the packet payload;
 *  - tmp_data_length was never set when saving leftovers, and the final
 *    "pDecoder->tmp_data_length;" was a no-op statement (missing "= 0");
 *  - the decoded AVFrame was leaked on every path;
 *  - av_realloc results are checked instead of blindly overwriting;
 *  - tmp_data is NULLed after av_free to avoid a double free. */
int ac_decode_audio_package(lp_ac_package pPackage, lp_ac_audio_decoder pDecoder, lp_ac_decoder pDec)
{
    double pts;
    /* Write position in the destination buffer */
    int dest_buffer_pos = pDecoder->decoder.buffer_size;

    /* Work on a copy of the avformat packet so we can advance data/size */
    AVPacket pkt_tmp = ((lp_ac_package_data)pPackage)->ffpackage;
    AVFrame *decoded_frame = NULL;

    if (pDecoder->tmp_data_length > 0) {
        /* Prepend the bytes left over from the previous call */
        uint8_t *merged = av_realloc(pkt_tmp.data,
                                     pkt_tmp.size + pDecoder->tmp_data_length);
        if (!merged)
            return 0;
        memmove(merged + pDecoder->tmp_data_length, merged, pkt_tmp.size);
        memcpy(merged, pDecoder->tmp_data, pDecoder->tmp_data_length);
        pkt_tmp.data = merged;
        pkt_tmp.size += pDecoder->tmp_data_length;
    }

    while (pkt_tmp.size > 0) {
        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame()))
                return 0;
        } else {
            avcodec_get_frame_defaults(decoded_frame);
        }

        int got_frame = 0;
        int len1 = avcodec_decode_audio4(pDecoder->pCodecCtx, decoded_frame,
                                         &got_frame, &pkt_tmp);
        /* If an error occurred, skip the frame */
        if (len1 < 0) {
            av_free(decoded_frame);
            return 0;
        }

        /* Advance past the consumed input */
        pkt_tmp.size -= len1;
        pkt_tmp.data += len1;

        if (got_frame) {
            int data_size = av_samples_get_buffer_size(
                NULL, pDecoder->pCodecCtx->channels,
                decoded_frame->nb_samples,
                pDecoder->pCodecCtx->sample_fmt, 1);

            /* Grow the output buffer if needed */
            if (dest_buffer_pos + data_size > pDecoder->max_buffer_size) {
                void *grown = av_realloc(pDecoder->decoder.pBuffer,
                                         dest_buffer_pos + data_size);
                if (!grown) {
                    av_free(decoded_frame);
                    return 0;
                }
                pDecoder->decoder.pBuffer = grown;
                pDecoder->max_buffer_size = dest_buffer_pos + data_size;
            }
            memcpy(pDecoder->decoder.pBuffer + dest_buffer_pos,
                   decoded_frame->data[0], data_size);
            dest_buffer_pos += data_size;
            pDecoder->decoder.buffer_size += data_size;

            /* Derive the timestamp from the packet DTS when available,
             * otherwise extrapolate from the running clock */
            pts = 0;
            if (((lp_ac_package_data)pPackage)->ffpackage.dts != AV_NOPTS_VALUE) {
                pts = ((lp_ac_package_data)pPackage)->ffpackage.dts *
                      av_q2d(((lp_ac_data)pDec->pacInstance)->pFormatCtx->
                             streams[pPackage->stream_index]->time_base);
                pDec->video_clock = pts;
            } else {
                pts = pDec->video_clock;
            }
            /* NOTE(review): assumes 16-bit samples (the "2 *" factor) —
             * confirm against the decoder's output format */
            double bytes_per_second = 2 *
                pDec->stream_info.audio_info.samples_per_second *
                pDec->stream_info.audio_info.channel_count;
            if (bytes_per_second > 0)
                pDec->video_clock += data_size / bytes_per_second;
            pDec->timecode = pts;

            /* Save any unconsumed input for the next call */
            if (pkt_tmp.size > 0) {
                uint8_t *save = av_malloc(pkt_tmp.size);
                if (save) {
                    memcpy(save, pkt_tmp.data, pkt_tmp.size);
                    av_free(pDecoder->tmp_data);
                    pDecoder->tmp_data = save;
                    pDecoder->tmp_data_length = pkt_tmp.size;
                } else {
                    pDecoder->tmp_data_length = 0;
                }
            } else {
                av_free(pDecoder->tmp_data);
                pDecoder->tmp_data = NULL;
                pDecoder->tmp_data_length = 0;
            }
            av_free(decoded_frame);
            return 1;
        }
    }

    av_free(decoded_frame);
    av_free(pDecoder->tmp_data);
    pDecoder->tmp_data = NULL;
    pDecoder->tmp_data_length = 0;
    return 0;
}
/* Run the drawtext string through libfribidi: convert to UTF-32, resolve
 * bidi embedding levels, shape Arabic joining forms, reorder every line
 * visually, strip fill characters and write the result back into s->text
 * as UTF-8.  Returns 0 on success, AVERROR(ENOMEM) on allocation or
 * fribidi failure. */
static int shape_text(AVFilterContext *ctx)
{
    DrawTextContext *s = ctx->priv;
    static const FriBidiFlags flags = FRIBIDI_FLAGS_DEFAULT |
                                      FRIBIDI_FLAGS_ARABIC;
    FriBidiChar *u32 = NULL;
    FriBidiCharType *char_types = NULL;
    FriBidiLevel *levels = NULL;
    FriBidiArabicProp *ar_props = NULL;
    FriBidiParType base_dir = FRIBIDI_PAR_LTR;
    FriBidiStrIndex len, line_start, line_end, src, dst;
    uint8_t *tmp;
    int ret = AVERROR(ENOMEM);

    /* Decode the UTF-8 text into a UTF-32 working buffer */
    len = strlen(s->text);
    u32 = av_malloc_array(len, sizeof(*u32));
    if (!u32)
        goto out;
    len = fribidi_charset_to_unicode(FRIBIDI_CHAR_SET_UTF8,
                                     s->text, len, u32);

    /* Classify characters and resolve paragraph embedding levels */
    char_types = av_malloc_array(len, sizeof(*char_types));
    if (!char_types)
        goto out;
    fribidi_get_bidi_types(u32, len, char_types);

    levels = av_malloc_array(len, sizeof(*levels));
    if (!levels)
        goto out;
    if (!fribidi_get_par_embedding_levels(char_types, len, &base_dir, levels))
        goto out;

    /* Apply Arabic joining/shaping */
    ar_props = av_malloc_array(len, sizeof(*ar_props));
    if (!ar_props)
        goto out;
    fribidi_get_joining_types(u32, len, ar_props);
    fribidi_join_arabic(char_types, len, levels, ar_props);
    fribidi_shape(flags, levels, len, ar_props, u32);

    /* Reorder each line into visual order */
    for (line_start = 0, line_end = 0; line_end < len; line_end++) {
        if (is_newline(u32[line_end]) || line_end == len - 1) {
            if (!fribidi_reorder_line(flags, char_types,
                                      line_end - line_start + 1,
                                      line_start, base_dir, levels,
                                      u32, NULL))
                goto out;
            line_start = line_end + 1;
        }
    }

    /* Remove zero-width fill chars put in by libfribidi */
    for (src = 0, dst = 0; src < len; src++)
        if (u32[src] != FRIBIDI_CHAR_FILL)
            u32[dst++] = u32[src];
    len = dst;

    /* Re-encode to UTF-8; a code point needs at most 4 bytes plus NUL */
    tmp = av_realloc(s->text, (len * 4 + 1) * sizeof(*s->text));
    if (!tmp)
        goto out;
    s->text = tmp;
    len = fribidi_unicode_to_charset(FRIBIDI_CHAR_SET_UTF8,
                                     u32, len, s->text);
    ret = 0;

out:
    av_free(u32);
    av_free(levels);
    av_free(ar_props);
    av_free(char_types);
    return ret;
}
// --------------------------------------------------------------------------------- static PyObject * Decoder_Decode( PyACodecObject* obj, PyObject *args) { unsigned char* sData; void* pBuf; PyAFrameObject *cFrame= NULL; int iLen, out_size, len, iBufSize, iPos= 0; if (!PyArg_ParseTuple(args, "s#:decode", &sData, &iLen )) return NULL; // Get the header data first //need to add padding to buffer for libavcodec if( !Codec_AdjustPadBuffer( obj, iLen ) ) { PyErr_NoMemory(); return NULL; } memcpy( obj->pPaddedBuf, sData, iLen); sData=(uint8_t*)obj->pPaddedBuf; // Realloc memory iBufSize= AVCODEC_MAX_AUDIO_FRAME_SIZE* 2; pBuf= av_malloc( iBufSize ); if( !pBuf ) { PyErr_NoMemory(); return NULL; } while( iLen> 0 ) { if( iBufSize- iPos< AVCODEC_MAX_AUDIO_FRAME_SIZE ) { pBuf= av_realloc( pBuf, iBufSize+ AVCODEC_MAX_AUDIO_FRAME_SIZE* 2 ); if( !pBuf ) { PyErr_NoMemory(); return NULL; } iBufSize+= AVCODEC_MAX_AUDIO_FRAME_SIZE* 2; } out_size= 0; len= obj->cCodec->codec->decode( obj->cCodec, (char*)pBuf+ iPos, &out_size, sData, iLen ); if( len < 0 ) { // Need to report out the error( it should be in the error list ) /*while( g_AvilibErr[ i ].iErrCode ) if( g_AvilibErr[ i ].iErrCode== len ) { PyErr_SetString(g_cErr, g_AvilibErr[ i ].sErrDesc ); return NULL; } else i++; */ PyErr_Format(g_cErr, "Unspecified error %d. 
Cannot find any help text for it.", len ); av_free( pBuf ); return NULL; } else { iLen-= len; sData+= len; if( out_size> 0 ) { iPos+= out_size; if( cFrame ) { cFrame->cData->pData= pBuf; cFrame->cData->iLen= iPos; } else { PyACStringObject* cRes; cFrame= (PyAFrameObject*)PyObject_New( PyAFrameObject, &FrameType ); if( !cFrame ) return NULL; cRes= ACString_New( pBuf, out_size ); if( !cRes ) return NULL; cFrame->bit_rate= obj->cCodec->bit_rate; cFrame->sample_rate= obj->cCodec->sample_rate; cFrame->bits_per_sample= obj->cCodec->bits_per_sample; cFrame->channels= obj->cCodec->channels; cFrame->cData= cRes; } } } } #ifdef HAVE_MMX emms(); #endif if( !cFrame ) free( pBuf ); else return (PyObject*)cFrame; if( out_size ) // Raise an error if data was found but no frames created return NULL; RETURN_NONE }