/**
 * Convert a rational number to the bit pattern of the nearest IEEE-754
 * single-precision float.
 *
 * Special cases: 0/0 -> 0xFFC00000 (quiet NaN), 0/x -> +0.0,
 * x/0 -> infinity. Otherwise the value is normalized so the mantissa
 * lands in [2^23, 2^24) and assembled as sign|exponent|mantissa.
 */
uint32_t av_q2intfloat(AVRational q) {
    int64_t n;
    int shift;
    int sign = 0;

    /* Normalize: make the denominator positive, then move the numerator's
     * sign into `sign` so the remaining math works on magnitudes. */
    if (q.den < 0) {
        q.den *= -1;
        q.num *= -1;
    }
    if (q.num < 0) {
        q.num *= -1;
        sign = 1;
    }

    if (!q.num && !q.den) return 0xFFC00000;              /* 0/0: NaN */
    if (!q.num) return 0;                                 /* exact +0.0 */
    /* NOTE(review): q.num is non-negative here, so (q.num & 0x80000000) is
     * always 0 and x/0 yields +infinity regardless of `sign` — confirm
     * whether negative infinity is intended upstream. */
    if (!q.den) return 0x7F800000 | (q.num & 0x80000000);

    /* First estimate of the shift that scales num/den into [2^23, 2^24). */
    shift = 23 + av_log2(q.den) - av_log2(q.num);
    if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
    else n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);

    /* The log2 estimate can be off by one in either direction; nudge the
     * shift and recompute once. */
    shift -= n >= (1<<24);
    shift += n < (1<<23);

    if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
    else n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);

    av_assert1(n < (1<<24));
    av_assert1(n >= (1<<23));

    /* 150 - shift == 127 (bias) + 23 - shift: the biased exponent.
     * n - 2^23 drops the implicit leading mantissa bit. */
    return sign<<31 | (150-shift)<<23 | (n - (1<<23));
}
/**
 * Wrap one encoded audio frame in an RTP header and write the complete
 * packet into outBuff.
 *
 * @param inBuff    encoded audio payload
 * @param inBuffLen payload length in bytes
 * @param outBuff   destination buffer; receives header followed by payload
 * @param pts       presentation time in milliseconds; 0 means "no pts",
 *                  in which case a timestamp is synthesized from the
 *                  running sequence number
 * @return total packet length (header + payload), or -1 if the audio
 *         packager was never initialized
 */
int OutputProcessor::packageAudio(unsigned char* inBuff, int inBuffLen, unsigned char* outBuff, long int pts) {
    if (audioPackager == 0) {
        ELOG_DEBUG("No se ha inicializado el codec de output audio RTP");
        return -1;
    }

    // Wall-clock millis; only referenced by the commented-out timestamp
    // experiments below, kept for context.
    timeval time;
    gettimeofday(&time, NULL);
    long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);

    RtpHeader head;
    head.setSeqNumber(audioSeqnum_++);
    // head.setTimestamp(millis*8);
    head.setMarker(1);
    if (pts==0){
        // head.setTimestamp(audioSeqnum_*160);
        // No pts supplied: advance the timestamp by (sampleRate/1000)
        // ticks per packet, driven by the sequence counter.
        head.setTimestamp(av_rescale(audioSeqnum_, (mediaInfo.audioCodec.sampleRate/1000), 1));
    }else{
        // head.setTimestamp(pts*8);
        // Convert milliseconds to sample-rate clock ticks.
        head.setTimestamp(av_rescale(pts, mediaInfo.audioCodec.sampleRate,1000));
    }
    head.setSSRC(44444);
    head.setPayloadType(mediaInfo.rtpAudioInfo.PT);

    // memcpy (rtpAudioBuffer_, &head, head.getHeaderLength());
    // memcpy(&rtpAudioBuffer_[head.getHeaderLength()], inBuff, inBuffLen);
    memcpy (outBuff, &head, head.getHeaderLength());
    memcpy(&outBuff[head.getHeaderLength()], inBuff, inBuffLen);
    // sink_->sendData(rtpBuffer_, l);
    // rtpReceiver_->receiveRtpData(rtpBuffer_, (inBuffLen + RTP_HEADER_LEN));
    return (inBuffLen+head.getHeaderLength());
}
/**
 * Shared open/setup logic for the dc1394 camera demuxer.
 *
 * Resolves the requested pixel format / frame size / frame rate (or
 * defaults) against the tables of supported camera modes, creates the
 * single raw-video stream and pre-initializes the reusable packet.
 *
 * @param select_fmt out: matched frame format table entry
 * @param select_fps out: matched frame rate table entry
 * @return 0 on success, -1 if no matching mode or stream allocation fails
 */
static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap, struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
{
    dc1394_data* dc1394 = c->priv_data;
    AVStream* vst;
    struct dc1394_frame_format *fmt;
    struct dc1394_frame_rate *fps;
    /* Fall back to UYVY 320x240 @ 30fps when the caller gave no hints.
     * frame_rate is expressed in frames per 1000 seconds (fps * 1000). */
    enum PixelFormat pix_fmt = ap->pix_fmt == PIX_FMT_NONE ? PIX_FMT_UYVY422 : ap->pix_fmt; /* defaults */
    int width = !ap->width ? 320 : ap->width;
    int height = !ap->height ? 240 : ap->height;
    int frame_rate = !ap->time_base.num ? 30000 : av_rescale(1000, ap->time_base.den, ap->time_base.num);

    /* Both tables are terminated by a zeroed sentinel entry. */
    for (fmt = dc1394_frame_formats; fmt->width; fmt++)
        if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
            break;

    for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
        if (fps->frame_rate == frame_rate)
            break;

    if (!fps->frame_rate || !fmt->width) {
        av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", avcodec_get_pix_fmt_name(pix_fmt), width, height, frame_rate);
        goto out;
    }

    /* create a video stream */
    vst = av_new_stream(c, 0);
    if (!vst)
        goto out;
    av_set_pts_info(vst, 64, 1, 1000); /* pts in milliseconds */
    vst->codec->codec_type = CODEC_TYPE_VIDEO;
    vst->codec->codec_id = CODEC_ID_RAWVIDEO;
    vst->codec->time_base.den = fps->frame_rate;
    vst->codec->time_base.num = 1000;
    vst->codec->width = fmt->width;
    vst->codec->height = fmt->height;
    vst->codec->pix_fmt = fmt->pix_fmt;

    /* packet init: every captured frame has the same fixed size, so the
     * packet fields are set up once here. */
    av_init_packet(&dc1394->packet);
    dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
    dc1394->packet.stream_index = vst->index;
    dc1394->packet.flags |= PKT_FLAG_KEY;

    dc1394->current_frame = 0;
    dc1394->fps = fps->frame_rate;

    /* Raw video: bit rate is frame size * 8 bits * fps. */
    vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
    *select_fps = fps;
    *select_fmt = fmt;
    return 0;
out:
    return -1;
}
/**
 * Main demuxing loop for an external (e.g. RTSP/file) input.
 *
 * Reads packets until EOF or until running_ is cleared. When transcoding
 * is required, video packets are decoded and queued as raw frames;
 * otherwise packets are paced against wall-clock time (sleeping until
 * their pts is due) and re-packaged as RTP.
 */
void ExternalInput::receiveLoop() {
    av_read_play(context_);  // play RTSP
    int gotDecodedFrame = 0;
    int length;
    startTime_ = av_gettime();

    ELOG_DEBUG("Start playing external input %s", url_.c_str() );
    while (av_read_frame(context_, &avpacket_) >= 0&& running_ == true) {
        AVPacket orig_pkt = avpacket_;  // keep original for av_free_packet
        if (needTranscoding_) {
            if (avpacket_.stream_index == video_stream_index_) {  // packet is video
                inCodec_.decodeVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), bufflen_, &gotDecodedFrame);
                RawDataPacket packetR;
                if (gotDecodedFrame) {
                    packetR.data = decodedBuffer_.get();
                    packetR.length = bufflen_;
                    packetR.type = VIDEO;
                    // Queue is shared with the consumer thread.
                    queueMutex_.lock();
                    packetQueue_.push(packetR);
                    queueMutex_.unlock();
                    gotDecodedFrame = 0;
                }
            }
        } else {
            if (avpacket_.stream_index == video_stream_index_) {  // packet is video
                // av_rescale(input, new_scale, old_scale)
                // Pace on the PREVIOUS packet's pts converted to microseconds,
                // then sleep until that instant relative to startTime_.
                int64_t pts = av_rescale(lastPts_, 1000000, (long int) video_time_base_);  // NOLINT
                int64_t now = av_gettime() - startTime_;
                if (pts > now) {
                    av_usleep(pts - now);
                }
                lastPts_ = avpacket_.pts;
                op_->packageVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
            } else if (avpacket_.stream_index == audio_stream_index_) {  // packet is audio
                int64_t pts = av_rescale(lastAudioPts_, 1000000, (long int)audio_time_base_);  // NOLINT
                int64_t now = av_gettime() - startTime_;
                if (pts > now) {
                    av_usleep(pts - now);
                }
                lastAudioPts_ = avpacket_.pts;
                length = op_->packageAudio(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
                if (length > 0) {
                    audioSink_->deliverAudioData(reinterpret_cast<char*>(decodedBuffer_.get()), length);
                }
            }
        }
        av_free_packet(&orig_pkt);
    }
    ELOG_DEBUG("Ended stream to play %s", url_.c_str());
    running_ = false;
    av_read_pause(context_);
}
/**
 * Seek in raw PCM by computing the byte position directly from the
 * timestamp (PCM is constant-rate, so position = time * byte_rate,
 * aligned to the codec block size).
 *
 * @param stream_index unused; stream 0 is always used
 * @param timestamp    target in stream time_base units (clamped to >= 0)
 * @param flags        AVSEEK_FLAG_BACKWARD selects rounding down
 * @return 0 on success, negative on error
 */
int ff_pcm_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st;
    int block_align, byte_rate;
    int64_t pos, ret;

    st = s->streams[0];

    /* Derive block size / byte rate from whatever the codec parameters
     * provide, falling back to bits-per-sample * channels. */
    block_align = st->codecpar->block_align ? st->codecpar->block_align :
        (av_get_bits_per_sample(st->codecpar->codec_id) * st->codecpar->channels) >> 3;
    byte_rate = st->codecpar->bit_rate ? st->codecpar->bit_rate >> 3 :
        block_align * st->codecpar->sample_rate;

    if (block_align <= 0 || byte_rate <= 0)
        return -1;
    if (timestamp < 0) timestamp = 0;

    /* compute the position by aligning it to block_align: the rescale
     * yields a block index (direction-aware rounding), then scale back
     * up to bytes. */
    pos = av_rescale_rnd(timestamp * byte_rate,
                         st->time_base.num,
                         st->time_base.den * (int64_t)block_align,
                         (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
    pos *= block_align;

    /* recompute exact position: the dts matching the aligned byte offset */
    st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
    if ((ret = avio_seek(s->pb, pos + s->internal->data_offset, SEEK_SET)) < 0)
        return ret;
    return 0;
}
//Seek function int CALL_CONVT ac_seek(lp_ac_decoder pDecoder, int dir, int64_t target_pos) { AVRational timebase = ((lp_ac_data)pDecoder->pacInstance)->pFormatCtx->streams[pDecoder->stream_index]->time_base; int flags = dir < 0 ? AVSEEK_FLAG_BACKWARD : 0; int64_t pos = av_rescale(target_pos, AV_TIME_BASE, 1000); ((lp_ac_decoder_data)pDecoder)->sought = 100; pDecoder->timecode = target_pos / 1000; if (av_seek_frame(((lp_ac_data)pDecoder->pacInstance)->pFormatCtx, pDecoder->stream_index, av_rescale_q(pos, AV_TIME_BASE_Q, timebase), flags) >= 0) { if (pDecoder->type == AC_DECODER_TYPE_AUDIO) { if (((lp_ac_audio_decoder)pDecoder)->pCodecCtx->codec->flush != NULL) avcodec_flush_buffers(((lp_ac_audio_decoder)pDecoder)->pCodecCtx); av_free(((lp_ac_audio_decoder)pDecoder)->tmp_data); ((lp_ac_audio_decoder)pDecoder)->tmp_data_length = 0; } return 1; } return 0; }
void AVFile::seekToPercent(float p) { if (0. < p && p < 1.) { AVStream * s = formatCtx->streams[audioStream]; _seek_to = av_rescale(p * formatCtx->duration, s->time_base.den, AV_TIME_BASE * s->time_base.num); } }
/**
 * Finalize a WAV file: close the data chunk, patch the RIFF size field
 * at offset 4, and for non-PCM codecs update the sample count in the
 * fact chunk. Only possible on seekable output.
 */
static int wav_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    WAVContext *wav = s->priv_data;
    int64_t file_size;

    put_flush_packet(pb);

    if (!url_is_streamed(s->pb)) {
        ff_end_tag(pb, wav->data);

        /* update file size */
        file_size = url_ftell(pb);
        url_fseek(pb, 4, SEEK_SET);
        put_le32(pb, (uint32_t)(file_size - 8)); /* RIFF size excludes the 8-byte header */
        url_fseek(pb, file_size, SEEK_SET);

        put_flush_packet(pb);

        if(s->streams[0]->codec->codec_tag != 0x01) {
            /* Update num_samps in fact chunk: total pts span converted
             * from the stream timebase to samples. */
            int number_of_samples;
            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);
            url_fseek(pb, wav->data-12, SEEK_SET); /* fact payload sits 12 bytes before the data chunk */
            put_le32(pb, number_of_samples);
            url_fseek(pb, file_size, SEEK_SET);
            put_flush_packet(pb);
        }
    }
    return 0;
}
/**
 * Source-mode request_frame for the LADSPA filter.
 *
 * If the filter has inputs it simply forwards the request upstream.
 * Otherwise it generates s->nb_samples of audio by running the plugin,
 * stopping with EOF once the configured duration is reached.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LADSPAContext *s = ctx->priv;
    AVFrame *out;
    int64_t t;
    int i;

    /* Filter mode: pass the request through to the first input. */
    if (ctx->nb_inputs)
        return ff_request_frame(ctx->inputs[0]);

    /* Source mode: convert the sample counter to AV_TIME_BASE units and
     * check against the requested duration. */
    t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
    if (s->duration >= 0 && t >= s->duration)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, s->nb_samples);
    if (!out)
        return AVERROR(ENOMEM);

    /* Point each plugin output port at the corresponding channel buffer,
     * then run the plugin for one block. */
    for (i = 0; i < s->nb_outputs; i++)
        s->desc->connect_port(s->handles[0], s->opmap[i],
                (LADSPA_Data*)out->extended_data[i]);

    s->desc->run(s->handles[0], s->nb_samples);

    for (i = 0; i < s->nb_outputcontrols; i++)
        print_ctl_info(ctx, AV_LOG_INFO, s, i, s->ocmap, s->octlv, 1);

    out->sample_rate = s->sample_rate;
    out->pts         = s->pts;
    s->pts          += s->nb_samples;

    return ff_filter_frame(outlink, out);
}
/**
 * Finalize a Sony Wave64 file: close the data chunk, patch the 64-bit
 * file size at offset 16, and for non-PCM codecs write the 64-bit
 * sample count into the fact chunk. Requires seekable output.
 */
static int w64_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WAVMuxContext *wav = s->priv_data;
    int64_t file_size;

    if (pb->seekable) {
        end_guid(pb, wav->data);

        /* update file size: W64 stores the full size as little-endian 64-bit */
        file_size = avio_tell(pb);
        avio_seek(pb, 16, SEEK_SET);
        avio_wl64(pb, file_size);

        if (s->streams[0]->codec->codec_tag != 0x01) {
            /* Total pts span converted from the stream timebase to samples. */
            int64_t number_of_samples;

            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);

            /* fact chunk payload starts 24 bytes after the chunk guid. */
            avio_seek(pb, wav->fact_pos + 24, SEEK_SET);
            avio_wl64(pb, number_of_samples);
        }

        avio_seek(pb, file_size, SEEK_SET);
        avio_flush(pb);
    }
    return 0;
}
/**
 * Write the 16-byte GXF media packet preamble for one packet.
 *
 * Layout: media type, track index, 32-bit timestamp, a 6-byte
 * codec-specific field block, the timestamp again, flags and a
 * reserved byte.
 *
 * @return number of preamble bytes written (always 16)
 */
static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
{
    GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
    /* Convert dts from the stream's sample rate to the mux sample rate. */
    int64_t dts = av_rescale(pkt->dts, ctx->sample_rate, sc->sample_rate);

    put_byte(pb, sc->media_type);
    put_byte(pb, sc->index);
    put_be32(pb, dts);
    if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
        put_be16(pb, 0);
        put_be16(pb, size / 2); /* audio size is given in 16-bit samples */
    } else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* Frame-type byte (and per-type counters), then 24-bit size. */
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == FF_I_TYPE) {
            put_byte(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == FF_B_TYPE) {
            put_byte(pb, 0x0f);
            sc->bframes++;
        } else {
            put_byte(pb, 0x0e);
            sc->pframes++;
        }
        put_be24(pb, size);
    } else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
        put_byte(pb, size / 4096); /* DV frame size in 4 KiB blocks */
        put_be24(pb, 0);
    } else
        put_be32(pb, size);
    put_be32(pb, dts);
    put_byte(pb, 1); /* flags */
    put_byte(pb, 0); /* reserved */
    return 16;
}
/**
 * This was the second switch in rtp_parse packet.
 * Normalizes time, if required, sets stream_index, etc.
 *
 * Two pts strategies:
 *  - with RTCP sender reports (and >1 stream): anchor the RTP timestamp
 *    to the NTP wall clock so streams stay in sync;
 *  - otherwise: unwrap the 32-bit RTP timestamp into a monotonically
 *    increasing 64-bit value relative to the first timestamp seen.
 */
static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
{
    if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE)
        return; /* Timestamp already set by depacketizer */
    if (timestamp == RTP_NOTS_VALUE)
        return;

    if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE && s->ic->nb_streams > 1) {
        int64_t addend;
        int delta_timestamp;

        /* compute pts from timestamp with received ntp_time */
        delta_timestamp = timestamp - s->last_rtcp_timestamp;
        /* convert to the PTS timebase: NTP time is a 32.32 fixed-point
         * seconds value, hence the << 32 on the denominator. */
        addend = av_rescale(s->last_rtcp_ntp_time - s->first_rtcp_ntp_time,
                            s->st->time_base.den,
                            (uint64_t) s->st->time_base.num << 32);
        pkt->pts = s->range_start_offset + s->rtcp_ts_offset + addend +
                   delta_timestamp;
        return;
    }

    if (!s->base_timestamp)
        s->base_timestamp = timestamp;
    /* assume that the difference is INT32_MIN < x < INT32_MAX,
     * but allow the first timestamp to exceed INT32_MAX */
    if (!s->timestamp)
        s->unwrapped_timestamp += timestamp;
    else
        s->unwrapped_timestamp += (int32_t)(timestamp - s->timestamp);
    s->timestamp = timestamp;
    pkt->pts = s->unwrapped_timestamp + s->range_start_offset -
               s->base_timestamp;
}
/**
 * Parse a textual peak value of the form "0.ddd..." into a uint32
 * scaled so that 1.0 maps to UINT32_MAX.
 *
 * "1." (exact full scale) saturates to UINT32_MAX; NULL or any string
 * not starting with "0." or "1." yields 0.
 */
static uint32_t parse_peak(const uint8_t *peak)
{
    int64_t mantissa = 0;
    int64_t divisor  = 1;

    if (!peak)
        return 0;

    /* Skip leading blanks. */
    peak += strspn(peak, " \t");

    if (peak[0] == '1' && peak[1] == '.')
        return UINT32_MAX;
    if (peak[0] != '0' || peak[1] != '.')
        return 0;
    peak += 2;

    /* Accumulate fractional digits, stopping before the divisor would
     * overflow int64; excess precision is simply dropped. */
    for (; av_isdigit(*peak); peak++) {
        if (divisor > INT64_MAX / 10)
            break;
        mantissa = 10 * mantissa + (*peak - '0');
        divisor *= 10;
    }

    /* Map the fraction [0,1) onto the full uint32 range. */
    return av_rescale(mantissa, UINT32_MAX, divisor);
}
/**
 * Finalize a WAV file (AVIO variant): close the data chunk, patch the
 * RIFF size at offset 4, and for non-PCM codecs update the sample count
 * in the fact chunk. Requires seekable output.
 */
static int wav_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WAVMuxContext *wav = s->priv_data;
    int64_t file_size;

    avio_flush(pb);

    if (s->pb->seekable) {
        ff_end_tag(pb, wav->data);

        /* update file size */
        file_size = avio_tell(pb);
        avio_seek(pb, 4, SEEK_SET);
        avio_wl32(pb, (uint32_t)(file_size - 8)); /* RIFF size excludes the 8-byte header */
        avio_seek(pb, file_size, SEEK_SET);

        avio_flush(pb);

        if(s->streams[0]->codec->codec_tag != 0x01) {
            /* Update num_samps in fact chunk: total pts span converted
             * from the stream timebase to samples. */
            int number_of_samples;
            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);
            avio_seek(pb, wav->fact_pos, SEEK_SET);
            avio_wl32(pb, number_of_samples);
            avio_seek(pb, file_size, SEEK_SET);
            avio_flush(pb);
        }
    }
    return 0;
}
/**
 * Duration of the active Blu-ray title in REFERENCE_TIME (100 ns) units.
 *
 * Title durations are stored in 90 kHz ticks; 10^7 / 90000 reduces to
 * 1000 / 9, hence the rescale. When no title is selected, defer to the
 * underlying lavf demuxer.
 */
REFERENCE_TIME CBDDemuxer::GetDuration() const
{
  if (!m_pTitle)
    return m_lavfDemuxer->GetDuration();

  // 90 kHz ticks -> 100 ns units.
  return av_rescale(m_pTitle->duration, 1000, 9);
}
/**
 * Rewrite the in-memory Xing/Info frame with final values (frame count,
 * stream size, seek TOC, ReplayGain, audio size/CRC and the tag CRC)
 * and patch it back into the output file at its recorded offset.
 *
 * All patch offsets below are fixed positions within the Xing tag,
 * relative to mp3->xing_offset.
 */
static void mp3_update_xing(AVFormatContext *s)
{
    MP3Context  *mp3 = s->priv_data;
    AVReplayGain *rg;
    uint16_t tag_crc;
    uint8_t *toc;
    int i, rg_size;

    /* replace "Xing" identification string with "Info" for CBR files. */
    if (!mp3->has_variable_bitrate)
        AV_WL32(mp3->xing_frame + mp3->xing_offset, MKTAG('I', 'n', 'f', 'o'));

    AV_WB32(mp3->xing_frame + mp3->xing_offset +  8, mp3->frames);
    AV_WB32(mp3->xing_frame + mp3->xing_offset + 12, mp3->size);

    /* Build the 100-entry TOC: entry i is the file position (in 1/256ths
     * of the total size) of the frame at fraction i/100 of the stream. */
    toc    = mp3->xing_frame + mp3->xing_offset + 16;
    toc[0] = 0;  // first toc entry has to be zero.
    for (i = 1; i < XING_TOC_SIZE; ++i) {
        int j = i * mp3->pos / XING_TOC_SIZE;
        int seek_point = 256LL * mp3->bag[j] / mp3->size;
        toc[i] = FFMIN(seek_point, 255);
    }

    /* write replaygain */
    rg = (AVReplayGain*)av_stream_get_side_data(s->streams[0], AV_PKT_DATA_REPLAYGAIN,
                                                &rg_size);
    if (rg && rg_size >= sizeof(*rg)) {
        uint16_t val;

        /* Peak is stored as a 9.23 fixed-point value. */
        AV_WB32(mp3->xing_frame + mp3->xing_offset + 131,
                av_rescale(rg->track_peak, 1 << 23, 100000));

        if (rg->track_gain != INT32_MIN) {
            /* 9-bit magnitude in 0.1 dB units, sign bit, originator code. */
            val  = FFABS(rg->track_gain / 10000) & ((1 << 9) - 1);
            val |= (rg->track_gain < 0) << 9;
            val |= 1 << 13;
            AV_WB16(mp3->xing_frame + mp3->xing_offset + 135, val);
        }

        if (rg->album_gain != INT32_MIN) {
            val  = FFABS(rg->album_gain / 10000) & ((1 << 9) - 1);
            val |= (rg->album_gain < 0) << 9;
            val |= 1 << 14;
            AV_WB16(mp3->xing_frame + mp3->xing_offset + 137, val);
        }
    }

    AV_WB32(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 8, mp3->audio_size);
    AV_WB16(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 4, mp3->audio_crc);

    /* The tag CRC covers the first 190 bytes of the frame. */
    tag_crc = av_crc(av_crc_get_table(AV_CRC_16_ANSI_LE), 0, mp3->xing_frame, 190);
    AV_WB16(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 2, tag_crc);

    /* Patch the finished frame into the file and return to the end. */
    avio_seek(s->pb,  mp3->xing_frame_offset, SEEK_SET);
    avio_write(s->pb, mp3->xing_frame, mp3->xing_frame_size);
    avio_seek(s->pb, 0, SEEK_END);
}
/**
 * Try to find Xing/Info/VBRI tags and compute duration from info therein
 *
 * @param base file offset of the first mpegaudio frame header
 * @return 0 if a usable tag was found (st->duration / bit_rate updated),
 *         -1 otherwise
 */
static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base)
{
    uint32_t v, spf;
    unsigned frames = 0; /* Total number of frames in file */
    unsigned size = 0; /* Total number of bytes in the stream */
    /* Xing tag offset depends on MPEG version (lsf) and channel count. */
    const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
    MPADecodeHeader c;
    int vbrtag_size = 0;

    v = avio_rb32(s->pb);
    if(ff_mpa_check_header(v) < 0)
      return -1;

    if (avpriv_mpegaudio_decode_header(&c, v) == 0)
        vbrtag_size = c.frame_size;
    if(c.layer != 3)
        return -1;

    /* Check for Xing / Info tag */
    avio_skip(s->pb, xing_offtbl[c.lsf == 1][c.nb_channels == 1]);
    v = avio_rb32(s->pb);
    if(v == MKBETAG('X', 'i', 'n', 'g') || v == MKBETAG('I', 'n', 'f', 'o')) {
        v = avio_rb32(s->pb); /* flags word: which optional fields follow */
        if(v & 0x1)
            frames = avio_rb32(s->pb);
        if(v & 0x2)
            size = avio_rb32(s->pb);
    }

    /* Check for VBRI tag (always 32 bytes after end of mpegaudio header) */
    avio_seek(s->pb, base + 4 + 32, SEEK_SET);
    v = avio_rb32(s->pb);
    if(v == MKBETAG('V', 'B', 'R', 'I')) {
        /* Check tag version */
        if(avio_rb16(s->pb) == 1) {
            /* skip delay and quality */
            avio_skip(s->pb, 4);
            size = avio_rb32(s->pb);
            frames = avio_rb32(s->pb);
        }
    }

    if(!frames && !size)
        return -1;

    /* Skip the vbr tag frame */
    avio_seek(s->pb, base + vbrtag_size, SEEK_SET);

    spf = c.lsf ? 576 : 1152; /* Samples per frame, layer 3 */
    if(frames)
        st->duration = av_rescale_q(frames, (AVRational){spf, c.sample_rate},
                                    st->time_base);
    /* Average bit rate = total bits / total duration in seconds. */
    if(size && frames)
        st->codec->bit_rate = av_rescale(size, 8 * c.sample_rate, frames * (int64_t)spf);

    return 0;
}
/**
 * Compute the resampler's current delay (buffered-but-unconsumed input),
 * expressed in units of `base` ticks per second.
 */
static int64_t get_delay(struct SwrContext *s, int64_t base){
    ResampleContext *rc = s->resample;

    /* Pending input in phase-accumulator units: buffered samples minus
     * half the filter length, scaled by phase count and source increment,
     * minus the fractional phase already consumed. */
    int64_t pending = s->in_buffer_count - (rc->filter_length - 1) / 2;
    pending = (pending * rc->phase_count - rc->index) * rc->src_incr - rc->frac;

    /* Convert accumulator units to `base` units. */
    return av_rescale(pending, base,
                      s->in_sample_rate * (int64_t)rc->src_incr * rc->phase_count);
}
/**
 * Configure the silenceremove filter for the input link's format:
 * allocate the RMS window and the start/stop hold-off buffers, and
 * convert the configured durations from AV_TIME_BASE to samples.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceRemoveContext *s = ctx->priv;

    /* 20 ms analysis window, interleaved across all channels. */
    s->window_size = (inlink->sample_rate / 50) * inlink->channels;
    s->window = av_malloc_array(s->window_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    clear_rms(s);

    /* Durations were parsed in AV_TIME_BASE units; convert to samples. */
    s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_duration  = av_rescale(s->stop_duration, inlink->sample_rate,
                                   AV_TIME_BASE);

    /* FFMAX(..., 1): always allocate at least one sample per channel. */
    s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
                                       sizeof(*s->start_holdoff) *
                                       inlink->channels);
    if (!s->start_holdoff)
        return AVERROR(ENOMEM);
    s->start_holdoff_offset = 0;
    s->start_holdoff_end    = 0;
    s->start_found_periods  = 0;

    s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
                                      sizeof(*s->stop_holdoff) *
                                      inlink->channels);
    if (!s->stop_holdoff)
        return AVERROR(ENOMEM);
    s->stop_holdoff_offset = 0;
    s->stop_holdoff_end    = 0;
    s->stop_found_periods  = 0;

    /* Start in TRIM mode only when leading silence should be removed. */
    if (s->start_periods)
        s->mode = SILENCE_TRIM;
    else
        s->mode = SILENCE_COPY;

    return 0;
}
int MP4Parser::mp4_read_packet(FormatContext *s, Packet *pkt) { int selected_stream = -1; int64_t best_dts = INT64_MAX; MP4Parser *obj = NULL; Track *trak = NULL; ReadStatus *rstatus = NULL; // Read frame with monotonous decode timestamp for (unsigned i = 0; i < s->nb_streams; ++i) { Stream *st = s->streams[i]; obj = (MP4Parser *) st->priv_data;; trak = &obj->m_track[st->id]; rstatus = &obj->m_status[st->id]; // Already read to the end of this stream, ignore it and try another one if (rstatus->sample_idx == trak->stsz->sample_count) continue; int64_t dts = av_rescale(rstatus->dts.val, AV_TIME_BASE, st->time_base.den); if (selected_stream == -1 || ((abs(best_dts - dts) <= 4 && rstatus->sample_offset < obj->m_status[selected_stream].sample_offset) || (abs(best_dts - dts) > 4 && dts < best_dts))) { selected_stream = i; best_dts = dts; } } // No more frame to be read, parse done if (selected_stream < 0) return -1; trak = &obj->m_track[selected_stream]; rstatus = &obj->m_status[selected_stream]; SampleEntry sentry; Frame frame; if (locate_sample(trak, rstatus, &sentry) < 0 || read_frame(obj->m_file, trak, &sentry, &frame) < 0) return -1; pkt->stream_index = selected_stream; pkt->pts = frame.get_dts() + frame.get_composition_time(); pkt->dts = frame.get_dts(); pkt->duration = rstatus->dts.val - pkt->dts; pkt->pos = sentry.sample_offset; // Hack style, optimize the memory algorithm pkt->data = frame.get_data(); pkt->size = frame.get_data_length(); frame.set_data(NULL); #ifdef XDEBUG LOGD("%s pkt->pts=%lld, pkt->dts=%lld (composition_time=%u), pkt->size=%d, pkt->duration: %d, current_sample#=%d, total samples#=%d", pkt->stream_index == AUDIO ? "AUDIO" : "VIDEO", pkt->pts, pkt->dts, frame.get_composition_time(), pkt->size, pkt->duration, rstatus->sample_idx, trak->stsz->sample_count); #endif return 0; }
/**
 * Fragment one encoded VP8 frame into RTP packets and hand each packet
 * to the RTP receiver.
 *
 * @param inBuff   encoded VP8 frame
 * @param buffSize frame length in bytes (must be > 0)
 * @param outBuff  scratch buffer for each fragment's payload
 * @param pts      presentation time in ms; 0 falls back to wall-clock time
 * @return 0 on success, -1 if the packager is missing or buffSize <= 0
 */
int OutputProcessor::packageVideo(unsigned char* inBuff, int buffSize, unsigned char* outBuff,
                                  long int pts) {
    if (videoPackager == 0) {
        ELOG_DEBUG("No se ha inicailizado el codec de output vídeo RTP");
        return -1;
    }

    // ELOG_DEBUG("To packetize %u", buffSize);
    if (buffSize <= 0)
        return -1;
    // Split the frame into <=1100-byte VP8 payload fragments.
    RtpVP8Fragmenter frag(inBuff, buffSize, 1100);
    bool lastFrame = false;
    unsigned int outlen = 0;
    // Wall-clock millis, used as timestamp source when no pts is given.
    timeval time;
    gettimeofday(&time, NULL);
    long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    // timestamp_ += 90000 / mediaInfo.videoCodec.frameRate;
    // int64_t pts = av_rescale(lastPts_, 1000000, (long int)video_time_base_);

    do {
        outlen = 0;
        frag.getPacket(outBuff, &outlen, &lastFrame);
        RtpHeader rtpHeader;
        // Marker bit set only on the final fragment of the frame.
        rtpHeader.setMarker(lastFrame?1:0);
        rtpHeader.setSeqNumber(seqnum_++);
        if (pts==0){
            // ms -> 90 kHz RTP video clock.
            rtpHeader.setTimestamp(av_rescale(millis, 90000, 1000));
        }else{
            rtpHeader.setTimestamp(av_rescale(pts, 90000, 1000));
        }
        rtpHeader.setSSRC(55543);
        rtpHeader.setPayloadType(100);
        memcpy(rtpBuffer_, &rtpHeader, rtpHeader.getHeaderLength());
        memcpy(&rtpBuffer_[rtpHeader.getHeaderLength()],outBuff, outlen);

        int l = outlen + rtpHeader.getHeaderLength();
        // sink_->sendData(rtpBuffer_, l);
        rtpReceiver_->receiveRtpData(rtpBuffer_, l);
    } while (!lastFrame);

    return 0;
}
/** \brief Seek to millisecond
 *
 * Converts the target time in milliseconds into a frame number in the
 * video stream's timebase and delegates the actual seek to seekFrame().
 *
 * @param tsms target position in milliseconds
 * @return result of seekFrame() on the computed frame number
 */
bool DecodeThread::seekMs(int tsms)
{
    //printf("**** SEEK TO ms %d. LLT: %d. LT: %d. LLF: %d. LF: %d. LastFrameOk: %d\n",tsms,LastLastFrameTime,LastFrameTime,LastLastFrameNumber,LastFrameNumber,(int)LastFrameOk);
    cout << "tsms" << tsms << endl;
    // Convert time into frame number: first rescale ms by den/num (which
    // yields a value 1000x too large), then divide back down by 1000.
    qint64 DesiredFrameNumber = av_rescale(tsms,
                                           pFormatCtx->streams[videoStream]->time_base.den,
                                           pFormatCtx->streams[videoStream]->time_base.num);
    DesiredFrameNumber/=1000;

    return seekFrame(DesiredFrameNumber);
}
//FIXME merge with compute_pkt_fields static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt) { int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); int num, den, i; av_dlog(s, "compute_pkt_fields2: pts:%" PRId64 " dts:%" PRId64 " cur_dts:%" PRId64 " b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) * return AVERROR(EINVAL);*/ /* duration field */ if (pkt->duration == 0) { ff_compute_frame_duration(s, &num, &den, st, NULL, pkt); if (den && num) { pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); } } if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0) pkt->pts = pkt->dts; //calculate dts from pts if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) { st->pts_buffer[0] = pkt->pts; for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration; for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++) FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]); pkt->dts = st->pts_buffer[0]; } if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) { av_log(s, AV_LOG_ERROR, "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n", st->index, st->cur_dts, pkt->dts); return AVERROR(EINVAL); } if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) { av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n", pkt->pts, pkt->dts, st->index); return AVERROR(EINVAL); } av_dlog(s, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); st->cur_dts = pkt->dts; return 0; }
/**
 * Encode one frame of 16-bit interleaved PCM (mono or stereo) with
 * libvorbis.
 *
 * Finished ogg_packets are staged in context->buffer as
 * [ogg_packet header][payload] records; each call emits at most one
 * staged packet into `packets` and returns its size (0 if none ready).
 * A NULL `data` flushes the encoder.
 */
static int oggvorbis_encode_frame(AVCodecContext *avccontext, unsigned char *packets,
                                  int buf_size, void *data)
{
    OggVorbisContext *context = avccontext->priv_data ;
    float **buffer ;
    ogg_packet op ;
    signed char *audio = data ;
    int l, samples = data ? OGGVORBIS_FRAME_SIZE : 0;

    /* De-interleave and convert s16le samples to floats in [-1, 1). */
    buffer = vorbis_analysis_buffer(&context->vd, samples) ;
    if(context->vi.channels == 1) {
        for(l = 0 ; l < samples ; l++)
            buffer[0][l]=((audio[l*2+1]<<8)|(0x00ff&(int)audio[l*2]))/32768.f;
    } else {
        for(l = 0 ; l < samples ; l++){
            buffer[0][l]=((audio[l*4+1]<<8)|(0x00ff&(int)audio[l*4]))/32768.f;
            buffer[1][l]=((audio[l*4+3]<<8)|(0x00ff&(int)audio[l*4+2]))/32768.f;
        }
    }

    vorbis_analysis_wrote(&context->vd, samples) ;

    /* Drain every block the encoder has ready and stage the resulting
     * packets (header struct + payload) in the context buffer. */
    while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
        vorbis_analysis(&context->vb, NULL);
        vorbis_bitrate_addblock(&context->vb) ;

        while(vorbis_bitrate_flushpacket(&context->vd, &op)) {
            /* Not a hack, surprisingly: libogg signals end-of-stream with
             * a 1-byte packet, which must be skipped here. */
            if(op.bytes==1)
                continue;
            memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet));
            context->buffer_index += sizeof(ogg_packet);
            memcpy(context->buffer + context->buffer_index, op.packet, op.bytes);
            context->buffer_index += op.bytes;
//            av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes);
        }
    }

    /* Pop the oldest staged packet, set its pts from the granule
     * position, and compact the remaining staged data to the front. */
    l=0;
    if(context->buffer_index){
        ogg_packet *op2= (ogg_packet*)context->buffer;
        op2->packet = context->buffer + sizeof(ogg_packet);

        l=  op2->bytes;
        avccontext->coded_frame->pts= av_rescale(op2->granulepos, AV_TIME_BASE, avccontext->sample_rate);

        memcpy(packets, op2->packet, l);
        context->buffer_index -= l + sizeof(ogg_packet);
        memcpy(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index);
//        av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l);
    }

    return l;
}
bool AVDecoder::seekVideo(int percentage) { if(!loadCodec(CODEC_VIDEO)) { return false; } qint64 seek_pos = pFormatCtx->duration * percentage / (AV_TIME_BASE * 100); qint64 frame = av_rescale(seek_pos, av_stream->time_base.den, av_stream->time_base.num); return avformat_seek_file(pFormatCtx, stream_index, 0, frame, frame, AVSEEK_FLAG_FRAME) >= 0; }
/**
 * Read the Xing TOC (XING_TOC_COUNT one-byte entries) and convert each
 * entry into an index entry: byte b maps to file position b/256 of the
 * total size, at timestamp i/XING_TOC_COUNT of the total duration.
 *
 * Skipped (with a warning) when the file size cannot be determined.
 */
static void read_xing_toc(AVFormatContext *s, int64_t filesize, int64_t duration)
{
    MP3Context *mp3 = s->priv_data;
    int i;

    if (!filesize)
        filesize = avio_size(s->pb);
    if (!filesize) {
        av_log(s, AV_LOG_WARNING, "Cannot determine file size, skipping TOC table.\n");
        return;
    }

    for (i = 0; i < XING_TOC_COUNT; i++) {
        uint8_t entry = avio_r8(s->pb);
        int64_t pos   = av_rescale(entry, filesize, 256);
        int64_t ts    = av_rescale(i, duration, XING_TOC_COUNT);

        av_add_index_entry(s->streams[0], pos, ts, 0, 0, AVINDEX_KEYFRAME);
    }

    mp3->xing_toc = 1;
}
/**
 * Reset the per-stream muxing state after a global sync point: every
 * stream is marked as having just emitted a key frame, and its last pts
 * is re-anchored to global_ts translated into the stream's timebase.
 */
static void reset(AVFormatContext *s, int64_t global_ts){
    NUTContext *nut = s->priv_data;
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        StreamContext *sc = &nut->stream[i];

        sc->last_key_frame = 1;
        /* global timebase -> stream timebase. */
        sc->last_pts = av_rescale(global_ts,
                                  sc->rate_num * (int64_t)nut->rate_den,
                                  sc->rate_den * (int64_t)nut->rate_num);
    }
}
/**
 * Route one demuxed packet to the soft device queue.
 *
 * Latches the first audio and video stream indices seen, drops packets
 * from any other stream, rescales the pts into the player's clock
 * (AV_TIME_BASE/100 units on new libavformat, raw pts/100 on old), and
 * queues the packet for playback.
 */
void cSoftPlayer::RemuxAndQueue(AVPacket &pkt) {
    // set audio index if not yet set
    if ( AudioIdx== -1 &&
#if LIBAVFORMAT_BUILD > 4628
            ic->streams[pkt.stream_index]->codec->codec_type == CODEC_TYPE_AUDIO
#else
            ic->streams[pkt.stream_index]->codec.codec_type == CODEC_TYPE_AUDIO
#endif
       )
        AudioIdx=pkt.stream_index;

    // set video index if not yet set
    if ( VideoIdx== -1 &&
#if LIBAVFORMAT_BUILD > 4628
            ic->streams[pkt.stream_index]->codec->codec_type == CODEC_TYPE_VIDEO
#else
            ic->streams[pkt.stream_index]->codec.codec_type == CODEC_TYPE_VIDEO
#endif
       )
        VideoIdx=pkt.stream_index;

    // skip packets which do not belong to the current streams
    if ( pkt.stream_index != VideoIdx &&
            pkt.stream_index != AudioIdx ) {
        printf("Drop Packet PTS: %lld\n",pkt.pts);
        return;
    };

#if LIBAVFORMAT_BUILD > 4623
    // Newer libavformat: pts is in the stream's own timebase; rescale to
    // AV_TIME_BASE and divide by 100 for the player clock.
    AVRational time_base;
    time_base=ic->streams[pkt.stream_index]->time_base;
    if ( pkt.pts != (int64_t) AV_NOPTS_VALUE ) {
        pkt.pts=av_rescale(pkt.pts, AV_TIME_BASE* (int64_t)time_base.num, time_base.den)/100 ;
    };
    //printf("PTS: %lld new %lld num %d den %d\n",PTS,pkt.pts,
    //  time_base.num,time_base.den);
#else
    // Older libavformat: pts is already in AV_TIME_BASE units.
    if ( pkt.pts != (int64_t) AV_NOPTS_VALUE )
        pkt.pts/=100;
#endif
    //pkt.pts*=1000/AV_TIME_BASE;

    // Packet data may reference demuxer-owned memory; duplicate before
    // queueing.
    av_dup_packet(&pkt);
    // length = -2 : queue packet
    PKTDBG("Queue Packet index: %d PTS: %lld\n",pkt.stream_index,pkt.pts);
#if VDRVERSNUM >= 10330
    SoftHandles.QueuePacket(SoftDevice,ic,pkt);
#else
    SoftDevice->PlayVideo((uchar *)&pkt,-2);
#endif
};
/**
 * Current playback position of the video stream in seconds.
 *
 * Derived from the stream's cur_dts, rescaled to AV_TIME_BASE units and
 * corrected for the container's start_time when known.
 *
 * @return position in seconds, or -1 when no video stream or no
 *         timestamp is available
 */
double wxFfmpegMediaDecoder::GetPosition() {
    AVStream *st = GetVideoStream();
    if (st == NULL)
        return -1;

    int64_t ts = st->cur_dts;
    if (ts == (int64_t)AV_NOPTS_VALUE)
        return -1;

    // stream timebase -> AV_TIME_BASE units.
    ts = av_rescale(ts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);

    // Make the position relative to the container's start time.
    if (m_formatCtx->start_time != (int64_t)AV_NOPTS_VALUE)
        ts -= m_formatCtx->start_time;

    return ((double)ts) / AV_TIME_BASE;
}
InputContext * input_context_new (const char * filename) { int ok = 0; // open file AVFormatContext * pfc = NULL; ok = avformat_open_input(&pfc, filename, NULL, NULL); if (ok != 0) { goto failed; } // find stream int stnb = av_find_best_stream(pfc, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); AVStream * pst = pfc->streams[stnb]; AVRational * ptb = &pst->time_base; int64_t duration = av_rescale(pst->duration, ptb->num, ptb->den); // find codec AVCodec * pc = avcodec_find_decoder(pst->codecpar->codec_id); if (!pc) { goto close_demuxer; } AVCodecContext * pcc = avcodec_alloc_context3(pc); if (!pcc) { goto close_demuxer; } ok = avcodec_parameters_to_context(pcc, pst->codecpar); if (ok < 0) { goto close_decoder; } ok = avcodec_open2(pcc, pc, NULL); if (ok != 0) { goto close_decoder; } InputContext * context = malloc(sizeof(InputContext)); context->format_context = pfc; context->stream = pst; context->codec = pc; context->codec_context = pcc; context->stream_index = stnb; context->time_base = ptb; context->duration = duration; return context; close_decoder: avcodec_free_context(&pcc); close_demuxer: avformat_close_input(&pfc); failed: return NULL; }