Example 1
static av_cold int init(AVFilterContext *ctx)
{
    AudioEchoContext *s = ctx->priv;
    int nb_delays, nb_decays, i;

    if (!s->delays || !s->decays) {
        av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
        return AVERROR(EINVAL);
    }

    count_items(s->delays, &nb_delays);
    count_items(s->decays, &nb_decays);

    s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
    s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
    if (!s->delay || !s->decay)
        return AVERROR(ENOMEM);

    fill_items(s->delays, &nb_delays, s->delay);
    fill_items(s->decays, &nb_decays, s->decay);

    if (nb_delays != nb_decays) {
        av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
        return AVERROR(EINVAL);
    }

    s->nb_echoes = nb_delays;
    if (!s->nb_echoes) {
        av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
        return AVERROR(EINVAL);
    }

    s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
    if (!s->samples)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_delays; i++) {
        if (s->delay[i] <= 0 || s->delay[i] > 90000) {
            av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
            return AVERROR(EINVAL);
        }
        if (s->decay[i] <= 0 || s->decay[i] > 1) {
            av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
            return AVERROR(EINVAL);
        }
    }

    s->next_pts = AV_NOPTS_VALUE;

    av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
    return 0;
}
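
A note that applies to every example on this page: av_realloc_f() takes an element count and an element size, checks their product for overflow, and, unlike plain av_realloc(), frees the old buffer and returns NULL on failure. That is why code like the above can overwrite the pointer directly and just test for NULL without leaking. A minimal sketch of the idiom (my illustration, not FFmpeg source):

float *delay = NULL;
int    nb_delays = 16;

delay = av_realloc_f(delay, nb_delays, sizeof(*delay));
if (!delay)
    return AVERROR(ENOMEM); /* old buffer already freed by av_realloc_f */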
Example 2
static int gxf_write_map_packet(AVFormatContext *s, int rewrite)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos = avio_tell(pb);

    if (!rewrite) {
        if (!(gxf->map_offsets_nb % 30)) {
            gxf->map_offsets = av_realloc_f(gxf->map_offsets,
                                            sizeof(*gxf->map_offsets),
                                            gxf->map_offsets_nb+30);
            if (!gxf->map_offsets) {
                av_log(s, AV_LOG_ERROR, "could not realloc map offsets\n");
                return -1;
            }
        }
        gxf->map_offsets[gxf->map_offsets_nb++] = pos; // do not increment here
    }

    gxf_write_packet_header(pb, PKT_MAP);

    /* preamble */
    avio_w8(pb, 0xE0); /* version */
    avio_w8(pb, 0xFF); /* reserved */

    gxf_write_material_data_section(s);
    gxf_write_track_description_section(s);

    return updatePacketSize(pb, pos);
}
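
The map_offsets bookkeeping above is a chunked-growth idiom: reallocate only when the entry count crosses a multiple of 30, so appends are amortized. Note also that the nelem/elsize arguments are swapped relative to Example 1; av_realloc_f(ptr, nelem, elsize) only uses their overflow-checked product, so either order allocates the same block. A generic sketch of the pattern, with hypothetical names:

#define CHUNK 30
if (!(count % CHUNK)) { /* array is full at 0, 30, 60, ... used entries */
    items = av_realloc_f(items, sizeof(*items), count + CHUNK);
    if (!items)
        return AVERROR(ENOMEM); /* old array already freed */
}
items[count++] = value;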
Example 3
File: JavMem.c Project: decamp/jav
JNIEXPORT jlong JNICALL Java_bits_jav_util_JavMem_reallocf
( JNIEnv *env, jclass clazz, jlong ptr, jlong elCount, jlong elSize )
{
    void *p   = *(void**)&ptr;
    void *ret = av_realloc_f( p, (size_t)elCount, (size_t)elSize );
    return *(jlong*)&ret;
}
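
The two *(T *)& casts above assume a pointer is exactly as wide as a jlong. A variant that is also well defined on 32-bit targets converts through intptr_t instead of type punning; this is my sketch, not code from decamp/jav:

#include <stdint.h>

JNIEXPORT jlong JNICALL Java_bits_jav_util_JavMem_reallocf
( JNIEnv *env, jclass clazz, jlong ptr, jlong elCount, jlong elSize )
{
    void *p   = (void *)(intptr_t)ptr;   /* jlong -> native pointer */
    void *ret = av_realloc_f( p, (size_t)elCount, (size_t)elSize );
    return (jlong)(intptr_t)ret;         /* native pointer -> jlong */
}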
Example 4
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
{
    void **ptrptr = ptr;
    *ptrptr = av_realloc_f(*ptrptr, nmemb, size);
    if (!*ptrptr && nmemb && size)
        return AVERROR(ENOMEM);
    return 0;
}
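
A hypothetical caller, to make the contract visible: on failure *ptrptr has already been set to NULL (av_realloc_f freed the old block), so no separate cleanup of the array is needed:

int *values = NULL;
if (av_reallocp_array(&values, 100, sizeof(*values)) < 0)
    return AVERROR(ENOMEM); /* values is NULL here, nothing leaked */
values[0] = 42;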
Example 5
static inline int realloc_argv(char ***argv, int *numargs)
{
    *numargs += NUM_ADDED_ARGS;
    *argv = av_realloc_f(*argv, *numargs, sizeof(**argv));
    if (!*argv)
        return AVERROR(ENOMEM);
    else
        return *numargs;
}
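
On success this returns the new (positive) slot count, on failure a negative AVERROR code, so callers test the sign. A sketch of the usual driver loop, with the token source left hypothetical:

char **argv = NULL;
int numargs = 0, i = 0;

if (realloc_argv(&argv, &numargs) < 0)
    return AVERROR(ENOMEM);
while (have_more_tokens()) {               /* hypothetical input */
    if (i + 1 >= numargs && realloc_argv(&argv, &numargs) < 0)
        return AVERROR(ENOMEM);
    argv[i++] = next_token();              /* hypothetical */
}
argv[i] = NULL;                            /* conventional terminator */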
Example 6
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
{
    void *val;

    memcpy(&val, ptr, sizeof(val));
    val = av_realloc_f(val, nmemb, size);
    memcpy(ptr, &val, sizeof(val));
    if (!val && nmemb && size)
        return AVERROR(ENOMEM);

    return 0;
}
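
Compared with Example 4, this version moves the pointer value in and out through memcpy() instead of dereferencing a void **. The observable behavior is the same, but the memcpy() form avoids accessing a typed pointer object through an lvalue of a different pointer type, which is why the newer FFmpeg implementation is written this way.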
Example 7
static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    int64_t pos = avio_tell(pb);
    int padding = 0;
    int packet_start_offset = avio_tell(pb) / 1024;

    gxf_write_packet_header(pb, PKT_MEDIA);
    if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO && pkt->size % 4) /* MPEG-2 frames must be padded */
        padding = 4 - pkt->size % 4;
    else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        padding = GXF_AUDIO_PACKET_SIZE - pkt->size;
    gxf_write_media_preamble(s, pkt, pkt->size + padding);
    avio_write(pb, pkt->data, pkt->size);
    gxf_write_padding(pb, padding);

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if (!(gxf->flt_entries_nb % 500)) {
            gxf->flt_entries = av_realloc_f(gxf->flt_entries,
                                            sizeof(*gxf->flt_entries),
                                            gxf->flt_entries_nb+500);
            if (!gxf->flt_entries) {
                av_log(s, AV_LOG_ERROR, "could not reallocate flt entries\n");
                return -1;
            }
        }
        gxf->flt_entries[gxf->flt_entries_nb++] = packet_start_offset;
        gxf->nb_fields += 2; // count fields
    }

    updatePacketSize(pb, pos);

    gxf->packet_count++;
    if (gxf->packet_count == 100) {
        gxf_write_map_packet(s, 0);
        gxf->packet_count = 0;
    }

    avio_flush(pb);

    return 0;
}
Example 8
static int config_input(AVFilterLink *inlink)
{
    double x0, x1, x2, x3, x4, x5, x6, x7, q;
    AVFilterContext *ctx = inlink->dst;
    PerspectiveContext *s = ctx->priv;
    double (*ref)[2] = s->ref;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h };
    int h = inlink->h;
    int w = inlink->w;
    int x, y, i, j, ret;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 2; j++) {
            if (!s->expr_str[i][j])
                return AVERROR(EINVAL);
            ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
                                         var_names, &values[0],
                                         NULL, NULL, NULL, NULL,
                                         0, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
    if (!s->pv)
        return AVERROR(ENOMEM);

    x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
          (ref[2][1] - ref[3][1]) -
         ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
          (ref[2][0] - ref[3][0])) * h;
    x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
          (ref[1][0] - ref[3][0]) -
         ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
          (ref[1][1] - ref[3][1])) * w;
    q =  ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
         ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);

    x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
    x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
    x2 = q *  ref[0][0] * w * h;
    x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
    x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
    x5 = q *  ref[0][1] * w * h;

    for (y = 0; y < h; y++){
        for (x = 0; x < w; x++){
            int u, v;

            u = (int)floor(SUB_PIXELS * (x0 * x + x1 * y + x2) /
                                        (x6 * x + x7 * y + q * w * h) + 0.5);
            v = (int)floor(SUB_PIXELS * (x3 * x + x4 * y + x5) /
                                        (x6 * x + x7 * y + q * w * h) + 0.5);

            s->pv[x + y * w][0] = u;
            s->pv[x + y * w][1] = v;
        }
    }

    for (i = 0; i < SUB_PIXELS; i++){
        double d = i / (double)SUB_PIXELS;
        double temp[4];
        double sum = 0;

        for (j = 0; j < 4; j++)
            temp[j] = get_coeff(j - d - 1);

        for (j = 0; j < 4; j++)
            sum += temp[j];

        for (j = 0; j < 4; j++)
            s->coeff[i][j] = (int)floor((1 << COEFF_BITS) * temp[j] / sum + 0.5);
    }

    return 0;
}
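
One subtlety in the s->pv allocation above: av_realloc_f() overflow-checks its own nelem * elsize product, but w * h is computed as a plain int multiplication before the call ever sees it. A defensive variant (my addition, not in the filter) would bound the product first:

if (h && w > INT_MAX / h)
    return AVERROR(EINVAL); /* w * h would overflow int */
s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));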
Example 9
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    unsigned char tag[5];
    unsigned int flags = 0;
    const int stream_index = pkt->stream_index;
    int size               = pkt->size;
    AVIContext *avi     = s->priv_data;
    AVIOContext *pb     = s->pb;
    AVIStream *avist    = s->streams[stream_index]->priv_data;
    AVCodecContext *enc = s->streams[stream_index]->codec;
    int ret;

    if (enc->codec_id == AV_CODEC_ID_H264 && enc->codec_tag == MKTAG('H','2','6','4') && pkt->size) {
        ret = ff_check_h264_startcode(s, s->streams[stream_index], pkt);
        if (ret < 0)
            return ret;
    }

    if ((ret = write_skip_frames(s, stream_index, pkt->dts)) < 0)
        return ret;

    if (pkt->dts != AV_NOPTS_VALUE)
        avist->last_dts = pkt->dts + pkt->duration;

    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
        avist->audio_strm_length += size;

    if (s->pb->seekable) {
        AVIIndex *idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc_f(idx->cluster, sizeof(void*), cl+1);
            if (!idx->cluster) {
                idx->ents_allocated = 0;
                idx->entry          = 0;
                return AVERROR(ENOMEM);
            }
            idx->cluster[cl] =
                av_malloc(AVI_INDEX_CLUSTER_SIZE * sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return AVERROR(ENOMEM);
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos   = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len   = size;
        avist->max_size = FFMAX(avist->max_size, size);
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
Example 10
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AFFTFiltContext *s = ctx->priv;
    char *saveptr = NULL;
    int ret = 0, ch, i;
    float overlap;
    char *args;
    const char *last_expr = "1";

    s->fft  = av_fft_init(s->fft_bits, 0);
    s->ifft = av_fft_init(s->fft_bits, 1);
    if (!s->fft || !s->ifft)
        return AVERROR(ENOMEM);

    s->window_size = 1 << s->fft_bits;

    s->fft_data = av_calloc(inlink->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->window_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    s->real = av_calloc(inlink->channels, sizeof(*s->real));
    if (!s->real)
        return AVERROR(ENOMEM);

    s->imag = av_calloc(inlink->channels, sizeof(*s->imag));
    if (!s->imag)
        return AVERROR(ENOMEM);

    args = av_strdup(s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->real[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0)
            break;
        if (arg)
            last_expr = arg;
        s->nb_exprs++;
    }

    av_free(args);

    args = av_strdup(s->img_str ? s->img_str : s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->imag[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0)
            break;
        if (arg)
            last_expr = arg;
    }

    av_free(args);

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);

    s->window_func_lut = av_realloc_f(s->window_func_lut, s->window_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->window_size, s->win_func, &overlap);
    if (s->overlap == 1)
        s->overlap = overlap;

    for (s->win_scale = 0, i = 0; i < s->window_size; i++) {
        s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    s->hop_size = s->window_size * (1 - s->overlap);
    if (s->hop_size <= 0)
        return AVERROR(EINVAL);

    s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    return ret;
}
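
Note that s->window_func_lut is NULL the first time this runs, and av_realloc_f(NULL, n, size) simply behaves like an overflow-checked malloc; on later reconfigurations the same call resizes the existing LUT. The same pattern appears again in Examples 13 and 15.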
Example 11
STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  int     got_picture = 0;
  int     used_bytes  = 0;
  BOOL    bParserFrame = FALSE;
  BOOL    bFlush = (buffer == NULL);
  BOOL    bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }

    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = NULL;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize	= buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }
      
      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_H264) {
      BOOL bRecovered = m_h264RandomAccess.searchRecoveryPoint(pDataBuffer, buflen);
      if (!bRecovered) {
        return S_OK;
      }
    } else if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = NULL;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = NULL;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      }

      // Update start time cache
      // If more data was read than was output, update the cache (incomplete frame)
      // If the output is bigger, a frame was completed: update the actual rtStart with the cached value, then overwrite the cache
      if (used_bytes > pOut_size) {
        if (rtStartIn != AV_NOPTS_VALUE)
          m_rtStartCache = rtStartIn;
      } else if (used_bytes == pOut_size || ((used_bytes + 9) == pOut_size)) {
        // Why +9 above?
        // Well, apparently there are some broken MKV muxers that like to mux the MPEG-2 PICTURE_START_CODE block (which is 9 bytes) into the packet with the previous frame.
        // This would cause the frame timestamps to be delayed by exactly one frame and make timestamp reordering go wrong.
        // So instead of failing on those samples, let's just assume that a 9-byte difference is exactly that case.
        m_rtStartCache = rtStartIn = AV_NOPTS_VALUE;
      } else if (pOut_size > used_bytes) {
        rtStart = m_rtStartCache;
        m_rtStartCache = rtStartIn;
        // The value was used once, don't use it for multiple frames, that ends up in weird timings
        rtStartIn = AV_NOPTS_VALUE;
      }

      bParserFrame = (pOut_size > 0);

      if (pOut_size > 0 || bFlush) {

        if (pOut && pOut_size > 0) {
          if (pOut_size > m_nFFBufferSize2) {
            m_nFFBufferSize2	= pOut_size;
            m_pFFBuffer2 = (BYTE *)av_realloc_f(m_pFFBuffer2, m_nFFBufferSize2 + FF_INPUT_BUFFER_PADDING_SIZE, 1);
            if (!m_pFFBuffer2) {
              m_nFFBufferSize2 = 0;
              return E_OUTOFMEMORY;
            }
          }
          memcpy(m_pFFBuffer2, pOut, pOut_size);
          memset(m_pFFBuffer2+pOut_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

          avpkt.data = m_pFFBuffer2;
          avpkt.size = pOut_size;
          avpkt.pts = rtStart;
          avpkt.duration = 0;

          const uint8_t *eosmarker = CheckForEndOfSequence(m_nCodecId, avpkt.data, avpkt.size, &m_MpegParserState);
          if (eosmarker) {
            bEndOfSequence = TRUE;
          }
        } else {
          avpkt.data = NULL;
          avpkt.size = 0;
        }

        int ret2 = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
        if (ret2 < 0) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - decoding failed despite successfull parsing"));
          got_picture = 0;
        }
      } else {
        got_picture = 0;
      }
    } else {
      used_bytes = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
    }

    if (FAILED(PostDecode())) {
      av_frame_unref(m_pFrame);
      return E_FAIL;
    }

    // Decoding of this frame failed ... oh well!
    if (used_bytes < 0) {
      av_frame_unref(m_pFrame);
      return S_OK;
    }

    // When frame threading, we won't know how much data has been consumed, so by default it eats everything.
    // In addition, if no data got consumed and no picture was extracted, the frame probably isn't all that useful.
    // The MJPEGB decoder is somewhat buggy and doesn't really let us know how much data was consumed...
    if ((!m_pParser && (m_pAVCtx->active_thread_type & FF_THREAD_FRAME || (!got_picture && used_bytes == 0))) || m_bNoBufferConsumption || bFlush) {
      buflen = 0;
    } else {
      buflen -= used_bytes;
      pDataBuffer += used_bytes;
    }

    // Judge frame usability
    // This determines if a frame is artifact free and can be delivered
    // For H264 this does some wicked magic hidden away in the H264RandomAccess class
    // MPEG-2 and VC-1 just wait for a keyframe.
    if (m_nCodecId == AV_CODEC_ID_H264 && (bParserFrame || !m_pParser || got_picture)) {
      m_h264RandomAccess.judgeFrameUsability(m_pFrame, &got_picture);
    } else if (m_bResumeAtKeyFrame) {
      if (m_bWaitingForKeyFrame && got_picture) {
        if (m_pFrame->key_frame) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - Found Key-Frame, resuming decoding at %I64d", m_pFrame->pkt_pts));
          m_bWaitingForKeyFrame = FALSE;
        } else {
          got_picture = 0;
        }
      }
    }

    // Handle B-frame delay for frame threading codecs
    if ((m_pAVCtx->active_thread_type & FF_THREAD_FRAME) && m_bBFrameDelay) {
      m_tcBFrameDelay[m_nBFramePos] = m_tcThreadBuffer[m_CurrentThread];
      m_nBFramePos = !m_nBFramePos;
    }

    if (!got_picture || !m_pFrame->data[0]) {
      if (!avpkt.size)
        bFlush = FALSE; // End flushing, no more frames
      av_frame_unref(m_pFrame);
      continue;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Determine the proper timestamps for the frame, based on different possible flags.
    ///////////////////////////////////////////////////////////////////////////////////////////////
    if (m_bFFReordering) {
      rtStart = m_pFrame->pkt_pts;
      if (m_pFrame->pkt_duration)
        rtStop = m_pFrame->pkt_pts + m_pFrame->pkt_duration;
      else
        rtStop = AV_NOPTS_VALUE;
    } else if (m_bBFrameDelay && m_pAVCtx->has_b_frames) {
      rtStart = m_tcBFrameDelay[m_nBFramePos].rtStart;
      rtStop  = m_tcBFrameDelay[m_nBFramePos].rtStop;
    } else if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
      unsigned index = m_CurrentThread;
      rtStart = m_tcThreadBuffer[index].rtStart;
      rtStop  = m_tcThreadBuffer[index].rtStop;
    }

    if (m_bRVDropBFrameTimings && m_pFrame->pict_type == AV_PICTURE_TYPE_B) {
      rtStart = AV_NOPTS_VALUE;
    }

    if (m_bCalculateStopTime)
      rtStop = AV_NOPTS_VALUE;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // All required values collected, deliver the frame
    ///////////////////////////////////////////////////////////////////////////////////////////////
    LAVFrame *pOutFrame = NULL;
    AllocateFrame(&pOutFrame);

    AVRational display_aspect_ratio;
    int64_t num = (int64_t)m_pFrame->sample_aspect_ratio.num * m_pFrame->width;
    int64_t den = (int64_t)m_pFrame->sample_aspect_ratio.den * m_pFrame->height;
    av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, 1 << 30);

    pOutFrame->width        = m_pFrame->width;
    pOutFrame->height       = m_pFrame->height;
    pOutFrame->aspect_ratio = display_aspect_ratio;
    pOutFrame->repeat       = m_pFrame->repeat_pict;
    pOutFrame->key_frame    = m_pFrame->key_frame;
    pOutFrame->frame_type   = av_get_picture_type_char(m_pFrame->pict_type);
    pOutFrame->ext_format   = GetDXVA2ExtendedFlags(m_pAVCtx, m_pFrame);

    if (m_pFrame->interlaced_frame || (!m_pAVCtx->progressive_sequence && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)))
      m_iInterlaced = 1;
    else if (m_pAVCtx->progressive_sequence)
      m_iInterlaced = 0;

    pOutFrame->interlaced   = (m_pFrame->interlaced_frame || (m_iInterlaced == 1 && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

    LAVDeintFieldOrder fo   = m_pSettings->GetDeintFieldOrder();
    pOutFrame->tff          = (fo == DeintFieldOrder_Auto) ? m_pFrame->top_field_first : (fo == DeintFieldOrder_TopFieldFirst);

    pOutFrame->rtStart      = rtStart;
    pOutFrame->rtStop       = rtStop;

    PixelFormatMapping map  = getPixFmtMapping((AVPixelFormat)m_pFrame->format);
    pOutFrame->format       = map.lavpixfmt;
    pOutFrame->bpp          = map.bpp;

    if (m_nCodecId == AV_CODEC_ID_MPEG2VIDEO || m_nCodecId == AV_CODEC_ID_MPEG1VIDEO)
      pOutFrame->avgFrameDuration = GetFrameDuration();

    if (map.conversion) {
      ConvertPixFmt(m_pFrame, pOutFrame);
    } else {
      for (int i = 0; i < 4; i++) {
        pOutFrame->data[i]   = m_pFrame->data[i];
        pOutFrame->stride[i] = m_pFrame->linesize[i];
      }

      pOutFrame->priv_data = av_frame_alloc();
      av_frame_ref((AVFrame *)pOutFrame->priv_data, m_pFrame);
      pOutFrame->destruct  = lav_avframe_free;
    }

    if (bEndOfSequence)
      pOutFrame->flags |= LAV_FRAME_FLAG_END_OF_SEQUENCE;

    if (pOutFrame->format == LAVPixFmt_DXVA2) {
      pOutFrame->data[0] = m_pFrame->data[4];
      HandleDXVA2Frame(pOutFrame);
    } else {
      Deliver(pOutFrame);
    }

    if (bEndOfSequence) {
      bEndOfSequence = FALSE;
      if (pOutFrame->format == LAVPixFmt_DXVA2) {
        HandleDXVA2Frame(m_pCallback->GetFlushFrame());
      } else {
        Deliver(m_pCallback->GetFlushFrame());
      }
    }

    if (bFlush) {
      m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
    }
    av_frame_unref(m_pFrame);
  }

  return S_OK;
}
Example 12
int main(int argc, char **argv)
{
    size_t buf_size = 256;
    char *buf = av_malloc(buf_size);
    const char *outfilename = NULL, *infilename = NULL;
    FILE *outfile = NULL, *infile = NULL;
    const char *prompt = "=> ";
    int count = 0, echo = 0;
    int c;

    av_max_alloc(MAX_BLOCK_SIZE);

    while ((c = getopt(argc, argv, "ehi:o:p:")) != -1) {
        switch (c) {
        case 'e':
            echo = 1;
            break;
        case 'h':
            usage();
            return 0;
        case 'i':
            infilename = optarg;
            break;
        case 'o':
            outfilename = optarg;
            break;
        case 'p':
            prompt = optarg;
            break;
        case '?':
            return 1;
        }
    }

    if (!infilename || !strcmp(infilename, "-"))
        infilename = "/dev/stdin";
    infile = fopen(infilename, "r");
    if (!infile) {
        fprintf(stderr, "Impossible to open input file '%s': %s\n", infilename, strerror(errno));
        return 1;
    }

    if (!outfilename || !strcmp(outfilename, "-"))
        outfilename = "/dev/stdout";
    outfile = fopen(outfilename, "w");
    if (!outfile) {
        fprintf(stderr, "Impossible to open output file '%s': %s\n", outfilename, strerror(errno));
        return 1;
    }

    while ((c = fgetc(infile)) != EOF) {
        if (c == '\n') {
            double d;

            buf[count] = 0;
            if (buf[0] != '#') {
                av_expr_parse_and_eval(&d, buf,
                                       NULL, NULL,
                                       NULL, NULL, NULL, NULL, NULL, 0, NULL);
                if (echo)
                    fprintf(outfile, "%s ", buf);
                fprintf(outfile, "%s%f\n", prompt, d);
            }
            count = 0;
        } else {
            if (count >= buf_size-1) {
                if (buf_size == MAX_BLOCK_SIZE) {
                    av_log(NULL, AV_LOG_ERROR, "Memory allocation problem, "
                           "max block size '%zd' reached\n", MAX_BLOCK_SIZE);
                    return 1;
                }
                buf_size = FFMIN(buf_size, MAX_BLOCK_SIZE / 2) * 2;
                buf = av_realloc_f((void *)buf, buf_size, 1);
                if (!buf) {
                    av_log(NULL, AV_LOG_ERROR, "Memory allocation problem occurred\n");
                    return 1;
                }
            }
            buf[count++] = c;
        }
    }

    av_free(buf);
    return 0;
}
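
The growth step buf_size = FFMIN(buf_size, MAX_BLOCK_SIZE / 2) * 2; doubles the buffer while clamping the operand first, so the new size can neither overflow nor exceed MAX_BLOCK_SIZE. With MAX_BLOCK_SIZE = 4096, for instance, the sizes run 256, 512, 1024, 2048, 4096, after which the equality test above reports the limit instead of growing further.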
Example 13
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SpectrumSynthContext *s = ctx->priv;
    int width = ctx->inputs[0]->w;
    int height = ctx->inputs[0]->h;
    AVRational time_base  = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int i, ch, fft_bits;
    float factor, overlap;

    outlink->sample_rate = s->sample_rate;
    outlink->time_base = (AVRational){1, s->sample_rate};

    if (width  != ctx->inputs[1]->w ||
        height != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[1]->w, ctx->inputs[1]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[1]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[1]->time_base.num,
               ctx->inputs[1]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[1]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[1]->frame_rate.num,
               ctx->inputs[1]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->size = s->orientation == VERTICAL ? height / s->channels : width / s->channels;
    s->xend = s->orientation == VERTICAL ? width : height;

    for (fft_bits = 1; 1 << fft_bits < 2 * s->size; fft_bits++);

    s->win_size = 1 << fft_bits;
    s->nb_freq = 1 << (fft_bits - 1);

    s->fft = av_fft_init(fft_bits, 1);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(EINVAL);
    }
    s->fft_data = av_calloc(s->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    for (ch = 0; ch < s->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->win_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    s->buffer = ff_get_audio_buffer(outlink, s->win_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1)
        s->overlap = overlap;
    s->hop_size = (1 - s->overlap) * s->win_size;
    for (factor = 0, i = 0; i < s->win_size; i++) {
        factor += s->window_func_lut[i] * s->window_func_lut[i];
    }
    s->factor = (factor / s->win_size) / FFMAX(1 / (1 - s->overlap) - 1, 1);

    return 0;
}
Example 14
static int fourxm_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size){
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width  = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = av_new_stream(s, 0);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            av_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width  = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;
            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            if((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1){
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret= -1;
                goto fail;
            }
            if (current_track + 1 > fourxm->track_count) {
                fourxm->tracks = av_realloc_f(fourxm->tracks,
                                              sizeof(AudioTrack),
                                              current_track + 1);
                if (!fourxm->tracks) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                memset(&fourxm->tracks[fourxm->track_count], 0,
                       sizeof(AudioTrack) * (current_track + 1 - fourxm->track_count));
                fourxm->track_count = current_track + 1;
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            if(   fourxm->tracks[current_track].channels    <= 0
               || fourxm->tracks[current_track].sample_rate <= 0
               || fourxm->tracks[current_track].bits        <  0){
                av_log(s, AV_LOG_ERROR, "audio header invalid\n");
                ret= -1;
                goto fail;
            }
            i += 8 + size;

            /* allocate a new AVStream */
            st = av_new_stream(s, current_track);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }

            av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels              = fourxm->tracks[current_track].channels;
            st->codec->sample_rate           = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate              = st->codec->channels * st->codec->sample_rate *
                st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm){
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            }else if (st->codec->bits_per_coded_sample == 8){
                st->codec->codec_id = CODEC_ID_PCM_U8;
            }else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream data should be) */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG){
        ret= AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);
    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */

    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
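
The (unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1 guard matters here: av_realloc_f() would catch overflow in its own multiplication, but it cannot protect the later sizeof(AudioTrack) * (current_track + 1 - fourxm->track_count) product passed to memset(), nor the current_track + 1 increment itself.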
Example 15
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowFreqsContext *s = ctx->priv;
    float overlap;
    int i;

    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;
    av_audio_fifo_free(s->fifo);
    av_fft_end(s->fft);
    s->fft = av_fft_init(s->fft_bits, 0);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(ENOMEM);
    }

    /* FFT buffers: x2 for each (display) channel buffer.
     * Note: we use free and malloc instead of a realloc-like function to
     * make sure the buffer is aligned in memory for the FFT functions. */
    for (i = 0; i < s->nb_channels; i++) {
        av_freep(&s->fft_data[i]);
        av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    s->nb_channels = inlink->channels;

    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    if (!s->avg_data)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->skip_samples = (1. - s->overlap) * s->win_size;
    if (s->skip_samples < 1) {
        av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
        return AVERROR(EINVAL);
    }

    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
Example 16
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

    av_dlog(s, "dts:%s packet_count:%d stream_index:%d\n", av_ts2str(pkt->dts), avist->packet_count, stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count && enc->codec_id != AV_CODEC_ID_XSUB && avist->packet_count){
        AVPacket empty_packet;

        if(pkt->dts - avist->packet_count > 60000){
            av_log(s, AV_LOG_ERROR, "Too large number of skipped frames %"PRId64" > 60000\n", pkt->dts - avist->packet_count);
            return AVERROR(EINVAL);
        }

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
        av_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(pkt->dts), avist->packet_count);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if(pkt->flags&AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
       avist->audio_strm_length += size;
    }

    if (s->pb->seekable) {
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc_f(idx->cluster, sizeof(void*), cl+1);
            if (!idx->cluster) {
                idx->ents_allocated = 0;
                idx->entry = 0;
                return AVERROR(ENOMEM);
            }
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return AVERROR(ENOMEM);
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
Example 17
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmncContext * const c = avctx->priv_data;
    uint8_t *outptr;
    const uint8_t *src = buf;
    int dx, dy, w, h, depth, enc, chunks, res, size_left, ret;
    AVFrame *frame = c->frame;

    if ((ret = ff_reget_buffer(avctx, frame)) < 0)
        return ret;

    frame->key_frame = 0;
    frame->pict_type = AV_PICTURE_TYPE_P;

    //restore screen after cursor
    if(c->screendta) {
        int i;
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = frame->data[0] + dx * c->bpp2 + dy * frame->linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
                outptr += frame->linesize[0];
            }
        }
    }
    src += 2;
    chunks = AV_RB16(src); src += 2;
    while(chunks--) {
        if(buf_size - (src - buf) < 12) {
            av_log(avctx, AV_LOG_ERROR, "Premature end of data!\n");
            return -1;
        }
        dx = AV_RB16(src); src += 2;
        dy = AV_RB16(src); src += 2;
        w  = AV_RB16(src); src += 2;
        h  = AV_RB16(src); src += 2;
        enc = AV_RB32(src); src += 4;
        outptr = frame->data[0] + dx * c->bpp2 + dy * frame->linesize[0];
        size_left = buf_size - (src - buf);
        switch(enc) {
        case MAGIC_WMVd: // cursor
            if (w*(int64_t)h*c->bpp2 > INT_MAX/2 - 2) {
                av_log(avctx, AV_LOG_ERROR, "dimensions too large\n");
                return AVERROR_INVALIDDATA;
            }
            if(size_left < 2 + w * h * c->bpp2 * 2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
                return -1;
            }
            src += 2;
            c->cur_w = w;
            c->cur_h = h;
            c->cur_hx = dx;
            c->cur_hy = dy;
            if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
                av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
                c->cur_hx = c->cur_hy = 0;
            }
            c->curbits = av_realloc_f(c->curbits, c->cur_w * c->cur_h, c->bpp2);
            c->curmask = av_realloc_f(c->curmask, c->cur_w * c->cur_h, c->bpp2);
            c->screendta = av_realloc_f(c->screendta, c->cur_w * c->cur_h, c->bpp2);
            if (!c->curbits || !c->curmask || !c->screendta)
                return AVERROR(ENOMEM);
            load_cursor(c, src);
            src += w * h * c->bpp2 * 2;
            break;
        case MAGIC_WMVe: // unknown
            src += 2;
            break;
        case MAGIC_WMVf: // update cursor position
            c->cur_x = dx - c->cur_hx;
            c->cur_y = dy - c->cur_hy;
            break;
        case MAGIC_WMVg: // unknown
            src += 10;
            break;
        case MAGIC_WMVh: // unknown
            src += 4;
            break;
        case MAGIC_WMVi: // ServerInitialization struct
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
            depth = *src++;
            if(depth != c->bpp) {
                av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
            }
            src++;
            c->bigendian = *src++;
            if(c->bigendian & (~1)) {
                av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
                return -1;
            }
            //skip the rest of pixel format data
            src += 13;
            break;
        case MAGIC_WMVj: // unknown
            src += 2;
            break;
        case 0x00000000: // raw rectangle data
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            if(size_left < w * h * c->bpp2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
                return -1;
            }
            paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, frame->linesize[0]);
            src += w * h * c->bpp2;
            break;
        case 0x00000005: // HexTile encoded rectangle
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            res = decode_hextile(c, outptr, src, size_left, w, h, frame->linesize[0]);
            if(res < 0)
                return -1;
            src += res;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
            chunks = 0; // leave chunks decoding loop
        }
    }
    if(c->screendta){
        int i;
        //save screen data before painting cursor
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = frame->data[0] + dx * c->bpp2 + dy * frame->linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
                outptr += frame->linesize[0];
            }
            outptr = frame->data[0];
            put_cursor(outptr, frame->linesize[0], c, c->cur_x, c->cur_y);
        }
    }
    *got_frame      = 1;
    if ((ret = av_frame_ref(data, frame)) < 0)
        return ret;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
Example 18
STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  CheckPointer(m_pAVCtx, E_UNEXPECTED);

  int     got_picture = 0;
  int     used_bytes  = 0;
  BOOL    bFlush = (buffer == nullptr);
  BOOL    bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }

    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = nullptr;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize	= buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }
      
      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = nullptr;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = nullptr;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      } else if (used_bytes > 0) {
        buflen -= used_bytes;
        pDataBuffer += used_bytes;
      }

      // Update start time cache
      // If more data was read than was output, update the cache (incomplete frame)
      // If the output is bigger, a frame was completed: update the actual rtStart with the cached value, then overwrite the cache
      if (used_bytes > pOut_size) {
        if (rtStartIn != AV_NOPTS_VALUE)
          m_rtStartCache = rtStartIn;
      } else if (used_bytes == pOut_size || ((used_bytes + 9) == pOut_size)) {
        // Why +9 above?
        // Well, apparently there are some broken MKV muxers that like to mux the MPEG-2 PICTURE_START_CODE block (which is 9 bytes) into the packet with the previous frame.
        // This would cause the frame timestamps to be delayed by exactly one frame and make timestamp reordering go wrong.
        // So instead of failing on those samples, let's just assume that a 9-byte difference is exactly that case.
        m_rtStartCache = rtStartIn = AV_NOPTS_VALUE;
      } else if (pOut_size > used_bytes) {
        rtStart = m_rtStartCache;
        m_rtStartCache = rtStartIn;
        // The value was used once, don't use it for multiple frames, that ends up in weird timings
        rtStartIn = AV_NOPTS_VALUE;
      }

      if (pOut_size > 0 || bFlush) {

        if (pOut && pOut_size > 0) {
          if (pOut_size > m_nFFBufferSize2) {
            m_nFFBufferSize2	= pOut_size;
            m_pFFBuffer2 = (BYTE *)av_realloc_f(m_pFFBuffer2, m_nFFBufferSize2 + FF_INPUT_BUFFER_PADDING_SIZE, 1);
            if (!m_pFFBuffer2) {
              m_nFFBufferSize2 = 0;
              return E_OUTOFMEMORY;
            }
          }
          memcpy(m_pFFBuffer2, pOut, pOut_size);
          memset(m_pFFBuffer2+pOut_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

          avpkt.data = m_pFFBuffer2;
          avpkt.size = pOut_size;
          avpkt.pts = rtStart;
          avpkt.duration = 0;

          const uint8_t *eosmarker = CheckForEndOfSequence(m_nCodecId, avpkt.data, avpkt.size, &m_MpegParserState);
          if (eosmarker) {
            bEndOfSequence = TRUE;
          }
        } else {
          avpkt.data = nullptr;
          avpkt.size = 0;
        }

        int ret2 = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
        if (ret2 < 0) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - decoding failed despite successfull parsing"));
          got_picture = 0;
        }
      } else {
        got_picture = 0;
      }
    } else {
      used_bytes = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
      buflen = 0;
    }

    if (FAILED(PostDecode())) {
      av_frame_unref(m_pFrame);
      return E_FAIL;
    }

    // Decoding of this frame failed ... oh well!
    if (used_bytes < 0) {
      av_frame_unref(m_pFrame);
      return S_OK;
    }

    // Judge frame usability
    // This determines if a frame is artifact free and can be delivered.
    if (m_bResumeAtKeyFrame) {
      if (m_bWaitingForKeyFrame && got_picture) {
        if (m_pFrame->key_frame) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - Found Key-Frame, resuming decoding at %I64d", m_pFrame->pkt_pts));
          m_bWaitingForKeyFrame = FALSE;
        } else {
          got_picture = 0;
        }
      }
    }

    // Handle B-frame delay for frame threading codecs
    if ((m_pAVCtx->active_thread_type & FF_THREAD_FRAME) && m_bBFrameDelay) {
      m_tcBFrameDelay[m_nBFramePos] = m_tcThreadBuffer[m_CurrentThread];
      m_nBFramePos = !m_nBFramePos;
    }

    if (!got_picture || !m_pFrame->data[0]) {
      if (!avpkt.size)
        bFlush = FALSE; // End flushing, no more frames
      av_frame_unref(m_pFrame);
      continue;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Determine the proper timestamps for the frame, based on different possible flags.
    ///////////////////////////////////////////////////////////////////////////////////////////////
    if (m_bFFReordering) {
      rtStart = m_pFrame->pkt_pts;
      if (m_pFrame->pkt_duration)
        rtStop = m_pFrame->pkt_pts + m_pFrame->pkt_duration;
      else
        rtStop = AV_NOPTS_VALUE;
    } else if (m_bBFrameDelay && m_pAVCtx->has_b_frames) {
      rtStart = m_tcBFrameDelay[m_nBFramePos].rtStart;
      rtStop  = m_tcBFrameDelay[m_nBFramePos].rtStop;
    } else if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
      unsigned index = m_CurrentThread;
      rtStart = m_tcThreadBuffer[index].rtStart;
      rtStop  = m_tcThreadBuffer[index].rtStop;
    }

    if (m_bRVDropBFrameTimings && m_pFrame->pict_type == AV_PICTURE_TYPE_B) {
      rtStart = AV_NOPTS_VALUE;
    }

    if (m_bCalculateStopTime)
      rtStop = AV_NOPTS_VALUE;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // All required values collected, deliver the frame
    ///////////////////////////////////////////////////////////////////////////////////////////////
    LAVFrame *pOutFrame = nullptr;
    AllocateFrame(&pOutFrame);

    AVRational display_aspect_ratio;
    int64_t num = (int64_t)m_pFrame->sample_aspect_ratio.num * m_pFrame->width;
    int64_t den = (int64_t)m_pFrame->sample_aspect_ratio.den * m_pFrame->height;
    av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, INT_MAX);

    pOutFrame->width        = m_pFrame->width;
    pOutFrame->height       = m_pFrame->height;
    pOutFrame->aspect_ratio = display_aspect_ratio;
    pOutFrame->repeat       = m_pFrame->repeat_pict;
    pOutFrame->key_frame    = m_pFrame->key_frame;
    pOutFrame->frame_type   = av_get_picture_type_char(m_pFrame->pict_type);
    pOutFrame->ext_format   = GetDXVA2ExtendedFlags(m_pAVCtx, m_pFrame);

    if (m_pFrame->interlaced_frame || (!m_pAVCtx->progressive_sequence && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)))
      m_iInterlaced = 1;
    else if (m_pAVCtx->progressive_sequence)
      m_iInterlaced = 0;

    if ((m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO) && m_pFrame->repeat_pict)
      m_nSoftTelecine = 2;
    else if (m_nSoftTelecine > 0)
      m_nSoftTelecine--;

    // Don't apply aggressive deinterlacing to content that looks soft-telecined, as it would destroy the content
    bool bAggressiveFlag    = (m_iInterlaced == 1 && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) && !m_nSoftTelecine;

    pOutFrame->interlaced   = (m_pFrame->interlaced_frame || bAggressiveFlag || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

    LAVDeintFieldOrder fo   = m_pSettings->GetDeintFieldOrder();
    pOutFrame->tff          = (fo == DeintFieldOrder_Auto) ? m_pFrame->top_field_first : (fo == DeintFieldOrder_TopFieldFirst);

    pOutFrame->rtStart      = rtStart;
    pOutFrame->rtStop       = rtStop;

    PixelFormatMapping map  = getPixFmtMapping((AVPixelFormat)m_pFrame->format);
    pOutFrame->format       = map.lavpixfmt;
    pOutFrame->bpp          = map.bpp;

    if (m_nCodecId == AV_CODEC_ID_MPEG2VIDEO || m_nCodecId == AV_CODEC_ID_MPEG1VIDEO)
      pOutFrame->avgFrameDuration = GetFrameDuration();

    AVFrameSideData * sdHDR = av_frame_get_side_data(m_pFrame, AV_FRAME_DATA_HDR_MASTERING_INFO);
    if (sdHDR) {
      if (sdHDR->size == 24) {
        MediaSideDataHDR * hdr = (MediaSideDataHDR *)AddLAVFrameSideData(pOutFrame, IID_MediaSideDataHDR, sizeof(MediaSideDataHDR));
        if (hdr) {
          CByteParser hdrParser(sdHDR->data, sdHDR->size);
          for (int i = 0; i < 3; i++)
          {
            hdr->display_primaries_x[i] = hdrParser.BitRead(16) * 0.00002;
            hdr->display_primaries_y[i] = hdrParser.BitRead(16) * 0.00002;
          }
          hdr->white_point_x = hdrParser.BitRead(16) * 0.00002;
          hdr->white_point_y = hdrParser.BitRead(16) * 0.00002;
          hdr->max_display_mastering_luminance = hdrParser.BitRead(32) * 0.0001;
          hdr->min_display_mastering_luminance = hdrParser.BitRead(32) * 0.0001;
        }
      }
      else {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found HDR data of an unexpected size (%d)", sdHDR->size));
      }
    }

    if (map.conversion) {
      ConvertPixFmt(m_pFrame, pOutFrame);
    } else {
      AVFrame *pFrameRef = av_frame_alloc();
      av_frame_ref(pFrameRef, m_pFrame);

      for (int i = 0; i < 4; i++) {
        pOutFrame->data[i]   = pFrameRef->data[i];
        pOutFrame->stride[i] = pFrameRef->linesize[i];
      }

      pOutFrame->priv_data = pFrameRef;
      pOutFrame->destruct = lav_avframe_free;

      // Check alignment on rawvideo, which can be off depending on the source file
      if (m_nCodecId == AV_CODEC_ID_RAWVIDEO) {
        for (int i = 0; i < 4; i++) {
          if ((intptr_t)pOutFrame->data[i] % 16u || pOutFrame->stride[i] % 16u) {
            // copy the frame, its not aligned properly and would crash later
            CopyLAVFrameInPlace(pOutFrame);
            break;
          }
        }
      }
    }

    if (bEndOfSequence)
      pOutFrame->flags |= LAV_FRAME_FLAG_END_OF_SEQUENCE;

    if (pOutFrame->format == LAVPixFmt_DXVA2) {
      pOutFrame->data[0] = m_pFrame->data[4];
      HandleDXVA2Frame(pOutFrame);
    } else {
      Deliver(pOutFrame);
    }

    if (bEndOfSequence) {
      bEndOfSequence = FALSE;
      if (pOutFrame->format == LAVPixFmt_DXVA2) {
        HandleDXVA2Frame(m_pCallback->GetFlushFrame());
      } else {
        Deliver(m_pCallback->GetFlushFrame());
      }
    }

    if (bFlush) {
      m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
    }
    av_frame_unref(m_pFrame);
  }

  return S_OK;
}