Code example #1
int real_get_rdt_chunk(rtsp_t *rtsp_session, char **buffer, int rdt_rawdata) {

  int n=1;
  uint8_t header[8];
  rmff_pheader_t ph;
  int size;
  int flags1, flags2;
  int unknown1;
  uint32_t ts;
  static uint32_t prev_ts = -1;
  static int prev_stream_number = -1;

  n=rtsp_read_data(rtsp_session, header, 8);
  if (n<8) return 0;
  if (header[0] != 0x24)
  {
    mp_msg(MSGT_STREAM, MSGL_WARN, "realrtsp: rdt chunk not recognized: got 0x%02x\n",
      header[0]);
    return 0;
  }
  /* header[1] is channel, normally 0, ignored */
  size=(header[2]<<8)+header[3];
  flags1=header[4];
  if ((flags1 & 0xc0) != 0x40)
  {
#ifdef LOG
    printf("got flags1: 0x%02x\n",flags1);
#endif
    if(header[6] == 0x06) { // eof packet
      rtsp_read_data(rtsp_session, header, 7); // Skip the rest of the eof packet
      /* Some files have short auxiliary streams; we must ignore EOF packets
       * for those streams to avoid a premature EOF.
       * The code now declares EOF only when the stream with id == 0 reaches
       * EOF (the old behavior was EOF on the first EOF packet received).
       */
      if(flags1 & 0x7c) // ignore eof for streams with id != 0
        return 0;
      mp_msg(MSGT_STREAM, MSGL_INFO, "realrtsp: Stream EOF detected\n");
      return -1;
    }
    header[0]=header[5];
    header[1]=header[6];
    header[2]=header[7];
    n=rtsp_read_data(rtsp_session, header+3, 5);
    if (n<5) return 0;
#ifdef LOG
    printf("ignoring bytes:\n");
    hexdump(header, 8);
#endif
    n=rtsp_read_data(rtsp_session, header+4, 4);
    if (n<4) return 0;
    flags1=header[4];
    size-=9;
  }
  flags2=header[7];
  // header[5..6] == frame number in stream
  unknown1=(header[5]<<16)+(header[6]<<8)+(header[7]);
  n=rtsp_read_data(rtsp_session, header, 6);
  if (n<6) return 0;
  ts=AV_RB32(header);

#ifdef LOG
  printf("ts: %u, size: %u, flags: 0x%02x, unknown values: 0x%06x 0x%02x 0x%02x\n",
          ts, size, flags1, unknown1, header[4], header[5]);
#endif
  size+=2;

  ph.object_version=0;
  ph.length=size;
  ph.stream_number=(flags1>>1)&0x1f;
  ph.timestamp=ts;
  ph.reserved=0;
  if ((flags2&1) == 0 && (prev_ts != ts || prev_stream_number != ph.stream_number))
  {
    prev_ts = ts;
    prev_stream_number = ph.stream_number;
    ph.flags=2;
  }
  else
    ph.flags=0;
  *buffer = xbuffer_ensure_size(*buffer, 12+size);
  if(rdt_rawdata) {
      if (size < 12)
          return 0;
    n=rtsp_read_data(rtsp_session, *buffer, size-12);
    return (n <= 0) ? 0 : n;
  }
  rmff_dump_pheader(&ph, *buffer);
  if (size < 12)
      return 0;
  size-=12;
  n=rtsp_read_data(rtsp_session, (*buffer)+12, size);

  return (n <= 0) ? 0 : n+12;
}
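For context on what this function emits: rmff_dump_pheader() serializes the rmff_pheader_t filled in above into the 12 bytes reserved at the start of the buffer. Below is a stand-alone sketch of that serialization, assuming the standard big-endian RMFF packet-header layout; the struct and function names are illustrative, not the original typedefs, and the keyframe meaning of flags 0x02 is the RMFF convention.

#include <stdint.h>

/* Illustrative layout of the 12-byte RMFF packet header written above
 * (all fields big-endian; flags 0x02 conventionally marks a keyframe). */
typedef struct {
    uint16_t object_version;  /* 0 */
    uint16_t length;          /* header + payload length */
    uint16_t stream_number;
    uint32_t timestamp;       /* milliseconds */
    uint8_t  reserved;
    uint8_t  flags;
} rm_pheader;

/* Hypothetical equivalent of rmff_dump_pheader(): serialize into out[12]. */
static void dump_pheader(const rm_pheader *h, uint8_t *out)
{
    out[0]  = h->object_version >> 8;  out[1]  = h->object_version & 0xff;
    out[2]  = h->length >> 8;          out[3]  = h->length & 0xff;
    out[4]  = h->stream_number >> 8;   out[5]  = h->stream_number & 0xff;
    out[6]  = h->timestamp >> 24;      out[7]  = (h->timestamp >> 16) & 0xff;
    out[8]  = (h->timestamp >> 8) & 0xff;  out[9] = h->timestamp & 0xff;
    out[10] = h->reserved;             out[11] = h->flags;
}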
Code example #2
File: electronicarts.c Project: Armada651/FFmpeg
static int ea_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb    = s->pb;
    int partial_packet = 0;
    unsigned int chunk_type, chunk_size;
    int ret = 0, packet_read = 0, key = 0;
    int av_uninit(num_samples);

    while (!packet_read || partial_packet) {
        chunk_type = avio_rl32(pb);
        chunk_size = ea->big_endian ? avio_rb32(pb) : avio_rl32(pb);
        if (chunk_size <= 8)
            return AVERROR_INVALIDDATA;
        chunk_size -= 8;

        switch (chunk_type) {
        /* audio data */
        case ISNh_TAG:
            /* header chunk also contains data; skip over the header portion */
            if (chunk_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 32);
            chunk_size -= 32;
        case ISNd_TAG:
        case SCDl_TAG:
        case SNDC_TAG:
        case SDEN_TAG:
            if (!ea->audio_codec) {
                avio_skip(pb, chunk_size);
                break;
            } else if (ea->audio_codec == AV_CODEC_ID_PCM_S16LE_PLANAR ||
                       ea->audio_codec == AV_CODEC_ID_MP3) {
                num_samples = avio_rl32(pb);
                avio_skip(pb, 8);
                chunk_size -= 12;
            }
            if (partial_packet) {
                avpriv_request_sample(s, "video header followed by audio packet");
                av_free_packet(pkt);
                partial_packet = 0;
            }
            ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0)
                return ret;
            pkt->stream_index = ea->audio_stream_index;

            switch (ea->audio_codec) {
            case AV_CODEC_ID_ADPCM_EA:
            case AV_CODEC_ID_ADPCM_EA_R1:
            case AV_CODEC_ID_ADPCM_EA_R2:
            case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
                if (pkt->size >= 4)
                    pkt->duration = AV_RL32(pkt->data);
                break;
            case AV_CODEC_ID_ADPCM_EA_R3:
                if (pkt->size >= 4)
                    pkt->duration = AV_RB32(pkt->data);
                break;
            case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
                pkt->duration = ret * 2 / ea->num_channels;
                break;
            case AV_CODEC_ID_PCM_S16LE_PLANAR:
            case AV_CODEC_ID_MP3:
                pkt->duration = num_samples;
                break;
            default:
                pkt->duration = chunk_size / (ea->bytes * ea->num_channels);
            }

            packet_read = 1;
            break;

        /* ending tag */
        case 0:
        case ISNe_TAG:
        case SCEl_TAG:
        case SEND_TAG:
        case SEEN_TAG:
            ret         = AVERROR(EIO);
            packet_read = 1;
            break;

        case MVIh_TAG:
        case kVGT_TAG:
        case pQGT_TAG:
        case TGQs_TAG:
        case MADk_TAG:
            key = AV_PKT_FLAG_KEY;
        case MVIf_TAG:
        case fVGT_TAG:
        case MADm_TAG:
        case MADe_TAG:
            avio_seek(pb, -8, SEEK_CUR);    // include chunk preamble
            chunk_size += 8;
            goto get_video_packet;

        case mTCD_TAG:
            avio_skip(pb, 8);               // skip ea DCT header
            chunk_size -= 8;
            goto get_video_packet;

        case MV0K_TAG:
        case MPCh_TAG:
        case pIQT_TAG:
            key = AV_PKT_FLAG_KEY;
        case MV0F_TAG:
get_video_packet:
            if (partial_packet) {
                ret = av_append_packet(pb, pkt, chunk_size);
            } else
                ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0) {
                packet_read = 1;
                break;
            }
            partial_packet = chunk_type == MVIh_TAG;
            pkt->stream_index = ea->video_stream_index;
            pkt->flags       |= key;
            packet_read       = 1;
            break;

        default:
            avio_skip(pb, chunk_size);
            break;
        }
    }

    if (ret < 0 && partial_packet)
        av_free_packet(pkt);
    return ret;
}
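The loop above follows the usual chunk-walk pattern shared by most demuxers on this page: read a 4-byte tag and a 32-bit size, dispatch on the tag, and skip anything unrecognized. Here is a stripped-down sketch of the same pattern over an in-memory buffer; walk_chunks is an illustrative helper, and tag/size endianness is container-specific (EA files use little-endian tags and, depending on a header flag, little- or big-endian sizes that include the 8-byte preamble).

#include <stdint.h>
#include <stdio.h>

/* Sketch: walk chunks in an in-memory buffer. Each chunk is an 8-byte
 * preamble (4-byte tag, 32-bit little-endian size that includes the
 * preamble) followed by payload; unknown chunks are skipped by size. */
static void walk_chunks(const uint8_t *buf, size_t len)
{
    size_t pos = 0;
    while (len - pos >= 8) {
        uint32_t size = (uint32_t)buf[pos + 4]       |
                        (uint32_t)buf[pos + 5] << 8  |
                        (uint32_t)buf[pos + 6] << 16 |
                        (uint32_t)buf[pos + 7] << 24;
        if (size < 8 || size > len - pos)  /* reject malformed sizes */
            break;
        printf("chunk %.4s, %u bytes payload\n",
               (const char *)&buf[pos], (unsigned)(size - 8));
        pos += size;                       /* next chunk */
    }
}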
Code example #3
File: soxdec.c Project: komh/kmp
static int sox_probe(AVProbeData *p)
{
    if (AV_RL32(p->buf) == SOX_TAG || AV_RB32(p->buf) == SOX_TAG)
        return AVPROBE_SCORE_MAX;
    return 0;
}
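Nearly every example on this page leans on AV_RB32/AV_RL32 from FFmpeg's libavutil/intreadwrite.h, which read four bytes as a 32-bit big- or little-endian integer regardless of host byte order or alignment. A minimal stand-alone equivalent follows; this is a sketch, not the actual macros, which have per-platform optimized variants.

#include <stdint.h>

/* Minimal equivalents of AV_RB32 / AV_RL32: assemble four bytes into a
 * 32-bit value, big- or little-endian, independent of host endianness. */
static inline uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
           (uint32_t)p[2] <<  8 | (uint32_t)p[3];
}

static inline uint32_t rl32(const uint8_t *p)
{
    return (uint32_t)p[3] << 24 | (uint32_t)p[2] << 16 |
           (uint32_t)p[1] <<  8 | (uint32_t)p[0];
}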
Code example #4
File: LAVFVideoHelper.cpp Project: cynics/LAVFilters
VIDEOINFOHEADER2 *CLAVFVideoHelper::CreateVIH2(const AVStream* avstream, ULONG *size, std::string container)
{
  int extra = 0;
  BYTE *extradata = NULL;
  BOOL bZeroPad = FALSE;
  if (avstream->codec->codec_id == AV_CODEC_ID_VC1 && avstream->codec->extradata_size) {
    int i = 0;
    for (i = 0; i < (avstream->codec->extradata_size-4); i++) {
      uint32_t code = AV_RB32(avstream->codec->extradata + i);
      if (IS_VC1_MARKER(code))
        break;
    }
    if (i == 0) {
      bZeroPad = TRUE;
    } else if (i > 1) {
      DbgLog((LOG_TRACE, 10, L"CLAVFVideoHelper::CreateVIH2(): VC-1 extradata does not start at position 0/1, but %d", i));
    }
  }

  // Create a VIH that we'll convert
  VIDEOINFOHEADER *vih = CreateVIH(avstream, size, container);
  if (!vih) return NULL;

  if(avstream->codec->extradata_size > 0) {
    extra = avstream->codec->extradata_size;
    //increase extra size by one, because VIH2 requires one 0 byte between header and extra data
    if (bZeroPad) {
      extra++;
    }

    extradata = avstream->codec->extradata;
  }

  VIDEOINFOHEADER2 *vih2 = (VIDEOINFOHEADER2 *)CoTaskMemAlloc(sizeof(VIDEOINFOHEADER2) + extra); 
  if (!vih2) return NULL;
  memset(vih2, 0, sizeof(VIDEOINFOHEADER2));

  vih2->rcSource = vih->rcSource;
  vih2->rcTarget = vih->rcTarget;
  vih2->dwBitRate = vih->dwBitRate;
  vih2->dwBitErrorRate = vih->dwBitErrorRate;
  vih2->AvgTimePerFrame = vih->AvgTimePerFrame;

  // Calculate aspect ratio
  AVRational r = avstream->sample_aspect_ratio;
  AVRational rc = avstream->codec->sample_aspect_ratio;
  int num = vih->bmiHeader.biWidth, den = vih->bmiHeader.biHeight;
  if (r.den > 0 && r.num > 0) {
    av_reduce(&num, &den, (int64_t)r.num * num, (int64_t)r.den * den, 255);
  } else if (rc.den > 0 && rc.num > 0) {
    av_reduce(&num, &den, (int64_t)rc.num * num, (int64_t)rc.den * den, 255);
  } else {
    if (avstream->codec->codec_id == AV_CODEC_ID_RV40) {
      AVDictionaryEntry *w = av_dict_get(avstream->metadata, "rm_width", NULL, 0);
      AVDictionaryEntry *h = av_dict_get(avstream->metadata, "rm_height", NULL, 0);
      if (w && h) {
        num = atoi(w->value);
        den = atoi(h->value);
      }
    }
    av_reduce(&num, &den, num, den, num);
  }
  vih2->dwPictAspectRatioX = num;
  vih2->dwPictAspectRatioY = den;

  memcpy(&vih2->bmiHeader, &vih->bmiHeader, sizeof(BITMAPINFOHEADER));
  vih2->bmiHeader.biSize = sizeof(BITMAPINFOHEADER) + extra;

  vih2->dwInterlaceFlags = 0;
  vih2->dwCopyProtectFlags = 0;
  vih2->dwControlFlags = 0;
  vih2->dwReserved2 = 0;

  if(extra) {
    // The first byte after the infoheader has to be 0 in mpeg-ts
    if (bZeroPad) {
      *((BYTE*)vih2 + sizeof(VIDEOINFOHEADER2)) = 0;
      // after that, the extradata .. size reduced by one again
      memcpy((BYTE*)vih2 + sizeof(VIDEOINFOHEADER2) + 1, extradata, extra - 1);
    } else {
      memcpy((BYTE*)vih2 + sizeof(VIDEOINFOHEADER2), extradata, extra);
    }
  }

  // Free the VIH that we converted
  CoTaskMemFree(vih);

  *size = sizeof(VIDEOINFOHEADER2) + extra;
  return vih2;
}
Code example #5
/* aiff input */
static int aiff_read_header(AVFormatContext *s)
{
    int ret, size, filesize;
    int64_t offset = 0, position;
    uint32_t tag;
    unsigned version = AIFF_C_VERSION1;
    AVIOContext *pb = s->pb;
    AVStream * st;
    AIFFInputContext *aiff = s->priv_data;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    /* check FORM header */
    filesize = get_tag(pb, &tag);
    if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
        return AVERROR_INVALIDDATA;

    /* AIFF data type */
    tag = avio_rl32(pb);
    if (tag == MKTAG('A', 'I', 'F', 'F'))       /* Got an AIFF file */
        version = AIFF;
    else if (tag != MKTAG('A', 'I', 'F', 'C'))  /* An AIFF-C file then */
        return AVERROR_INVALIDDATA;

    filesize -= 4;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    while (filesize > 0) {
        /* parse different chunks */
        size = get_tag(pb, &tag);
        if (size < 0)
            return size;

        filesize -= size + 8;

        switch (tag) {
        case MKTAG('C', 'O', 'M', 'M'):     /* Common chunk */
            /* Then for the complete header info */
            st->nb_frames = get_aiff_header(s, size, version);
            if (st->nb_frames < 0)
                return st->nb_frames;
            if (offset > 0) // COMM is after SSND
                goto got_sound;
            break;
        case MKTAG('I', 'D', '3', ' '):
            position = avio_tell(pb);
            ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
            if (id3v2_extra_meta)
                if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) {
                    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
                    return ret;
                }
            ff_id3v2_free_extra_meta(&id3v2_extra_meta);
            if (position + size > avio_tell(pb))
                avio_skip(pb, position + size - avio_tell(pb));
            break;
        case MKTAG('F', 'V', 'E', 'R'):     /* Version chunk */
            version = avio_rb32(pb);
            break;
        case MKTAG('N', 'A', 'M', 'E'):     /* Sample name chunk */
            get_meta(s, "title"    , size);
            break;
        case MKTAG('A', 'U', 'T', 'H'):     /* Author chunk */
            get_meta(s, "author"   , size);
            break;
        case MKTAG('(', 'c', ')', ' '):     /* Copyright chunk */
            get_meta(s, "copyright", size);
            break;
        case MKTAG('A', 'N', 'N', 'O'):     /* Annotation chunk */
            get_meta(s, "comment"  , size);
            break;
        case MKTAG('S', 'S', 'N', 'D'):     /* Sampled sound chunk */
            aiff->data_end = avio_tell(pb) + size;
            offset = avio_rb32(pb);      /* Offset of sound data */
            avio_rb32(pb);               /* BlockSize... don't care */
            offset += avio_tell(pb);    /* Compute absolute data offset */
            if (st->codec->block_align && !pb->seekable)    /* Assume COMM already parsed */
                goto got_sound;
            if (!pb->seekable) {
                av_log(s, AV_LOG_ERROR, "file is not seekable\n");
                return -1;
            }
            avio_skip(pb, size - 8);
            break;
        case MKTAG('w', 'a', 'v', 'e'):
            if ((uint64_t)size > (1<<30))
                return -1;
            st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            st->codec->extradata_size = size;
            avio_read(pb, st->codec->extradata, size);
            if (st->codec->codec_id == AV_CODEC_ID_QDM2 && size>=12*4 && !st->codec->block_align) {
                st->codec->block_align = AV_RB32(st->codec->extradata+11*4);
                aiff->block_duration = AV_RB32(st->codec->extradata+9*4);
            } else if (st->codec->codec_id == AV_CODEC_ID_QCELP) {
                char rate = 0;
                if (size >= 25)
                    rate = st->codec->extradata[24];
                switch (rate) {
                case 'H': // RATE_HALF
                    st->codec->block_align = 17;
                    break;
                case 'F': // RATE_FULL
                default:
                    st->codec->block_align = 35;
                }
                aiff->block_duration = 160;
                st->codec->bit_rate = st->codec->sample_rate * (st->codec->block_align << 3) /
                                      aiff->block_duration;
            }
            break;
        case MKTAG('C','H','A','N'):
            if(ff_mov_read_chan(s, pb, st, size) < 0)
                return AVERROR_INVALIDDATA;
            break;
        default: /* Jump */
            if (size & 1)   /* Always even aligned */
                size++;
            avio_skip(pb, size);
        }
    }

got_sound:
    if (!st->codec->block_align) {
        av_log(s, AV_LOG_ERROR, "could not find COMM tag or invalid block_align value\n");
        return -1;
    }

    /* Now positioned, get the sound data start and end */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;
    st->duration = st->nb_frames * aiff->block_duration;

    /* Position the stream at the first block */
    avio_seek(pb, offset, SEEK_SET);

    return 0;
}
Code example #6
File: flacdec.c Project: GenomeXP/mpc-hc
static int flac_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FLACContext *s = avctx->priv_data;
    int bytes_read = 0;
    int ret;

    *got_frame_ptr = 0;

    if (s->max_framesize == 0) {
        s->max_framesize =
            ff_flac_get_max_frame_size(s->max_blocksize ? s->max_blocksize : FLAC_MAX_BLOCKSIZE,
                                       FLAC_MAX_CHANNELS, 32);
    }

    if (buf_size > 5 && !memcmp(buf, "\177FLAC", 5)) {
        av_log(s->avctx, AV_LOG_DEBUG, "skiping flac header packet 1\n");
        return buf_size;
    }

    if (buf_size > 0 && (*buf & 0x7F) == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
        av_log(s->avctx, AV_LOG_DEBUG, "skiping vorbis comment\n");
        return buf_size;
    }

    /* check that there is at least the smallest decodable amount of data.
       this amount corresponds to the smallest valid FLAC frame possible.
       FF F8 69 02 00 00 9A 00 00 34 46 */
    if (buf_size < FLAC_MIN_FRAME_SIZE)
        return buf_size;

    /* check for inline header */
    if (AV_RB32(buf) == MKBETAG('f','L','a','C')) {
        if (!s->got_streaminfo && parse_streaminfo(s, buf, buf_size)) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header\n");
            return -1;
        }
        return get_metadata_size(buf, buf_size);
    }

    /* decode frame */
    init_get_bits(&s->gb, buf, buf_size*8);
    if (decode_frame(s) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n");
        return -1;
    }
    bytes_read = (get_bits_count(&s->gb)+7)/8;

    /* get output buffer */
    frame->nb_samples = s->blocksize;
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded, s->channels,
                                   s->blocksize, s->sample_shift);

    if (bytes_read > buf_size) {
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
        return -1;
    }
    if (bytes_read < buf_size) {
        av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
               buf_size - bytes_read, buf_size);
    }

    *got_frame_ptr = 1;

    return bytes_read;
}
Code example #7
File: wmv9.cpp Project: betaking/LAVFilters
static AVPictureType parse_picture_type(const uint8_t *buf, int buflen, CVC1HeaderParser *vc1Header)
{
  AVPictureType pictype = AV_PICTURE_TYPE_NONE;
  int skipped = 0;
  const BYTE *framestart = buf;
  if (IS_MARKER(AV_RB32(buf))) {
    framestart = NULL;
    const BYTE *start, *end, *next;
    next = buf;
    for (start = buf, end = buf + buflen; next < end; start = next) {
      if (AV_RB32(start) == VC1_CODE_FRAME) {
        framestart = start + 4;
        break;
      }
      next = find_next_marker(start + 4, end);
    }
  }
  if (framestart) {
    GetBitContext gb;
    init_get_bits(&gb, framestart, (buflen - (framestart-buf))*8);
    if (vc1Header->hdr.profile == PROFILE_ADVANCED) {
      int fcm = PROGRESSIVE;
      if (vc1Header->hdr.interlaced)
        fcm = decode012(&gb);
      if (fcm == ILACE_FIELD) {
        int fptype = get_bits(&gb, 3);
        pictype = (fptype & 2) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        if (fptype & 4) // B-picture
          pictype = (fptype & 2) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
      } else {
        switch (get_unary(&gb, 0, 4)) {
        case 0:
            pictype = AV_PICTURE_TYPE_P;
            break;
        case 1:
            pictype = AV_PICTURE_TYPE_B;
            break;
        case 2:
            pictype = AV_PICTURE_TYPE_I;
            break;
        case 3:
            pictype = AV_PICTURE_TYPE_BI;
            break;
        case 4:
            pictype = AV_PICTURE_TYPE_P; // skipped pic
            skipped = 1;
            break;
        }
      }
    } else {
      if (vc1Header->hdr.finterp)
        skip_bits1(&gb);
      skip_bits(&gb, 2); // framecnt
      if (vc1Header->hdr.rangered)
        skip_bits1(&gb);
      int pic = get_bits1(&gb);
      if (vc1Header->hdr.bframes) {
        if (!pic) {
          if (get_bits1(&gb)) {
            pictype = AV_PICTURE_TYPE_I;
          } else {
            pictype = AV_PICTURE_TYPE_B;
          }
        } else {
          pictype = AV_PICTURE_TYPE_P;
        }
      } else {
        pictype = pic ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
      }
    }
  }
  return pictype;
}
Code example #8
File: sunrast.c Project: simock85/libav
static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
                                int *data_size, AVPacket *avpkt) {
    const uint8_t *buf       = avpkt->data;
    const uint8_t *buf_end   = avpkt->data + avpkt->size;
    SUNRASTContext * const s = avctx->priv_data;
    AVFrame *picture         = data;
    AVFrame * const p        = &s->picture;
    unsigned int w, h, depth, type, maptype, maplength, stride, x, y, len, alen;
    uint8_t *ptr;
    const uint8_t *bufstart = buf;

    if (avpkt->size < 32)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(buf) != RAS_MAGIC) {
        av_log(avctx, AV_LOG_ERROR, "this is not sunras encoded data\n");
        return -1;
    }

    w         = AV_RB32(buf + 4);
    h         = AV_RB32(buf + 8);
    depth     = AV_RB32(buf + 12);
    type      = AV_RB32(buf + 20);
    maptype   = AV_RB32(buf + 24);
    maplength = AV_RB32(buf + 28);
    buf      += 32;

    if (type == RT_FORMAT_TIFF || type == RT_FORMAT_IFF || type == RT_EXPERIMENTAL) {
        av_log_ask_for_sample(avctx, "unsupported (compression) type\n");
        return AVERROR_PATCHWELCOME;
    }
    if (type > RT_FORMAT_IFF) {
        av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
        return -1;
    }
    if (av_image_check_size(w, h, 0, avctx)) {
        av_log(avctx, AV_LOG_ERROR, "invalid image size\n");
        return -1;
    }
    if (maptype & ~1) {
        av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n");
        return -1;
    }


    switch (depth) {
        case 1:
            avctx->pix_fmt = PIX_FMT_MONOWHITE;
            break;
        case 8:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        case 24:
            avctx->pix_fmt = (type == RT_FORMAT_RGB) ? PIX_FMT_RGB24 : PIX_FMT_BGR24;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid depth\n");
            return -1;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = AV_PICTURE_TYPE_I;

    if (buf_end - buf < maplength)
        return AVERROR_INVALIDDATA;

    if (depth != 8 && maplength) {
        av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n");

    } else if (depth == 8) {
        unsigned int len = maplength / 3;

        if (!maplength) {
            av_log(avctx, AV_LOG_ERROR, "colormap expected\n");
            return -1;
        }
        if (maplength % 3 || maplength > 768) {
            av_log(avctx, AV_LOG_WARNING, "invalid colormap length\n");
            return -1;
        }

        ptr = p->data[1];
        for (x = 0; x < len; x++, ptr += 4)
            *(uint32_t *)ptr = (buf[x] << 16) + (buf[len + x] << 8) + buf[len + len + x];
    }

    buf += maplength;

    ptr    = p->data[0];
    stride = p->linesize[0];

    /* scanlines are aligned on 16 bit boundaries */
    len  = (depth * w + 7) >> 3;
    alen = len + (len & 1);

    if (type == RT_BYTE_ENCODED) {
        int value, run;
        uint8_t *end = ptr + h * stride;

        x = 0;
        while (ptr != end && buf < buf_end) {
            run = 1;
            if (buf_end - buf < 1)
                return AVERROR_INVALIDDATA;

            if ((value = *buf++) == 0x80) {
                run = *buf++ + 1;
                if (run != 1)
                    value = *buf++;
            }
            while (run--) {
                if (x < len)
                    ptr[x] = value;
                if (++x >= alen) {
                    x = 0;
                    ptr += stride;
                    if (ptr == end)
                        break;
                }
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            if (buf_end - buf < len)
                break;
            memcpy(ptr, buf, len);
            ptr += stride;
            buf += alen;
        }
    }

    *picture   = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}
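The RT_BYTE_ENCODED branch above implements Sun Raster RLE: 0x80 is an escape byte, the pair 0x80 0x00 encodes a literal 0x80, and the triple 0x80 N V expands to N+1 copies of V. Below is a self-contained sketch of just that expansion; sunrast_rle_decode is a hypothetical helper, and the real decoder above additionally handles scanline padding and row stride.

#include <stddef.h>
#include <stdint.h>

/* Sketch of Sun Raster RT_BYTE_ENCODED RLE expansion. Returns the number
 * of bytes written to dst. */
static size_t sunrast_rle_decode(const uint8_t *src, size_t srclen,
                                 uint8_t *dst, size_t dstlen)
{
    size_t si = 0, di = 0;
    while (si < srclen && di < dstlen) {
        uint8_t v = src[si++];
        size_t run = 1;
        if (v == 0x80) {                  /* escape byte */
            if (si >= srclen)
                break;
            run = (size_t)src[si++] + 1;  /* 0x80 0x00 -> literal 0x80 */
            if (run != 1) {
                if (si >= srclen)
                    break;
                v = src[si++];            /* value to repeat */
            }
        }
        while (run-- && di < dstlen)
            dst[di++] = v;
    }
    return di;
}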
Code example #9
File: libschroedingerenc.c Project: rkrishna1/libav
static int libschroedinger_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                        const AVFrame *frame, int *got_packet)
{
    int enc_size = 0;
    SchroEncoderParams *p_schro_params = avctx->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FFSchroEncodedFrame *p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;
    int pkt_size, ret;

    if (!frame) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avctx, frame);
        if (!in_frame)
            return AVERROR(ENOMEM);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        int err;
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            if (enc_buf->length <= 0)
                return AVERROR_BUG;
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            if ((err = av_reallocp(&p_schro_params->enc_buf,
                                   p_schro_params->enc_buf_size +
                                   enc_buf->length)) < 0) {
                p_schro_params->enc_buf_size = 0;
                return err;
            }

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;


            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame. */
            p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame));
            if (!p_frame_output)
                return AVERROR(ENOMEM);
            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                     p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);

            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;

        case SCHRO_STATE_AGAIN:
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Copy 'next' frame in queue. */

    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

    pkt_size = p_frame_output->size;
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
        pkt_size += p_schro_params->enc_buf_size;
    if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", pkt_size);
        goto error;
    }

    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->key_frame = p_frame_output->key_frame;
    avctx->coded_frame->pts = p_frame_output->frame_num;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    /* Use the frame number of the encoded frame as the pts. It is OK to
     * do so since Dirac is a constant frame rate codec. It expects input
     * to be of constant frame rate. */
    pkt->pts = p_frame_output->frame_num;
    pkt->dts = p_schro_params->dts++;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    if (p_frame_output->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

error:
    /* free frame */
    libschroedinger_free_frame(p_frame_output);
    return ret;
}
Code example #10
File: a64.c Project: Gemini88/xbmc-1
static int a64_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *avctx = s->streams[0]->codec;
    A64MuxerContext *c = s->priv_data;
    int i, j;
    int ch_chunksize;
    int lifetime;
    int frame_count;
    int charset_size;
    int frame_size;
    int num_frames;

    /* fetch values from extradata */
    switch (avctx->codec->id) {
    case CODEC_ID_A64_MULTI:
    case CODEC_ID_A64_MULTI5:
        if(c->interleaved) {
            /* Write interleaved: insert chunks of the upcoming charset before each
             * current frame. Reason: if we loaded one charset plus its frames in a
             * single block on the c64, we would have to store them first and then
             * display frame by frame to stay in sync, wasting too much time reading
             * and writing the colram data from/to RAM. If we interleave and send the
             * charset beforehand instead, we can assemble the new charset chunk by
             * chunk, write the current screen data to screen RAM for display, and
             * decode the colram directly to its location at $d800 during the
             * overscan, while reading straight from the source.
             * So far this is the only way to achieve 25fps on the c64. */
            if(avctx->extradata) {
                /* fetch values from extradata */
                lifetime     = AV_RB32(avctx->extradata + 0);
                frame_count  = AV_RB32(avctx->extradata + 4);
                charset_size = AV_RB32(avctx->extradata + 8);
                frame_size   = AV_RB32(avctx->extradata + 12);

                /* TODO: sanity checks? */
            } else {
                av_log(avctx, AV_LOG_ERROR, "extradata not set\n");
                return AVERROR(EINVAL);
            }

            ch_chunksize=charset_size/lifetime;
            /* TODO: check if charset/size is % lifetime, but maybe check in codec */

            if(pkt->data) num_frames = lifetime;
            else num_frames = c->prev_frame_count;

            for(i = 0; i < num_frames; i++) {
                if(pkt->data) {
                    /* if available, put newest charset chunk into buffer */
                    put_buffer(s->pb, pkt->data + ch_chunksize * i, ch_chunksize);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < ch_chunksize; j++) put_byte(s->pb, 0);
                }

                if(c->prev_pkt.data) {
                    /* put frame (screen + colram) from last packet into buffer */
                    put_buffer(s->pb, c->prev_pkt.data + charset_size + frame_size * i, frame_size);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < frame_size; j++) put_byte(s->pb, 0);
                }
            }

            /* backup current packet for next turn */
            if(pkt->data) {
                /* no backup packet yet? create one! */
                if(!c->prev_pkt.data) av_new_packet(&c->prev_pkt, pkt->size);
                /* we have a packet and data is big enough, reuse it */
                if(c->prev_pkt.data && c->prev_pkt.size >= pkt->size) {
                    memcpy(c->prev_pkt.data, pkt->data, pkt->size);
                    c->prev_pkt.size = pkt->size;
                } else {
                    av_log(avctx, AV_LOG_ERROR, "Too less memory for prev_pkt.\n");
                    return AVERROR(ENOMEM);
                }
            }

            c->prev_frame_count = frame_count;
            break;
        }
    default:
        /* Write things as is. Nice for self-contained frames from non-multicolor modes or if played
         * directly from ram and not from a streaming device (rrnet/mmc) */
        if(pkt) put_buffer(s->pb, pkt->data, pkt->size);
        break;
    }

    put_flush_packet(s->pb);
    return 0;
}
Code example #11
File: avuidec.c Project: r-type/vice-libretro
static int avui_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    int ret;
    AVFrame *pic = data;
    const uint8_t *src = avpkt->data, *extradata = avctx->extradata;
    const uint8_t *srca;
    uint8_t *y, *u, *v, *a;
    int transparent, interlaced = 1, skip, opaque_length, i, j, k;
    uint32_t extradata_size = avctx->extradata_size;

    while (extradata_size >= 24) {
        uint32_t atom_size = AV_RB32(extradata);
        if (!memcmp(&extradata[4], "APRGAPRG0001", 12)) {
            interlaced = extradata[19] != 1;
            break;
        }
        if (atom_size && atom_size <= extradata_size) {
            extradata      += atom_size;
            extradata_size -= atom_size;
        } else {
            break;
        }
    }
    if (avctx->height == 486) {
        skip = 10;
    } else {
        skip = 16;
    }
    opaque_length = 2 * avctx->width * (avctx->height + skip) + 4 * interlaced;
    if (avpkt->size < opaque_length) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
        return AVERROR(EINVAL);
    }
    transparent = avctx->bits_per_coded_sample == 32 &&
                  avpkt->size >= opaque_length * 2 + 4;
    srca = src + opaque_length + 5;

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    pic->key_frame = 1;
    pic->pict_type = AV_PICTURE_TYPE_I;

    if (!interlaced) {
        src  += avctx->width * skip;
        srca += avctx->width * skip;
    }

    for (i = 0; i < interlaced + 1; i++) {
        src  += avctx->width * skip;
        srca += avctx->width * skip;
        if (interlaced && avctx->height == 486) {
            y = pic->data[0] + (1 - i) * pic->linesize[0];
            u = pic->data[1] + (1 - i) * pic->linesize[1];
            v = pic->data[2] + (1 - i) * pic->linesize[2];
            a = pic->data[3] + (1 - i) * pic->linesize[3];
        } else {
            y = pic->data[0] + i * pic->linesize[0];
            u = pic->data[1] + i * pic->linesize[1];
            v = pic->data[2] + i * pic->linesize[2];
            a = pic->data[3] + i * pic->linesize[3];
        }

        for (j = 0; j < avctx->height >> interlaced; j++) {
            for (k = 0; k < avctx->width >> 1; k++) {
                u[    k    ] = *src++;
                y[2 * k    ] = *src++;
                a[2 * k    ] = 0xFF - (transparent ? *srca++ : 0);
                srca++;
                v[    k    ] = *src++;
                y[2 * k + 1] = *src++;
                a[2 * k + 1] = 0xFF - (transparent ? *srca++ : 0);
                srca++;
            }

            y += (interlaced + 1) * pic->linesize[0];
            u += (interlaced + 1) * pic->linesize[1];
            v += (interlaced + 1) * pic->linesize[2];
            a += (interlaced + 1) * pic->linesize[3];
        }
        src  += 4;
        srca += 4;
    }
    *got_frame       = 1;

    return avpkt->size;
}
Code example #12
File: mp3dec.c Project: changbiao/libav
static int mp3_read_probe(AVProbeData *p)
{
    int max_frames, first_frames = 0;
    int frames, ret;
    uint32_t header;
    uint8_t *buf, *buf0, *buf2, *end;

    buf0 = p->buf;
    end = p->buf + p->buf_size - sizeof(uint32_t);
    while(buf0 < end && !*buf0)
        buf0++;

    max_frames = 0;
    buf = buf0;

    for(; buf < end; buf= buf2+1) {
        buf2 = buf;

        for(frames = 0; buf2 < end; frames++) {
            MPADecodeHeader h;

            header = AV_RB32(buf2);
            ret = avpriv_mpegaudio_decode_header(&h, header);
            if (ret != 0)
                break;
            buf2 += h.frame_size;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == buf0)
            first_frames= frames;
    }
    // keep this in sync with ac3 probe, both need to avoid
    // issues with MPEG-files!
    if (first_frames >= 10)
        return AVPROBE_SCORE_EXTENSION + 5;
    if (first_frames >= 4)
        return AVPROBE_SCORE_EXTENSION + 1;

    if (max_frames) {
        int pes = 0, i;
        unsigned int code = -1;

#define VIDEO_ID 0x000001e0
#define AUDIO_ID 0x000001c0
        /* do a search for mpegps headers to be able to properly bias
         * towards mpegps if we detect this stream as both. */
        for (i = 0; i<p->buf_size; i++) {
            code = (code << 8) + p->buf[i];
            if ((code & 0xffffff00) == 0x100) {
                if     ((code & 0x1f0) == VIDEO_ID) pes++;
                else if((code & 0x1e0) == AUDIO_ID) pes++;
            }
        }

        if (pes)
            max_frames = (max_frames + pes - 1) / pes;
    }
    if      (max_frames >  500) return AVPROBE_SCORE_EXTENSION;
    else if (max_frames >= 4)   return AVPROBE_SCORE_EXTENSION / 2;
    else if (max_frames >= 1)   return 1;
    else                        return 0;
//mpegps_mp3_unrecognized_format.mpg has max_frames=3
}
Code example #13
File: segafilm.c Project: MichaelH13/sdkpub
static int film_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FilmDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (get_buffer(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (get_buffer(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = 0;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
    } else
        film->video_type = 0;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_sample / 8;
    }

    /* load the sample table */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));

    for(i=0; i<s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (get_buffer(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;

            audio_frame_counter += (film->sample_table[i].sample_size /
                (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
Code example #14
File: segafilm.c Project: changbiao/libav
static int film_read_header(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i, ret;
    unsigned int data_offset;
    unsigned int audio_frame_counter;
    unsigned int video_frame_counter;

    film->sample_table = NULL;

    /* load the main FILM header */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (avio_read(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = AV_CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (avio_read(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        if (!film->audio_channels || film->audio_channels > 2) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid number of channels: %d\n", film->audio_channels);
            return AVERROR_INVALIDDATA;
        }
        film->audio_bits = scratch[22];
        if (scratch[23] == 2)
            film->audio_type = AV_CODEC_ID_ADPCM_ADX;
        else if (film->audio_channels > 0) {
            if (film->audio_bits == 8)
                film->audio_type = AV_CODEC_ID_PCM_S8_PLANAR;
            else if (film->audio_bits == 16)
                film->audio_type = AV_CODEC_ID_PCM_S16BE_PLANAR;
            else
                film->audio_type = AV_CODEC_ID_NONE;
        } else
            film->audio_type = AV_CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = AV_CODEC_ID_CINEPAK;
    } else if (AV_RB32(&scratch[8]) == RAW_TAG) {
        film->video_type = AV_CODEC_ID_RAWVIDEO;
    } else {
        film->video_type = AV_CODEC_ID_NONE;
    }

    /* initialize the decoder streams */
    if (film->video_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);

        if (film->video_type == AV_CODEC_ID_RAWVIDEO) {
            if (scratch[20] == 24) {
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            } else {
                av_log(s, AV_LOG_ERROR, "raw video is using unhandled %dbpp\n", scratch[20]);
                return -1;
            }
        }
    }

    if (film->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->sample_rate = film->audio_samplerate;

        if (film->audio_type == AV_CODEC_ID_ADPCM_ADX) {
            st->codec->bits_per_coded_sample = 18 * 8 / 32;
            st->codec->block_align = st->codec->channels * 18;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else {
            st->codec->bits_per_coded_sample = film->audio_bits;
            st->codec->block_align = st->codec->channels *
                st->codec->bits_per_coded_sample / 8;
        }

        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
    }

    /* load the sample table */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            avpriv_set_pts_info(st, 33, 1, film->base_clock);
        else
            avpriv_set_pts_info(st, 64, 1, film->audio_samplerate);
    }

    audio_frame_counter = video_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (avio_read(pb, scratch, 16) != 16) {
            ret = AVERROR(EIO);
            goto fail;
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (film->sample_table[i].sample_size > INT_MAX / 4) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;

            if (film->audio_type == AV_CODEC_ID_ADPCM_ADX)
                audio_frame_counter += (film->sample_table[i].sample_size * 32 /
                    (18 * film->audio_channels));
            else if (film->audio_type != AV_CODEC_ID_NONE)
                audio_frame_counter += (film->sample_table[i].sample_size /
                    (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
            video_frame_counter++;
            av_add_index_entry(s->streams[film->video_stream_index],
                               film->sample_table[i].sample_offset,
                               film->sample_table[i].pts,
                               film->sample_table[i].sample_size, 0,
                               film->sample_table[i].keyframe);
        }
    }

    if (film->audio_type)
        s->streams[film->audio_stream_index]->duration = audio_frame_counter;

    if (film->video_type)
        s->streams[film->video_stream_index]->duration = video_frame_counter;

    film->current_sample = 0;

    return 0;
fail:
    film_read_close(s);
    return ret;
}
Code example #15
File: westwood.c Project: banketree/faplayer
static int wsvqa_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char *header;
    unsigned char scratch[VQA_PREAMBLE_SIZE];
    unsigned int chunk_tag;
    unsigned int chunk_size;

    /* initialize the video decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
    wsvqa->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_WS_VQA;
    st->codec->codec_tag = 0;  /* no fourcc */

    /* skip to the start of the VQA header */
    url_fseek(pb, 20, SEEK_SET);

    /* the VQA header needs to go to the decoder */
    st->codec->extradata_size = VQA_HEADER_SIZE;
    st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    header = (unsigned char *)st->codec->extradata;
    if (avio_read(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        av_free(st->codec->extradata);
        return AVERROR(EIO);
    }
    st->codec->width = AV_RL16(&header[6]);
    st->codec->height = AV_RL16(&header[8]);

    /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
    if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        if (AV_RL16(&header[0]) == 1)
            st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
        else
            st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->sample_rate = AV_RL16(&header[24]);
        if (!st->codec->sample_rate)
            st->codec->sample_rate = 22050;
        st->codec->channels = header[26];
        if (!st->codec->channels)
            st->codec->channels = 1;
        st->codec->bits_per_coded_sample = 16;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample / 4;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;

        wsvqa->audio_stream_index = st->index;
        wsvqa->audio_samplerate = st->codec->sample_rate;
        wsvqa->audio_channels = st->codec->channels;
        wsvqa->audio_frame_counter = 0;
    }

    /* there are 0 or more chunks before the FINF chunk; iterate until
     * FINF has been skipped and the file will be ready to be demuxed */
    do {
        if (avio_read(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
            av_free(st->codec->extradata);
            return AVERROR(EIO);
        }
        chunk_tag = AV_RB32(&scratch[0]);
        chunk_size = AV_RB32(&scratch[4]);

        /* catch any unknown header tags, for curiosity */
        switch (chunk_tag) {
        case CINF_TAG:
        case CINH_TAG:
        case CIND_TAG:
        case PINF_TAG:
        case PINH_TAG:
        case PIND_TAG:
        case FINF_TAG:
        case CMDS_TAG:
            break;

        default:
            av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
                scratch[0], scratch[1],
                scratch[2], scratch[3]);
            break;
        }

        url_fseek(pb, chunk_size, SEEK_CUR);
    } while (chunk_tag != FINF_TAG);

    return 0;
}
Code example #16
File: cafdec.c Project: WangCrystal/FFplayer
static int probe(AVProbeData *p)
{
    if (AV_RB32(p->buf) == MKBETAG('c','a','f','f') && AV_RB16(&p->buf[4]) == 1)
        return AVPROBE_SCORE_MAX;
    return 0;
}
Code example #17
File: flacdec.c Project: EchoLiao/FFmpeg
static int flac_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    ThreadFrame tframe = { .f = data };
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FLACContext *s = avctx->priv_data;
    int bytes_read = 0;
    int ret;

    *got_frame_ptr = 0;

    if (s->max_framesize == 0) {
        s->max_framesize =
            ff_flac_get_max_frame_size(s->max_blocksize ? s->max_blocksize : FLAC_MAX_BLOCKSIZE,
                                       FLAC_MAX_CHANNELS, 32);
    }

    if (buf_size > 5 && !memcmp(buf, "\177FLAC", 5)) {
        av_log(s->avctx, AV_LOG_DEBUG, "skipping flac header packet 1\n");
        return buf_size;
    }

    if (buf_size > 0 && (*buf & 0x7F) == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
        av_log(s->avctx, AV_LOG_DEBUG, "skipping vorbis comment\n");
        return buf_size;
    }

    /* check that there is at least the smallest decodable amount of data.
       this amount corresponds to the smallest valid FLAC frame possible.
       FF F8 69 02 00 00 9A 00 00 34 46 */
    if (buf_size < FLAC_MIN_FRAME_SIZE)
        return buf_size;

    /* check for inline header */
    if (AV_RB32(buf) == MKBETAG('f','L','a','C')) {
        if (!s->got_streaminfo && (ret = parse_streaminfo(s, buf, buf_size))) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header\n");
            return ret;
        }
        return get_metadata_size(buf, buf_size);
    }

    /* decode frame */
    if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
        return ret;
    if ((ret = decode_frame(s)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n");
        return ret;
    }
    bytes_read = get_bits_count(&s->gb)/8;

    if ((s->avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) &&
        av_crc(av_crc_get_table(AV_CRC_16_ANSI),
               0, buf, bytes_read)) {
        av_log(s->avctx, AV_LOG_ERROR, "CRC error at PTS %"PRId64"\n", avpkt->pts);
        if (s->avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    frame->nb_samples = s->blocksize;
    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;

    s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded, s->channels,
                                   s->blocksize, s->sample_shift);

    if (bytes_read > buf_size) {
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
        return AVERROR_INVALIDDATA;
    }
    if (bytes_read < buf_size) {
        av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
               buf_size - bytes_read, buf_size);
    }

    *got_frame_ptr = 1;

    return bytes_read;
}
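The inline-header branch relies on FLAC's metadata framing: after the 4-byte "fLaC" marker, each metadata block starts with one byte (top bit = last-block flag, low 7 bits = block type) followed by a 24-bit big-endian length. A sketch of the size walk that get_metadata_size is expected to perform under that layout (the helper itself is not shown in the snippet):

#include <stdint.h>
#include <string.h>

/* Total size of the "fLaC" marker plus all metadata blocks, or 0 if
 * the buffer is truncated or doesn't start with the marker. */
static size_t flac_metadata_size(const uint8_t *buf, size_t size)
{
    size_t pos = 4; /* skip "fLaC" */
    int last = 0;

    if (size < 4 || memcmp(buf, "fLaC", 4))
        return 0;
    while (!last) {
        size_t block_len;
        if (pos + 4 > size)
            return 0; /* truncated block header */
        last      = buf[pos] & 0x80; /* last-metadata-block flag */
        block_len = (size_t)buf[pos + 1] << 16 | buf[pos + 2] << 8 |
                    buf[pos + 3];
        pos += 4 + block_len;
        if (pos > size)
            return 0; /* truncated block payload */
    }
    return pos;
}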
Code example #18
File: rtmppkt.c Project: LazyZhu/rt-n56u-1
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
                        int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int size = 0;

    if (url_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    size++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { // ids 0 and 1 signal an extended channel number (>= 64)
        buf[1] = 0;
        if (url_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        size += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;

    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (url_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        size += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (url_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            size += 3;
            data_size = AV_RB24(buf);
            if (url_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            size++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (url_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                size += 4;
                extra = AV_RL32(buf);
            }
        }
        if (timestamp == 0xFFFFFF) {
            if (url_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            timestamp = AV_RB32(buf);
        }
    }
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].ts_delta   = timestamp - prev_pkt[channel_id].timestamp;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (url_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        data_size -= toread;
        offset    += toread;
        size      += toread;
        if (data_size > 0) {
            url_read_complete(h, &t, 1); //marker
            size++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return size;
}
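The header parse above follows RTMP's variable-length chunk basic header: the low 6 bits of the first byte are the channel id, with the reserved values 0 and 1 escaping to one or two extra bytes (channel = byte + 64, or little-endian 16-bit + 64). A standalone sketch of just that decoding:

#include <stdint.h>
#include <stdio.h>

/* Decode an RTMP chunk basic header from buf; returns bytes consumed,
 * stores the header format (0-3) and the channel id. */
static int rtmp_basic_header(const uint8_t *buf, int *fmt, int *channel_id)
{
    *fmt        = buf[0] >> 6;
    *channel_id = buf[0] & 0x3F;
    if (*channel_id == 0) {            /* 2-byte form: ids 64..319 */
        *channel_id = buf[1] + 64;
        return 2;
    } else if (*channel_id == 1) {     /* 3-byte form, little-endian */
        *channel_id = (buf[1] | buf[2] << 8) + 64;
        return 3;
    }
    return 1;                          /* ids 2..63 encoded directly */
}

int main(void)
{
    const uint8_t hdr[3] = { 0x01, 0x34, 0x12 };  /* fmt 0, 3-byte form */
    int fmt, csid, n = rtmp_basic_header(hdr, &fmt, &csid);
    printf("fmt %d, channel %d, %d header byte(s)\n", fmt, csid, n);
    return 0;
}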
Code example #19
File: wmv9.cpp Project: betaking/LAVFilters
STDMETHODIMP CDecWMV9::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  HRESULT hr = S_OK;
  DbgLog((LOG_TRACE, 10, L"CDecWMV9::InitDecoder(): Initializing WMV9 DMO decoder"));

  DestroyDecoder(false);

  BITMAPINFOHEADER *pBMI = NULL;
  REFERENCE_TIME rtAvg = 0;
  DWORD dwARX = 0, dwARY = 0;
  videoFormatTypeHandler(*pmt, &pBMI, &rtAvg, &dwARX, &dwARY);
  
  size_t extralen = 0;
  BYTE *extra = NULL;
  getExtraData(*pmt, NULL, &extralen);
  if (extralen > 0) {
    extra = (BYTE *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
    getExtraData(*pmt, extra, &extralen);
  }

  if (codec == AV_CODEC_ID_VC1 && extralen) {
    size_t i = 0;
    for (i = 0; i < (extralen - 4); i++) {
      uint32_t code = AV_RB32(extra+i);
      if (IS_MARKER(code))
        break;
    }
    if (i == 0) {
      memmove(extra+1, extra, extralen);
      *extra = 0;
      extralen++;
    } else if (i > 1) {
      DbgLog((LOG_TRACE, 10, L"-> VC-1 Header at position %u (should be 0 or 1)", i));
    }
  }

  /* Create input type */

  GUID subtype = codec == AV_CODEC_ID_VC1 ? MEDIASUBTYPE_WVC1 : MEDIASUBTYPE_WMV3;
  m_nCodecId = codec;

  mtIn.SetType(&MEDIATYPE_Video);
  mtIn.SetSubtype(&subtype);
  mtIn.SetFormatType(&FORMAT_VideoInfo);
  mtIn.SetTemporalCompression(TRUE);
  mtIn.SetSampleSize(0);
  mtIn.SetVariableSize();
  
  VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)mtIn.AllocFormatBuffer((ULONG)(sizeof(VIDEOINFOHEADER) + extralen));
  memset(vih, 0, sizeof(VIDEOINFOHEADER));
  vih->bmiHeader.biWidth       = pBMI->biWidth;
  vih->bmiHeader.biHeight      = pBMI->biHeight;
  vih->bmiHeader.biPlanes      = 1;
  vih->bmiHeader.biBitCount    = 24;
  vih->bmiHeader.biSizeImage   = pBMI->biWidth * pBMI->biHeight * 3 / 2;
  vih->bmiHeader.biSize        = (DWORD)(sizeof(BITMAPINFOHEADER) + extralen);
  vih->bmiHeader.biCompression = subtype.Data1;
  vih->AvgTimePerFrame = rtAvg;
  SetRect(&vih->rcSource, 0, 0, pBMI->biWidth, pBMI->biHeight);
  vih->rcTarget = vih->rcSource;
  
  if (extralen > 0) {
    memcpy((BYTE *)vih + sizeof(VIDEOINFOHEADER), extra, extralen);
    av_freep(&extra);
    extra = (BYTE *)vih + sizeof(VIDEOINFOHEADER);
  }

  hr = m_pDMO->SetInputType(0, &mtIn, 0);
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set input type on DMO"));
    return hr;
  }

  /* Create output type */
  int idx = 0;
  while(SUCCEEDED(hr = m_pDMO->GetOutputType(0, idx++, &mtOut))) {
    if (mtOut.subtype == MEDIASUBTYPE_NV12) {
      hr = m_pDMO->SetOutputType(0, &mtOut, 0);
      m_OutPixFmt = LAVPixFmt_NV12;
      break;
    } else if (mtOut.subtype == MEDIASUBTYPE_YV12) {
      hr = m_pDMO->SetOutputType(0, &mtOut, 0);
      m_OutPixFmt = LAVPixFmt_YUV420;
      break;
    }
  }

  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set output type on DMO"));
    return hr;
  }

  videoFormatTypeHandler(mtOut, &pBMI);
  m_pRawBufferSize = pBMI->biSizeImage + FF_INPUT_BUFFER_PADDING_SIZE;

  m_bInterlaced = FALSE;
  memset(&m_StreamAR, 0, sizeof(m_StreamAR));
  if (extralen > 0) {
    m_vc1Header = new CVC1HeaderParser(extra, extralen, codec);
    if (m_vc1Header->hdr.valid) {
      m_bInterlaced = m_vc1Header->hdr.interlaced;
      m_StreamAR = m_vc1Header->hdr.ar;
    }
  }

  m_bManualReorder = (codec == AV_CODEC_ID_VC1) && !(m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_VC1_DTS);

  return S_OK;
}
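The extradata fix-up above hunts for a VC-1 start code; LAV's IS_MARKER macro is not shown here, but VC-1 data units begin with the bytes 00 00 01 plus a suffix, so a plausible stand-in looks like the sketch below (is_marker is an assumption, not the real macro):

#include <stdint.h>
#include <stddef.h>

/* Stand-in for IS_MARKER: true when the 32-bit word starts with the
 * three-byte prefix 00 00 01. */
static int is_marker(uint32_t code)
{
    return (code & 0xFFFFFF00) == 0x00000100;
}

/* Return the offset of the first start code in the extradata,
 * or -1 if none is found. */
static ptrdiff_t find_vc1_start_code(const uint8_t *extra, size_t len)
{
    size_t i;
    for (i = 0; i + 4 <= len; i++) {
        uint32_t code = (uint32_t)extra[i]     << 24 |
                        (uint32_t)extra[i + 1] << 16 |
                        (uint32_t)extra[i + 2] <<  8 |
                        (uint32_t)extra[i + 3];
        if (is_marker(code))
            return (ptrdiff_t)i;
    }
    return -1;
}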
Code example #20
File: movenchint.c Project: 1c0n/xbmc
/**
 * Write an RTP hint (that may contain one or more RTP packets)
 * for the packets in data. data contains one or more packets with a
 * BE32 size header.
 *
 * @param out buffer where the hints are written
 * @param data buffer containing RTP packets
 * @param size the size of the data buffer
 * @param trk the MOVTrack for the hint track
 * @param pts pointer where the timestamp for the written RTP hint is stored
 * @return the number of RTP packets in the written hint
 */
static int write_hint_packets(AVIOContext *out, const uint8_t *data,
                              int size, MOVTrack *trk, int64_t *pts)
{
    int64_t curpos;
    int64_t count_pos, entries_pos;
    int count = 0, entries;

    count_pos = avio_tell(out);
    /* RTPsample header */
    avio_wb16(out, 0); /* packet count */
    avio_wb16(out, 0); /* reserved */

    while (size > 4) {
        uint32_t packet_len = AV_RB32(data);
        uint16_t seq;
        uint32_t ts;

        data += 4;
        size -= 4;
        if (packet_len > size || packet_len <= 12)
            break;
        if (RTP_PT_IS_RTCP(data[1])) {
            /* RTCP packet, just skip */
            data += packet_len;
            size -= packet_len;
            continue;
        }

        if (packet_len > trk->max_packet_size)
            trk->max_packet_size = packet_len;

        seq = AV_RB16(&data[2]);
        ts = AV_RB32(&data[4]);

        if (trk->prev_rtp_ts == 0)
            trk->prev_rtp_ts = ts;
        /* Unwrap the 32-bit RTP timestamp that wraps around often
         * into a not (as often) wrapping 64-bit timestamp. */
        trk->cur_rtp_ts_unwrapped += (int32_t) (ts - trk->prev_rtp_ts);
        trk->prev_rtp_ts = ts;
        if (*pts == AV_NOPTS_VALUE)
            *pts = trk->cur_rtp_ts_unwrapped;

        count++;
        /* RTPpacket header */
        avio_wb32(out, 0); /* relative_time */
        avio_write(out, data, 2); /* RTP header */
        avio_wb16(out, seq); /* RTPsequenceseed */
        avio_wb16(out, 0); /* reserved + flags */
        entries_pos = avio_tell(out);
        avio_wb16(out, 0); /* entry count */

        data += 12;
        size -= 12;
        packet_len -= 12;

        entries = 0;
        /* Write one or more constructors describing the payload data */
        describe_payload(data, packet_len, out, &entries, &trk->sample_queue);
        data += packet_len;
        size -= packet_len;

        curpos = avio_tell(out);
        avio_seek(out, entries_pos, SEEK_SET);
        avio_wb16(out, entries);
        avio_seek(out, curpos, SEEK_SET);
    }

    curpos = avio_tell(out);
    avio_seek(out, count_pos, SEEK_SET);
    avio_wb16(out, count);
    avio_seek(out, curpos, SEEK_SET);
    return count;
}
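As the comment block before write_hint_packets says, the input is a concatenation of packets, each prefixed with a big-endian 32-bit length. A sketch that walks that framing and separates RTP from RTCP; looks_like_rtcp is a simplification (types 200-204 in byte 1) of the real RTP_PT_IS_RTCP macro, which covers a few more types:

#include <stdint.h>
#include <stdio.h>

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Simplified RTCP test: byte 1 of an RTCP packet carries the packet
 * type, 200..204 for SR/RR/SDES/BYE/APP. */
static int looks_like_rtcp(uint8_t pt)
{
    return pt >= 200 && pt <= 204;
}

static void walk_packets(const uint8_t *data, int size)
{
    while (size > 4) {
        uint32_t packet_len = rb32(data);
        data += 4;
        size -= 4;
        if (packet_len > (uint32_t)size || packet_len <= 12)
            break;                     /* malformed framing */
        printf("%s packet, %u bytes\n",
               looks_like_rtcp(data[1]) ? "RTCP" : "RTP", packet_len);
        data += packet_len;
        size -= packet_len;
    }
}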
Code example #21
File: dirac_parser.c Project: AWilco/xbmc
static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
                               int next, const uint8_t **buf, int *buf_size)
{
    int parse_timing_info = (s->pts == AV_NOPTS_VALUE &&
                             s->dts == AV_NOPTS_VALUE);
    DiracParseContext *pc = s->priv_data;

    if (pc->overread_index) {
        memcpy(pc->buffer, pc->buffer + pc->overread_index,
               pc->index - pc->overread_index);
        pc->index -= pc->overread_index;
        pc->overread_index = 0;
        if (*buf_size == 0 && pc->buffer[4] == 0x10) {
            *buf      = pc->buffer;
            *buf_size = pc->index;
            return 0;
        }
    }

    if (next == -1) {
        /* Found a possible frame start but not a frame end */
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           pc->index + (*buf_size -
                                                        pc->sync_offset));
        pc->buffer = new_buffer;
        memcpy(pc->buffer+pc->index, (*buf + pc->sync_offset),
               *buf_size - pc->sync_offset);
        pc->index += *buf_size - pc->sync_offset;
        return -1;
    } else {
        /* Found a possible frame start and a possible frame end */
        DiracParseUnit pu1, pu;
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           pc->index + next);
        pc->buffer = new_buffer;
        memcpy(pc->buffer + pc->index, *buf, next);
        pc->index += next;

        /* Need to check if we have a valid parse unit. We can't go by the
         * sync pattern 'BBCD' alone, because arithmetic coding of the
         * residual and motion data can reproduce that pattern by chance,
         * triggering a false frame start. So if the previous parse offset
         * of the next parse unit equals the next parse offset of the
         * current parse unit, we can be fairly sure we have a valid
         * parse unit. */
        if (!unpack_parse_unit(&pu1, pc, pc->index - 13)                     ||
            !unpack_parse_unit(&pu, pc, pc->index - 13 - pu1.prev_pu_offset) ||
            pu.next_pu_offset != pu1.prev_pu_offset) {
            pc->index -= 9;
            *buf_size = next-9;
            pc->header_bytes_needed = 9;
            return -1;
        }

        /* All non-frame data must be accompanied by frame data. This is to
         * ensure that pts is set correctly. So if the current parse unit is
         * not frame data, wait for frame data to come along */

        pc->dirac_unit = pc->buffer + pc->index - 13 -
                         pu1.prev_pu_offset - pc->dirac_unit_size;

        pc->dirac_unit_size += pu.next_pu_offset;

        if ((pu.pu_type&0x08) != 0x08) {
            pc->header_bytes_needed = 9;
            *buf_size = next;
            return -1;
        }

        /* Get the picture number to set the pts and dts*/
        if (parse_timing_info) {
            uint8_t *cur_pu = pc->buffer +
                              pc->index - 13 - pu1.prev_pu_offset;
            int pts =  AV_RB32(cur_pu + 13);
            if (s->last_pts == 0 && s->last_dts == 0)
                s->dts = pts - 1;
            else
                s->dts = s->last_dts+1;
            s->pts = pts;
            if (!avctx->has_b_frames && (cur_pu[4] & 0x03))
                avctx->has_b_frames = 1;
        }
        if (avctx->has_b_frames && s->pts == s->dts)
             s->pict_type = FF_B_TYPE;

        /* Finally have a complete Dirac data unit */
        *buf      = pc->dirac_unit;
        *buf_size = pc->dirac_unit_size;

        pc->dirac_unit_size     = 0;
        pc->overread_index      = pc->index-13;
        pc->header_bytes_needed = 9;
    }
    return next;
}
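The magic 13 used throughout is the size of Dirac's parse-info header: the sync word "BBCD", one parse-code byte, then two big-endian 32-bit offsets (to the next and previous parse units). A sketch of an unpack_parse_unit-style reader plus the cross-check the comment describes, under that layout (field names follow the snippet, and the caller must keep cur - prev_pu_offset inside the buffer):

#include <stdint.h>
#include <string.h>

typedef struct {
    uint8_t  pu_type;          /* parse code */
    uint32_t next_pu_offset;   /* bytes to the next parse-info header */
    uint32_t prev_pu_offset;   /* bytes back to the previous one */
} ParseUnit;

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Read one 13-byte Dirac parse-info header; returns 0 on bad sync. */
static int unpack_parse_info(ParseUnit *pu, const uint8_t *buf)
{
    if (memcmp(buf, "BBCD", 4))
        return 0;
    pu->pu_type        = buf[4];
    pu->next_pu_offset = rb32(buf + 5);
    pu->prev_pu_offset = rb32(buf + 9);
    return 1;
}

/* The parser's cross-check: a candidate header at "cur" is trusted
 * only if the header it points back to points forward to it. */
static int parse_units_consistent(const uint8_t *cur)
{
    ParseUnit pu1, pu;
    return unpack_parse_info(&pu1, cur) &&
           unpack_parse_info(&pu, cur - pu1.prev_pu_offset) &&
           pu.next_pu_offset == pu1.prev_pu_offset;
}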
Code example #22
File: demux_vqf.c Project: Newbleeto/mplayer2
static demuxer_t* demux_open_vqf(demuxer_t* demuxer) {
  sh_audio_t* sh_audio;
  WAVEFORMATEX* w;
  stream_t *s;
  headerInfo *hi;

  s = demuxer->stream;

  sh_audio = new_sh_audio(demuxer,0);
  sh_audio->wf = w = calloc(1, sizeof(*sh_audio->wf)+sizeof(headerInfo));
  hi = (headerInfo *)&w[1];
  w->wFormatTag = 0x1;
  sh_audio->format = mmioFOURCC('T','W','I','N'); /* TWinVQ */
  w->nChannels = sh_audio->channels = 2;
  w->nSamplesPerSec = sh_audio->samplerate = 44100;
  w->nAvgBytesPerSec = w->nSamplesPerSec*sh_audio->channels*2;
  w->nBlockAlign = 0;
  sh_audio->samplesize = 2;
  w->wBitsPerSample = 8*sh_audio->samplesize;
  w->cbSize = 0;
  strcpy(hi->ID,"TWIN");
  stream_read(s,hi->ID+KEYWORD_BYTES,VERSION_BYTES); /* fourcc+version_id */
  while(1)
  {
    char chunk_id[4];
    unsigned chunk_size;
    hi->size=chunk_size=stream_read_dword(s); /* size includes itself */
    stream_read(s,chunk_id,4);
    if (chunk_size < 8) return NULL;
    chunk_size -= 8;
    if(AV_RL32(chunk_id)==mmioFOURCC('C','O','M','M'))
    {
    char buf[BUFSIZ];
    unsigned i,subchunk_size;
    if (chunk_size > sizeof(buf) || chunk_size < 20) return NULL;
    if(stream_read(s,buf,chunk_size)!=chunk_size) return NULL;
    i=0;
    subchunk_size      = AV_RB32(buf);
    hi->channelMode    = AV_RB32(buf + 4);
    w->nChannels=sh_audio->channels=hi->channelMode+1; /*0-mono;1-stereo*/
    hi->bitRate        = AV_RB32(buf + 8);
    sh_audio->i_bps=hi->bitRate*1000/8; /* bitrate kbit/s */
    w->nAvgBytesPerSec = sh_audio->i_bps;
    hi->samplingRate   = AV_RB32(buf + 12);
    switch(hi->samplingRate){
    case 44:
        w->nSamplesPerSec=44100;
        break;
    case 22:
        w->nSamplesPerSec=22050;
        break;
    case 11:
        w->nSamplesPerSec=11025;
        break;
    default:
        w->nSamplesPerSec=hi->samplingRate*1000;
        break;
    }
    sh_audio->samplerate=w->nSamplesPerSec;
    hi->securityLevel  = AV_RB32(buf + 16);
    w->nBlockAlign = 0;
    sh_audio->samplesize = 4;
    w->wBitsPerSample = 8*sh_audio->samplesize;
    w->cbSize = 0;
    if (subchunk_size > chunk_size - 4) continue;
    i+=subchunk_size+4;
    while(i + 8 < chunk_size)
    {
        unsigned slen,sid;
        char sdata[BUFSIZ];
        sid  = AV_RL32(buf + i); i+=4;
        slen = AV_RB32(buf + i); i+=4;
        if (slen > sizeof(sdata) - 1 || slen > chunk_size - i) break;
        if(sid==mmioFOURCC('D','S','I','Z'))
        {
        hi->Dsiz=AV_RB32(buf + i);
        continue; /* describes the same info as size of DATA chunk */
        }
        memcpy(sdata,&buf[i],slen); sdata[slen]=0; i+=slen;
        if(sid==mmioFOURCC('N','A','M','E'))
        {
        memcpy(hi->Name,sdata,FFMIN(BUFSIZ,slen));
        demux_info_add(demuxer,"Title",sdata);
        }
        else
        if(sid==mmioFOURCC('A','U','T','H'))
        {
        memcpy(hi->Auth,sdata,FFMIN(BUFSIZ,slen));
        demux_info_add(demuxer,"Author",sdata);
        }
        else
        if(sid==mmioFOURCC('C','O','M','T'))
        {
        memcpy(hi->Comt,sdata,FFMIN(BUFSIZ,slen));
        demux_info_add(demuxer,"Comment",sdata);
        }
        else
        if(sid==mmioFOURCC('(','c',')',' '))
        {
        memcpy(hi->Cpyr,sdata,FFMIN(BUFSIZ,slen));
        demux_info_add(demuxer,"Copyright",sdata);
        }
        else
        if(sid==mmioFOURCC('F','I','L','E'))
        {
        memcpy(hi->File,sdata,FFMIN(BUFSIZ,slen));
        }
        else
        if(sid==mmioFOURCC('A','L','B','M')) demux_info_add(demuxer,"Album",sdata);
        else
        if(sid==mmioFOURCC('Y','E','A','R')) demux_info_add(demuxer,"Date",sdata);
        else
        if(sid==mmioFOURCC('T','R','A','C')) demux_info_add(demuxer,"Track",sdata);
        else
        if(sid==mmioFOURCC('E','N','C','D')) demux_info_add(demuxer,"Encoder",sdata);
        else
        mp_msg(MSGT_DEMUX, MSGL_V, "Unhandled subchunk '%c%c%c%c'='%s'\n",((char *)&sid)[0],((char *)&sid)[1],((char *)&sid)[2],((char *)&sid)[3],sdata);
        /* rest not recognized due to untranslatable Japanese expressions */
    }
    }
    else
    if(AV_RL32(chunk_id)==mmioFOURCC('D','A','T','A'))
    {
    demuxer->movi_start=stream_tell(s);
    demuxer->movi_end=demuxer->movi_start+chunk_size;
    mp_msg(MSGT_DEMUX, MSGL_V, "Found data at %"PRIX64" size %"PRIu64"\n",demuxer->movi_start,demuxer->movi_end);
    /* Done! play it */
    break;
    }
    else
    {
    mp_msg(MSGT_DEMUX, MSGL_V, "Unhandled chunk '%c%c%c%c' %u bytes\n",chunk_id[0],chunk_id[1],chunk_id[2],chunk_id[3],chunk_size);
    stream_skip(s,chunk_size); /*unknown chunk type */
    }
  }

  demuxer->audio->id = 0;
  demuxer->audio->sh = sh_audio;
  sh_audio->ds = demuxer->audio;
  stream_seek(s,demuxer->movi_start);
  demuxer->seekable=0;
  return demuxer;
}
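Two conventions in this demuxer are worth isolating: top-level TWIN chunk sizes include their own 8-byte header, and the COMM chunk stores the sampling rate as a code (44/22/11 for the CD-derived rates, otherwise kHz). A small sketch of both:

#include <stdint.h>

/* Map the COMM chunk's sampling-rate code to Hz, as the demuxer does. */
static unsigned vqf_sample_rate(uint32_t code)
{
    switch (code) {
    case 44: return 44100;
    case 22: return 22050;
    case 11: return 11025;
    default: return code * 1000;  /* anything else is taken as kHz */
    }
}

/* Payload bytes of a top-level TWIN chunk whose declared size
 * includes its own 8-byte header; 0 signals a malformed chunk. */
static uint32_t vqf_payload_size(uint32_t declared_size)
{
    return declared_size < 8 ? 0 : declared_size - 8;
}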
Code example #23
File: qdrw.c Project: Bjelijah/EcamTurnH265
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf     = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    int buf_size           = avpkt->size;
    AVFrame * const p      = data;
    uint8_t* outdata;
    int colors;
    int i, ret;
    uint32_t *pal;
    int r, g, b;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    outdata = p->data[0];

    if (buf_end - buf < 0x68 + 4)
        return AVERROR_INVALIDDATA;
    buf   += 0x68; /* jump to palette */
    colors = AV_RB32(buf);
    buf   += 4;

    if (colors < 0 || colors > 256) {
        av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors);
        return AVERROR_INVALIDDATA;
    }
    if (buf_end - buf < (colors + 1) * 8)
        return AVERROR_INVALIDDATA;

    pal = (uint32_t*)p->data[1];
    for (i = 0; i <= colors; i++) {
        unsigned int idx;
        idx = AV_RB16(buf); /* color index */
        buf += 2;

        if (idx > 255) {
            av_log(avctx, AV_LOG_ERROR, "Palette index out of range: %u\n", idx);
            buf += 6;
            continue;
        }
        r = *buf++;
        buf++;
        g = *buf++;
        buf++;
        b = *buf++;
        buf++;
        pal[idx] = 0xFFU << 24 | r << 16 | g << 8 | b;
    }
    p->palette_has_changed = 1;

    if (buf_end - buf < 18)
        return AVERROR_INVALIDDATA;
    buf += 18; /* skip unneeded data */
    for (i = 0; i < avctx->height; i++) {
        int size, left, code, pix;
        const uint8_t *next;
        uint8_t *out;
        int tsize = 0;

        /* decode line */
        out  = outdata;
        size = AV_RB16(buf); /* size of packed line */
        buf += 2;
        if (buf_end - buf < size)
            return AVERROR_INVALIDDATA;

        left = size;
        next = buf + size;
        while (left > 0) {
            code = *buf++;
            if (code & 0x80 ) { /* run */
                pix = *buf++;
                if ((out + (257 - code)) > (outdata +  p->linesize[0]))
                    break;
                memset(out, pix, 257 - code);
                out   += 257 - code;
                tsize += 257 - code;
                left  -= 2;
            } else { /* copy */
                if ((out + code) > (outdata +  p->linesize[0]))
                    break;
                if (buf_end - buf < code + 1)
                    return AVERROR_INVALIDDATA;
                memcpy(out, buf, code + 1);
                out   += code + 1;
                buf   += code + 1;
                left  -= 2 + code;
                tsize += code + 1;
            }
        }
        buf = next;
        outdata += p->linesize[0];
    }

    *got_frame      = 1;

    return buf_size;
}
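The inner while loop is a PackBits-style RLE: a control byte with the high bit set means "repeat the next byte 257 - code times", otherwise "copy the next code + 1 literal bytes". A bounds-checked sketch of one packed line as a standalone helper:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Unpack one PackBits-style RLE line; returns bytes written to out,
 * or -1 on truncated input or overfull output. */
static ptrdiff_t unpack_rle_line(uint8_t *out, size_t out_size,
                                 const uint8_t *in, size_t in_size)
{
    size_t ip = 0, op = 0;

    while (ip < in_size) {
        unsigned code = in[ip++];
        if (code & 0x80) {             /* run of 257 - code copies */
            size_t run = 257 - code;
            if (ip >= in_size || op + run > out_size)
                return -1;
            memset(out + op, in[ip++], run);
            op += run;
        } else {                       /* code + 1 literal bytes */
            size_t lit = code + 1;
            if (ip + lit > in_size || op + lit > out_size)
                return -1;
            memcpy(out + op, in + ip, lit);
            ip += lit;
            op += lit;
        }
    }
    return (ptrdiff_t)op;
}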
Code example #24
File: libschroedingerenc.c Project: AWilco/xbmc
static int libschroedinger_encode_frame(AVCodecContext *avccontext,
                                        unsigned char *frame,
                                        int buf_size, void *data)
{
    int enc_size = 0;
    FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;

    if (!data) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext,
                                                               data);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            assert(enc_buf->length > 0);
            assert(enc_buf->length <= buf_size);
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            p_schro_params->enc_buf = av_realloc(p_schro_params->enc_buf,
                                                 p_schro_params->enc_buf_size + enc_buf->length);

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;


            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame. */
            p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame));
            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_dirac_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                           p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);

            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;

        case SCHRO_STATE_AGAIN:
            break;

        default:
            av_log(avccontext, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Copy 'next' frame in queue. */

    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_dirac_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

    memcpy(frame, p_frame_output->p_encbuf, p_frame_output->size);
    avccontext->coded_frame->key_frame = p_frame_output->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to
     * do so since Dirac is a constant frame rate codec. It expects input
     * to be of constant frame rate. */
    avccontext->coded_frame->pts = p_frame_output->frame_num;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(frame + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    /* free frame */
    SchroedingerFreeFrame(p_frame_output);

    return enc_size;
}
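The frame number pulled from enc_buf->data + 13 sits immediately after Dirac's 13-byte parse-info header; for picture parse units, the next field is the 32-bit big-endian picture number. A one-function sketch of that read:

#include <stdint.h>

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* For a Dirac picture parse unit, the 32-bit picture number follows
 * the 13-byte parse-info header (sync word, parse code, two offsets). */
static uint32_t dirac_picture_number(const uint8_t *parse_unit)
{
    return rb32(parse_unit + 13);
}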
Code example #25
File: rv10.c Project: AndyA/ffmbc
static av_cold int rv10_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    static int done=0;

    if (avctx->extradata_size < 8) {
        av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
        return -1;
    }

    MPV_decode_defaults(s);

    s->avctx= avctx;
    s->out_format = FMT_H263;
    s->codec_id= avctx->codec_id;

    s->orig_width = s->width  = avctx->coded_width;
    s->orig_height= s->height = avctx->coded_height;

    s->h263_long_vectors= ((uint8_t*)avctx->extradata)[3] & 1;
    avctx->sub_id= AV_RB32((uint8_t*)avctx->extradata + 4);

    if (avctx->sub_id == 0x10000000) {
        s->rv10_version= 0;
        s->low_delay=1;
    } else if (avctx->sub_id == 0x10001000) {
        s->rv10_version= 3;
        s->low_delay=1;
    } else if (avctx->sub_id == 0x10002000) {
        s->rv10_version= 3;
        s->low_delay=1;
        s->obmc=1;
    } else if (avctx->sub_id == 0x10003000) {
        s->rv10_version= 3;
        s->low_delay=1;
    } else if (avctx->sub_id == 0x10003001) {
        s->rv10_version= 3;
        s->low_delay=1;
    } else if (    avctx->sub_id == 0x20001000
               || (avctx->sub_id >= 0x20100000 && avctx->sub_id < 0x201a0000)) {
        s->low_delay=1;
    } else if (    avctx->sub_id == 0x30202002
               ||  avctx->sub_id == 0x30203002
               || (avctx->sub_id >= 0x20200002 && avctx->sub_id < 0x20300000)) {
        s->low_delay=0;
        s->avctx->has_b_frames=1;
    } else
        av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", avctx->sub_id);

    if(avctx->debug & FF_DEBUG_PICT_INFO){
        av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", avctx->sub_id, avctx->extradata_size >= 4 ? ((uint32_t*)avctx->extradata)[0] : -1);
    }

    avctx->pix_fmt = PIX_FMT_YUV420P;

    if (MPV_common_init(s) < 0)
        return -1;

    h263_decode_init_vlc(s);

    /* init rv vlc */
    if (!done) {
        INIT_VLC_STATIC(&rv_dc_lum, DC_VLC_BITS, 256,
                 rv_lum_bits, 1, 1,
                 rv_lum_code, 2, 2, 16384);
        INIT_VLC_STATIC(&rv_dc_chrom, DC_VLC_BITS, 256,
                 rv_chrom_bits, 1, 1,
                 rv_chrom_code, 2, 2, 16388);
        done = 1;
    }

    return 0;
}
Code example #26
File: qdrw.c Project: Unhelpful/ffmpeg
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    QdrawContext * const a = avctx->priv_data;
    AVFrame * const p= (AVFrame*)&a->pic;
    uint8_t* outdata;
    int colors;
    int i;
    uint32_t *pal;
    int r, g, b;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    outdata = a->pic.data[0];

    buf += 0x68; /* jump to palette */
    colors = AV_RB32(buf);
    buf += 4;

    if(colors < 0 || colors > 256) {
        av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors);
        return -1;
    }

    pal = (uint32_t*)p->data[1];
    for (i = 0; i <= colors; i++) {
        unsigned int idx;
        idx = AV_RB16(buf); /* color index */
        buf += 2;

        if (idx > 255) {
            av_log(avctx, AV_LOG_ERROR, "Palette index out of range: %u\n", idx);
            buf += 6;
            continue;
        }
        r = *buf++;
        buf++;
        g = *buf++;
        buf++;
        b = *buf++;
        buf++;
        pal[idx] = (r << 16) | (g << 8) | b;
    }
    p->palette_has_changed = 1;

    buf += 18; /* skip unneeded data */
    for (i = 0; i < avctx->height; i++) {
        int size, left, code, pix;
        const uint8_t *next;
        uint8_t *out;
        int tsize = 0;

        /* decode line */
        out = outdata;
        size = AV_RB16(buf); /* size of packed line */
        buf += 2;
        left = size;
        next = buf + size;
        while (left > 0) {
            code = *buf++;
            if (code & 0x80 ) { /* run */
                pix = *buf++;
                if ((out + (257 - code)) > (outdata +  a->pic.linesize[0]))
                    break;
                memset(out, pix, 257 - code);
                out += 257 - code;
                tsize += 257 - code;
                left -= 2;
            } else { /* copy */
                if ((out + code) > (outdata +  a->pic.linesize[0]))
                    break;
                memcpy(out, buf, code + 1);
                out += code + 1;
                buf += code + 1;
                left -= 2 + code;
                tsize += code + 1;
            }
        }
        buf = next;
        outdata += a->pic.linesize[0];
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = a->pic;

    return buf_size;
}
Code example #27
File: apngdec.c Project: alikuro/FFmpeg
static int apng_read_header(AVFormatContext *s)
{
    APNGDemuxContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t len, tag;
    AVStream *st;
    int ret = AVERROR_INVALIDDATA, acTL_found = 0;

    /* verify PNGSIG */
    if (avio_rb64(pb) != PNGSIG)
        return ret;

    /* parse IHDR (must be first chunk) */
    len = avio_rb32(pb);
    tag = avio_rl32(pb);
    if (len != 13 || tag != MKTAG('I', 'H', 'D', 'R'))
        return ret;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_APNG;
    st->codec->width      = avio_rb32(pb);
    st->codec->height     = avio_rb32(pb);
    if ((ret = av_image_check_size(st->codec->width, st->codec->height, 0, s)) < 0)
        return ret;

    /* extradata will contain every chunk up to the first fcTL (excluded) */
    st->codec->extradata = av_malloc(len + 12 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    st->codec->extradata_size = len + 12;
    AV_WB32(st->codec->extradata,    len);
    AV_WL32(st->codec->extradata+4,  tag);
    AV_WB32(st->codec->extradata+8,  st->codec->width);
    AV_WB32(st->codec->extradata+12, st->codec->height);
    if ((ret = avio_read(pb, st->codec->extradata+16, 9)) < 0)
        goto fail;

    while (!avio_feof(pb)) {
        if (acTL_found && ctx->num_play != 1) {
            int64_t size   = avio_size(pb);
            int64_t offset = avio_tell(pb);
            if (size < 0) {
                ret = size;
                goto fail;
            } else if (offset < 0) {
                ret = offset;
                goto fail;
            } else if ((ret = ffio_ensure_seekback(pb, size - offset)) < 0) {
                av_log(s, AV_LOG_WARNING, "Could not ensure seekback, will not loop\n");
                ctx->num_play = 1;
            }
        }
        if ((ctx->num_play == 1 || !acTL_found) &&
            ((ret = ffio_ensure_seekback(pb, 4 /* len */ + 4 /* tag */)) < 0))
            goto fail;

        len = avio_rb32(pb);
        if (len > 0x7fffffff) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        tag = avio_rl32(pb);
        switch (tag) {
        case MKTAG('a', 'c', 'T', 'L'):
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
                (ret = append_extradata(st->codec, pb, len + 12)) < 0)
                goto fail;
            acTL_found = 1;
            ctx->num_frames = AV_RB32(st->codec->extradata + ret + 8);
            ctx->num_play   = AV_RB32(st->codec->extradata + ret + 12);
            av_log(s, AV_LOG_DEBUG, "num_frames: %"PRIu32", num_play: %"PRIu32"\n",
                                    ctx->num_frames, ctx->num_play);
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!acTL_found) {
               ret = AVERROR_INVALIDDATA;
               goto fail;
            }
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0)
                goto fail;
            return 0;
        default:
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
                (ret = append_extradata(st->codec, pb, len + 12)) < 0)
                goto fail;
        }
    }

fail:
    if (st->codec->extradata_size) {
        av_freep(&st->codec->extradata);
        st->codec->extradata_size = 0;
    }
    return ret;
}
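The recurring len + 12 above comes from PNG's uniform chunk layout: a big-endian 32-bit data length, a 4-byte type, the data, then a CRC-32, so every chunk occupies length + 12 bytes. A sketch of walking that layout in a memory buffer:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Walk PNG chunks after the 8-byte signature: each chunk is
 * 4 (length) + 4 (type) + length (data) + 4 (CRC) bytes. */
static void walk_png_chunks(const uint8_t *buf, size_t size)
{
    size_t pos = 8;                    /* skip the PNG signature */

    while (pos + 12 <= size) {
        uint32_t len = rb32(buf + pos);
        if (len > 0x7fffffff || pos + 12 + len > size)
            break;                     /* invalid or truncated */
        printf("chunk %c%c%c%c, %u data bytes\n",
               buf[pos + 4], buf[pos + 5], buf[pos + 6], buf[pos + 7],
               (unsigned)len);
        pos += 12 + len;               /* header + data + CRC */
    }
}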
Code example #28
File: libopenjpeg.c Project: eugenehp/ffmbc
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, has_alpha = 0, ret = -1;
    int x, y, index;
    uint8_t *img_ptr;
    int adjust[4];

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
       (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
       (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }
    width  = image->comps[0].w << avctx->lowres;
    height = image->comps[0].h << avctx->lowres;
    if(av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    switch(image->numcomps)
    {
        case 1:  avctx->pix_fmt = PIX_FMT_GRAY8;
                 break;
        case 3:  if(check_image_attributes(image)) {
                     avctx->pix_fmt = PIX_FMT_RGB24;
                 } else {
                     avctx->pix_fmt = PIX_FMT_GRAY8;
                     av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
                 }
                 break;
        case 4:  has_alpha = 1;
                 avctx->pix_fmt = PIX_FMT_RGBA;
                 break;
        default: av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
                 goto done;
    }

    if(picture->data[0])
        avctx->release_buffer(avctx, picture);

    if(avctx->get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Couldn't allocate image buffer.\n");
        return -1;
    }

    for(x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
    }

    for(y = 0; y < avctx->height; y++) {
        index = y*avctx->width;
        img_ptr = picture->data[0] + y*picture->linesize[0];
        for(x = 0; x < avctx->width; x++, index++) {
            *img_ptr++ = image->comps[0].data[index] >> adjust[0];
            if(image->numcomps > 2 && check_image_attributes(image)) {
                *img_ptr++ = image->comps[1].data[index] >> adjust[1];
                *img_ptr++ = image->comps[2].data[index] >> adjust[2];
                if(has_alpha)
                    *img_ptr++ = image->comps[3].data[index] >> adjust[3];
            }
        }
    }

    *output    = ctx->image;
    *data_size = sizeof(AVFrame);
    ret        = buf_size;

done:
    /* Tail restored where the listing was truncated (an assumption
     * based on the goto targets above): free the OpenJPEG image and
     * decoder, then return. */
    opj_image_destroy(image);
    opj_destroy_decompress(dec);
    return ret;
}
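The wrapper detection at the top of this function looks for the JP2 signature box: a box of length 12 whose type and payload are the JP2_SIG_TYPE / JP2_SIG_VALUE constants. A sketch with the values spelled out (taken from the JP2 specification; treat the exact constants as an assumption here):

#include <stdint.h>
#include <stddef.h>

#define JP2_SIG_TYPE  0x6A502020u   /* 'jP  ' */
#define JP2_SIG_VALUE 0x0D0A870Au

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* A buffer is JP2-wrapped if it opens with the 12-byte signature box. */
static int is_jp2(const uint8_t *buf, size_t size)
{
    return size >= 12 &&
           rb32(buf)     == 12 &&
           rb32(buf + 4) == JP2_SIG_TYPE &&
           rb32(buf + 8) == JP2_SIG_VALUE;
}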
Code example #29
File: vmnc.c Project: CodeAsm/ffplay360
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmncContext * const c = avctx->priv_data;
    uint8_t *outptr;
    const uint8_t *src = buf;
    int dx, dy, w, h, depth, enc, chunks, res, size_left;

    c->pic.reference = 1;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, &c->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    c->pic.key_frame = 0;
    c->pic.pict_type = FF_P_TYPE;

    //restore screen after cursor
    if(c->screendta) {
        int i;
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
        }
    }
    src += 2;
    chunks = AV_RB16(src); src += 2;
    while(chunks--) {
        dx = AV_RB16(src); src += 2;
        dy = AV_RB16(src); src += 2;
        w  = AV_RB16(src); src += 2;
        h  = AV_RB16(src); src += 2;
        enc = AV_RB32(src); src += 4;
        outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
        size_left = buf_size - (src - buf);
        switch(enc) {
        case MAGIC_WMVd: // cursor
            if(size_left < 2 + w * h * c->bpp2 * 2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
                return -1;
            }
            src += 2;
            c->cur_w = w;
            c->cur_h = h;
            c->cur_hx = dx;
            c->cur_hy = dy;
            if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
                av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
                c->cur_hx = c->cur_hy = 0;
            }
            c->curbits = av_realloc(c->curbits, c->cur_w * c->cur_h * c->bpp2);
            c->curmask = av_realloc(c->curmask, c->cur_w * c->cur_h * c->bpp2);
            c->screendta = av_realloc(c->screendta, c->cur_w * c->cur_h * c->bpp2);
            load_cursor(c, src);
            src += w * h * c->bpp2 * 2;
            break;
        case MAGIC_WMVe: // unknown
            src += 2;
            break;
        case MAGIC_WMVf: // update cursor position
            c->cur_x = dx - c->cur_hx;
            c->cur_y = dy - c->cur_hy;
            break;
        case MAGIC_WMVg: // unknown
            src += 10;
            break;
        case MAGIC_WMVh: // unknown
            src += 4;
            break;
        case MAGIC_WMVi: // ServerInitialization struct
            c->pic.key_frame = 1;
            c->pic.pict_type = FF_I_TYPE;
            depth = *src++;
            if(depth != c->bpp) {
                av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
            }
            src++;
            c->bigendian = *src++;
            if(c->bigendian & (~1)) {
                av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
                return -1;
            }
            //skip the rest of pixel format data
            src += 13;
            break;
        case MAGIC_WMVj: // unknown
            src += 2;
            break;
        case 0x00000000: // raw rectangle data
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            if(size_left < w * h * c->bpp2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
                return -1;
            }
            paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, c->pic.linesize[0]);
            src += w * h * c->bpp2;
            break;
        case 0x00000005: // HexTile encoded rectangle
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            res = decode_hextile(c, outptr, src, size_left, w, h, c->pic.linesize[0]);
            if(res < 0)
                return -1;
            src += res;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
            chunks = 0; // leave chunks decoding loop
        }
    }
    if(c->screendta){
        int i;
        //save screen data before painting cursor
        w = c->cur_w;
        if(c->width < c->cur_x + w) w = c->width - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
            outptr = c->pic.data[0];
            put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
        }
    }
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
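Each chunk decoded above is an RFB-style rectangle update: four big-endian 16-bit fields (x, y, width, height) followed by a big-endian 32-bit encoding type, which is exactly what the dx/dy/w/h/enc reads consume. A sketch of that 12-byte header parse:

#include <stdint.h>

typedef struct {
    uint16_t x, y, w, h;   /* rectangle position and size */
    uint32_t encoding;     /* raw, hextile, or a MAGIC_WMV* extension */
} RectHeader;

static uint16_t rb16(const uint8_t *p)
{
    return (uint16_t)(p[0] << 8 | p[1]);
}

static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Parse one 12-byte rectangle header; returns the advanced pointer. */
static const uint8_t *parse_rect_header(const uint8_t *src, RectHeader *r)
{
    r->x = rb16(src);        src += 2;
    r->y = rb16(src);        src += 2;
    r->w = rb16(src);        src += 2;
    r->h = rb16(src);        src += 2;
    r->encoding = rb32(src); src += 4;
    return src;
}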
Code example #30
File: dpx.c Project: AVbin/libav
static int decode_frame(AVCodecContext *avctx,
                        void *data,
                        int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    int buf_size       = avpkt->size;
    AVFrame *const p = data;
    uint8_t *ptr;

    unsigned int offset;
    int magic_num, endian;
    int x, y, ret;
    int w, h, stride, bits_per_color, descriptor, elements, target_packet_size, source_packet_size;

    unsigned int rgbBuffer;

    if (avpkt->size <= 1634) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small for DPX header\n");
        return AVERROR_INVALIDDATA;
    }

    magic_num = AV_RB32(buf);
    buf += 4;

    /* Check the file's magic number: "SDPX" means a big-endian file,
     * "XPDS" a little-endian one */
    if (magic_num == AV_RL32("SDPX")) {
        endian = 0;
    } else if (magic_num == AV_RB32("SDPX")) {
        endian = 1;
    } else {
        av_log(avctx, AV_LOG_ERROR, "DPX marker not found\n");
        return AVERROR_INVALIDDATA;
    }

    offset = read32(&buf, endian);
    if (avpkt->size <= offset) {
        av_log(avctx, AV_LOG_ERROR, "Invalid data start offset\n");
        return AVERROR_INVALIDDATA;
    }
    // Need to be at offset 0x304 from the start of the file
    buf = avpkt->data + 0x304;
    w = read32(&buf, endian);
    h = read32(&buf, endian);

    // Need to be at offset 0x320 to read the descriptor
    buf += 20;
    descriptor = buf[0];

    // Need to be at offset 0x323 to read the bits per color
    buf += 3;
    avctx->bits_per_raw_sample =
    bits_per_color = buf[0];

    buf += 825;
    avctx->sample_aspect_ratio.num = read32(&buf, endian);
    avctx->sample_aspect_ratio.den = read32(&buf, endian);

    switch (descriptor) {
        case 51: // RGBA
            elements = 4;
            break;
        case 50: // RGB
            elements = 3;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported descriptor %d\n", descriptor);
            return AVERROR_INVALIDDATA;
    }

    switch (bits_per_color) {
        case 8:
            if (elements == 4) {
                avctx->pix_fmt = AV_PIX_FMT_RGBA;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_RGB24;
            }
            source_packet_size = elements;
            target_packet_size = elements;
            break;
        case 10:
            avctx->pix_fmt = AV_PIX_FMT_RGB48;
            target_packet_size = 6;
            source_packet_size = 4;
            break;
        case 12:
        case 16:
            if (endian) {
                avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_RGB48LE;
            }
            target_packet_size = 6;
            source_packet_size = elements * 2;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported color depth : %d\n", bits_per_color);
            return AVERROR_INVALIDDATA;
    }

    if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
        return ret;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    // Move pointer to offset from start of file
    buf =  avpkt->data + offset;

    ptr    = p->data[0];
    stride = p->linesize[0];

    if (source_packet_size*avctx->width*avctx->height > buf_end - buf) {
        av_log(avctx, AV_LOG_ERROR, "Overread buffer. Invalid header?\n");
        return AVERROR_INVALIDDATA;
    }
    switch (bits_per_color) {
        case 10:
            for (x = 0; x < avctx->height; x++) {
               uint16_t *dst = (uint16_t*)ptr;
               for (y = 0; y < avctx->width; y++) {
                   rgbBuffer = read32(&buf, endian);
                   // Read out the 10-bit colors and convert to 16-bit
                   *dst++ = make_16bit(rgbBuffer >> 16);
                   *dst++ = make_16bit(rgbBuffer >>  6);
                   *dst++ = make_16bit(rgbBuffer <<  4);
               }
               ptr += stride;
            }
            break;
        case 8:
        case 12: // Treat 12-bit as 16-bit
        case 16:
            if (source_packet_size == target_packet_size) {
                for (x = 0; x < avctx->height; x++) {
                    memcpy(ptr, buf, target_packet_size*avctx->width);
                    ptr += stride;
                    buf += source_packet_size*avctx->width;
                }
            } else {
                for (x = 0; x < avctx->height; x++) {
                    uint8_t *dst = ptr;
                    for (y = 0; y < avctx->width; y++) {
                        memcpy(dst, buf, target_packet_size);
                        dst += target_packet_size;
                        buf += source_packet_size;
                    }
                    ptr += stride;
                }
            }
            break;
    }

    *got_frame = 1;

    return buf_size;
}
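In the 10-bit path, one 32-bit word packs R, G and B into bits 31-22, 21-12 and 11-2. make_16bit is not shown in the snippet; the usual expansion keeps the 10 significant bits and replicates the top bits into the bottom, so the following is a sketch under that assumption:

#include <stdint.h>

/* Expand a 10-bit component (already shifted into the top of a 16-bit
 * word) to full 16-bit range by replicating its top bits. */
static uint16_t expand10(uint16_t v)
{
    v &= 0xFFC0;             /* keep the 10 significant bits */
    return v + (v >> 10);    /* replicate them into the low 6 bits */
}

/* Split one packed 10-bit RGB word: R in bits 31-22, G in 21-12,
 * B in 11-2 (the two lowest bits are padding). */
static void unpack_rgb10(uint32_t word, uint16_t rgb[3])
{
    rgb[0] = expand10((uint16_t)(word >> 16));
    rgb[1] = expand10((uint16_t)(word >>  6));
    rgb[2] = expand10((uint16_t)(word <<  4));
}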