Code example #1
 std::string RTMPSession::parseStatusCode(uint8_t *p) {
     //uint8_t *start = p;
     std::map<std::string, std::string> props;
     
     // skip over the packet id
     get_double(p+1); // num
     p += sizeof(double) + 1;
     
     // keep reading until we find an AMF Object
     bool foundObject = false;
     while (!foundObject) {
         if (p[0] == AMF_DATA_TYPE_OBJECT) {
             p += 1;
             foundObject = true;
             continue;
         } else {
             p += amfPrimitiveObjectSize(p);
         }
     }
     
     // read the properties of the object
     uint16_t nameLen, valLen;
     char propName[128], propVal[128];
     do {
         nameLen = get_be16(p);
         p += sizeof(nameLen);
         strncpy(propName, (char*)p, nameLen);
         propName[nameLen] = '\0';
         p += nameLen;
         if (p[0] == AMF_DATA_TYPE_STRING) {
             valLen = get_be16(p+1);
             p += sizeof(valLen) + 1;
             strncpy(propVal, (char*)p, valLen);
             propVal[valLen] = '\0';
             p += valLen;
             props[propName] = propVal;
         } else {
             // treat non-string property values as empty
             p += amfPrimitiveObjectSize(p);
             props[propName] = "";
         }
     } while (get_be24(p) != AMF_DATA_TYPE_OBJECT_END);
     
     //p = start;
     return props["code"];
 }
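
Several of these snippets (examples #1, #4, and #8) call get_be16, get_be24, and get_double on raw byte pointers rather than on an I/O context. A minimal sketch of what such helpers could look like (hypothetical implementations matching those call sites, not the projects' own code):

#include <stdint.h>
#include <string.h>

/* Hypothetical helpers: read big-endian integers / an AMF number
 * from a raw byte pointer, as assumed by examples #1 and #8. */
static uint16_t get_be16(const uint8_t *p) {
    return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be24(const uint8_t *p) {
    return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

/* AMF0 numbers are 8-byte big-endian IEEE-754 doubles. */
static double get_double(const uint8_t *p) {
    uint64_t v = 0;
    double d;
    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    memcpy(&d, &v, sizeof(d));   /* reinterpret the bit pattern */
    return d;
}
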
Code example #2
File: isom.c  Project: AWilco/xbmc
int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, ByteIOContext *pb)
{
    int len, tag;
    int object_type_id = get_byte(pb);
    get_byte(pb); /* stream type */
    get_be24(pb); /* buffer size db */
    get_be32(pb); /* max bitrate */
    get_be32(pb); /* avg bitrate */

    st->codec->codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id);
    av_dlog(fc, "esds object type id 0x%02x\n", object_type_id);
    len = ff_mp4_read_descr(fc, pb, &tag);
    if (tag == MP4DecSpecificDescrTag) {
        av_dlog(fc, "Specific MPEG4 header len=%d\n", len);
        if((uint64_t)len > (1<<30))
            return -1;
        av_free(st->codec->extradata);
        st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);
        get_buffer(pb, st->codec->extradata, len);
        st->codec->extradata_size = len;
        if (st->codec->codec_id == CODEC_ID_AAC) {
            MPEG4AudioConfig cfg;
            ff_mpeg4audio_get_config(&cfg, st->codec->extradata,
                                     st->codec->extradata_size);
            st->codec->channels = cfg.channels;
            if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
                st->codec->sample_rate = ff_mpa_freq_tab[cfg.sampling_index];
            else if (cfg.ext_sample_rate)
                st->codec->sample_rate = cfg.ext_sample_rate;
            else
                st->codec->sample_rate = cfg.sample_rate;
            av_dlog(fc, "mp4a config channels %d obj %d ext obj %d "
                    "sample rate %d ext sample rate %d\n", st->codec->channels,
                    cfg.object_type, cfg.ext_object_type,
                    cfg.sample_rate, cfg.ext_sample_rate);
            if (!(st->codec->codec_id = ff_codec_get_id(mp4_audio_types,
                                                        cfg.object_type)))
                st->codec->codec_id = CODEC_ID_AAC;
        }
    }
    return 0;
}
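
ff_mp4_read_descr, called above, reads an MPEG-4 descriptor tag followed by its length. Per the MPEG-4 systems spec, that length is "expandable": up to four bytes of 7 payload bits each, with the high bit flagging that another length byte follows. A sketch of such a length reader over a raw buffer (hypothetical helper, not the FFmpeg function itself):

#include <stdint.h>

/* Sketch: decode an MPEG-4 "expandable" descriptor length.
 * Up to 4 bytes, 7 payload bits each; high bit = continuation. */
static int mp4_descr_len(const uint8_t *p, int *consumed)
{
    int len = 0, i;
    for (i = 0; i < 4; i++) {
        int c = p[i];
        len = (len << 7) | (c & 0x7f);
        if (!(c & 0x80)) { i++; break; }
    }
    *consumed = i;   /* number of length bytes read */
    return len;
}
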
Code example #3
File: flvdec.c  Project: foogywoo/drone
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, type, size, pts, flags, is_audio;
    AVStream *st;
    
 for(;;){
    url_fskip(&s->pb, 4); /* size of previous packet */
    type = get_byte(&s->pb);
    size = get_be24(&s->pb);
    pts = get_be24(&s->pb);
//    av_log(s, AV_LOG_DEBUG, "type:%d, size:%d, pts:%d\n", type, size, pts);
    if (url_feof(&s->pb))
        return AVERROR_IO;
    url_fskip(&s->pb, 4); /* reserved */
    flags = 0;
    
    if(size == 0)
        continue;
    
    if (type == 8) {
        is_audio=1;
        flags = get_byte(&s->pb);
        size--;
    } else if (type == 9) {
        is_audio=0;
        flags = get_byte(&s->pb);
        size--;
    } else {
        /* skip packet */
        av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
        url_fskip(&s->pb, size);
        continue;
    }

    /* now find stream */
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        if (st->id == is_audio)
            break;
    }
    if(i == s->nb_streams){
        st = av_new_stream(s, is_audio);
        if (!st)
            return AVERROR_NOMEM;

        av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
        st->codec.frame_rate_base= 1;
        st->codec.frame_rate= 1000;
    }
    if(st->discard){
        url_fskip(&s->pb, size);
        continue;
    }
    break;
 }

    if(is_audio){
        if(st->codec.sample_rate == 0){
            st->codec.codec_type = CODEC_TYPE_AUDIO;
            st->codec.channels = (flags&1)+1;
            if((flags >> 4) == 5)
                st->codec.sample_rate= 8000;
            else
                st->codec.sample_rate = (44100<<((flags>>2)&3))>>3;
            switch(flags >> 4){/* 0: uncompressed 1: ADPCM 2: mp3 5: Nellymoser 8kHz mono 6: Nellymoser*/
            case 2: st->codec.codec_id = CODEC_ID_MP3; break;
            default:
                st->codec.codec_tag= (flags >> 4);
            }
        }
Code example #4
File: util.cpp  Project: tfauck/upx
int __acc_cdecl_qsort be24_compare(const void *e1, const void *e2)
{
    const unsigned d1 = get_be24(e1);
    const unsigned d2 = get_be24(e2);
    return (d1 < d2) ? -1 : ((d1 > d2) ? 1 : 0);
}
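
As a usage sketch (hypothetical, not part of upx, and assuming __acc_cdecl_qsort is just a calling-convention macro): the comparator above can sort an array of packed 3-byte big-endian records with the standard qsort, since qsort hands each element's address straight to the comparator.

#include <stdlib.h>   /* qsort */

/* Hypothetical usage: sort packed 3-byte big-endian records in place,
 * e.g. { 00 10 00, 0f ff ff, 00 00 01 } -> { 00 00 01, 00 10 00, 0f ff ff }. */
static void sort_be24_table(unsigned char *table, size_t count)
{
    qsort(table, count, 3, be24_compare);   /* each element is 3 bytes wide */
}
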
Code example #5
static void id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t flags)
{
    int isv34, tlen;
    uint32_t tag;
    offset_t next;
    char tmp[16];
    int taghdrlen;
    const char *reason;

    switch(version) {
    case 2:
        if(flags & 0x40) {
            reason = "compression";
            goto error;
        }
        isv34 = 0;
        taghdrlen = 6;
        break;

    case 3:
    case 4:
        isv34 = 1;
        taghdrlen = 10;
        break;

    default:
        reason = "version";
        goto error;
    }

    if(flags & 0x80) {
        reason = "unsynchronization";
        goto error;
    }

    if(isv34 && flags & 0x40) /* Extended header present, just skip over it */
        url_fskip(s->pb, id3v2_get_size(s->pb, 4));

    while(len >= taghdrlen) {
        if(isv34) {
            tag  = get_be32(s->pb);
            tlen = id3v2_get_size(s->pb, 4);
            get_be16(s->pb); /* flags */
        } else {
            tag  = get_be24(s->pb);
            tlen = id3v2_get_size(s->pb, 3);
        }
        len -= taghdrlen + tlen;

        if(len < 0)
            break;

        next = url_ftell(s->pb) + tlen;

        switch(tag) {
        case MKBETAG('T', 'I', 'T', '2'):
        case MKBETAG(0,   'T', 'T', '2'):
            id3v2_read_ttag(s, tlen, s->title, sizeof(s->title));
            break;
        case MKBETAG('T', 'P', 'E', '1'):
        case MKBETAG(0,   'T', 'P', '1'):
            id3v2_read_ttag(s, tlen, s->author, sizeof(s->author));
            break;
        case MKBETAG('T', 'A', 'L', 'B'):
        case MKBETAG(0,   'T', 'A', 'L'):
            id3v2_read_ttag(s, tlen, s->album, sizeof(s->album));
            break;
        case MKBETAG('T', 'C', 'O', 'N'):
        case MKBETAG(0,   'T', 'C', 'O'):
            id3v2_read_ttag(s, tlen, s->genre, sizeof(s->genre));
            break;
        case MKBETAG('T', 'C', 'O', 'P'):
        case MKBETAG(0,   'T', 'C', 'R'):
            id3v2_read_ttag(s, tlen, s->copyright, sizeof(s->copyright));
            break;
        case MKBETAG('T', 'R', 'C', 'K'):
        case MKBETAG(0,   'T', 'R', 'K'):
            id3v2_read_ttag(s, tlen, tmp, sizeof(tmp));
            s->track = atoi(tmp);
            break;
        case 0:
            /* padding, skip to end */
            url_fskip(s->pb, len);
            len = 0;
            continue;
        }
        /* Skip to end of tag */
        url_fseek(s->pb, next, SEEK_SET);
    }

    if(version == 4 && flags & 0x10) /* Footer present, always 10 bytes, skip over it */
        url_fskip(s->pb, 10);
    return;

  error:
    av_log(s, AV_LOG_INFO, "ID3v2.%d tag skipped, cannot handle %s\n", version, reason);
    url_fskip(s->pb, len);
}
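
The frame loop above relies on id3v2_get_size to decode ID3v2 size fields. Per the ID3v2 spec, the tag header size (and ID3v2.4 frame sizes) are "syncsafe" integers that carry only 7 significant bits per byte; a minimal reader over a raw buffer might look like this sketch (a hypothetical helper, not necessarily FFmpeg's implementation):

#include <stdint.h>

/* Sketch: decode a big-endian "syncsafe" integer of `len` bytes,
 * 7 significant bits per byte (the high bit of each byte is zero). */
static unsigned int syncsafe_size(const uint8_t *p, int len)
{
    unsigned int v = 0;
    while (len--)
        v = (v << 7) | (*p++ & 0x7f);
    return v;
}
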
Code example #6
File: pva.c  Project: 119/dropcam_for_iphone
static int read_part_of_packet(AVFormatContext *s, int64_t *pts,
                               int *len, int *strid, int read_packet) {
    ByteIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int syncword, streamid, reserved, flags, length, pts_flag;
    int64_t pva_pts = AV_NOPTS_VALUE, startpos;

recover:
    startpos = url_ftell(pb);

    syncword = get_be16(pb);
    streamid = get_byte(pb);
    get_byte(pb);               /* counter not used */
    reserved = get_byte(pb);
    flags    = get_byte(pb);
    length   = get_be16(pb);

    pts_flag = flags & 0x10;

    if (syncword != PVA_MAGIC) {
        pva_log(s, AV_LOG_ERROR, "invalid syncword\n");
        return AVERROR(EIO);
    }
    if (streamid != PVA_VIDEO_PAYLOAD && streamid != PVA_AUDIO_PAYLOAD) {
        pva_log(s, AV_LOG_ERROR, "invalid streamid\n");
        return AVERROR(EIO);
    }
    if (reserved != 0x55) {
        pva_log(s, AV_LOG_WARNING, "expected reserved byte to be 0x55\n");
    }
    if (length > PVA_MAX_PAYLOAD_LENGTH) {
        pva_log(s, AV_LOG_ERROR, "invalid payload length %u\n", length);
        return AVERROR(EIO);
    }

    if (streamid == PVA_VIDEO_PAYLOAD && pts_flag) {
        pva_pts = get_be32(pb);
        length -= 4;
    } else if (streamid == PVA_AUDIO_PAYLOAD) {
        /* PVA Audio Packets either start with a signaled PES packet or
         * are a continuation of the previous PES packet. New PES packets
         * always start at the beginning of a PVA Packet, never somewhere in
         * the middle. */
        if (!pvactx->continue_pes) {
            int pes_signal, pes_header_data_length, pes_packet_length,
                pes_flags;
            unsigned char pes_header_data[256];

            pes_signal             = get_be24(pb);
            get_byte(pb);
            pes_packet_length      = get_be16(pb);
            pes_flags              = get_be16(pb);
            pes_header_data_length = get_byte(pb);

            if (pes_signal != 1) {
                pva_log(s, AV_LOG_WARNING, "expected signaled PES packet, "
                                          "trying to recover\n");
                url_fskip(pb, length - 9);
                if (!read_packet)
                    return AVERROR(EIO);
                goto recover;
            }

            get_buffer(pb, pes_header_data, pes_header_data_length);
            length -= 9 + pes_header_data_length;

            pes_packet_length -= 3 + pes_header_data_length;

            pvactx->continue_pes = pes_packet_length;

            if (pes_flags & 0x80 && (pes_header_data[0] & 0xf0) == 0x20)
                pva_pts = ff_parse_pes_pts(pes_header_data);
        }

        pvactx->continue_pes -= length;

        if (pvactx->continue_pes < 0) {
            pva_log(s, AV_LOG_WARNING, "audio data corruption\n");
            pvactx->continue_pes = 0;
        }
    }

    if (pva_pts != AV_NOPTS_VALUE)
        av_add_index_entry(s->streams[streamid-1], startpos, pva_pts, 0, 0, AVINDEX_KEYFRAME);

    *pts   = pva_pts;
    *len   = length;
    *strid = streamid;
    return 0;
}
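
In the audio branch above, ff_parse_pes_pts recovers the presentation timestamp from the PES header bytes; the (pes_header_data[0] & 0xf0) == 0x20 check matches the '0010' prefix of a PTS-only field. A sketch of that decoding, following the MPEG-2 systems layout of the five timestamp bytes (hypothetical helper, not necessarily FFmpeg's implementation):

#include <stdint.h>

/* Sketch: recover the 33-bit PTS from 5 PES timestamp bytes.
 * Layout: '001x' + PTS[32:30] + marker, PTS[29:15] + marker, PTS[14:0] + marker. */
static int64_t parse_pes_pts(const uint8_t *b)
{
    return ((int64_t)(b[0] & 0x0e) << 29) |               /* PTS[32:30] */
           ((int64_t)(((b[1] << 8) | b[2]) >> 1) << 15) | /* PTS[29:15] */
            (int64_t)(((b[3] << 8) | b[4]) >> 1);         /* PTS[14:0]  */
}
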
Code example #7
File: rtpdec_asf.c  Project: RJVB/FFusion
/**
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be left;
 *        <0 when not enough data was provided to return a full packet, or on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, int flags)
{
    ByteIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        init_put_byte(pb, buf, len, 0, NULL, NULL, NULL, NULL);

        while (url_ftell(pb) + 4 < len) {
            int start_off = url_ftell(pb);

            mflags = get_byte(pb);
            if (mflags & 0x80)
                flags |= RTP_FLAG_KEY;
            len_off = get_be24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                url_fskip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                url_fskip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                url_fskip(pb, 4);
            off = url_ftell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                if (asf->pktbuf && len_off != url_ftell(asf->pktbuf)) {
                    uint8_t *p;
                    url_close_dyn_buf(asf->pktbuf, &p);
                    asf->pktbuf = NULL;
                    av_free(p);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = url_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                put_buffer(asf->pktbuf, buf + off, len - off);
                url_fskip(pb, len - off);
                if (!(flags & RTP_FLAG_MARKER))
                    return -1;
                out_len     = url_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */

                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                out_len += cur_len;
                asf->buf = av_realloc(asf->buf, out_len);
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                url_fskip(pb, cur_len);
            }
        }

        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = av_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = url_ftell(pb);
        if (res != 0)
            break;
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_free_packet(pkt);
    }

    return res == 1 ? -1 : res;
}
Code example #8
 bool
 RTMPSession::parseCurrentData()
 {
     const size_t size = m_streamInBuffer->size();
     
     uint8_t buf[size], *p, *start ;
     
     p = &buf[0];
     
     long ret = m_streamInBuffer->get(p, size, false);
     
     if(!p) return false;
     
     while (ret>0) {
         int header_type = (p[0] & 0xC0) >> 6;
         p++;
         ret--;
         
         if (ret <= 0) {
             break;
         }
         
         switch(header_type) {
             case RTMP_HEADER_TYPE_FULL:
             {
                 
                 RTMPChunk_0 chunk;
                 memcpy(&chunk, p, sizeof(RTMPChunk_0));
                 chunk.msg_length.data = get_be24((uint8_t*)&chunk.msg_length);
                 
                 p+=sizeof(chunk);
                 ret -= sizeof(chunk);
                 
                 bool success = handleMessage(p, chunk.msg_type_id);
                 
                 if(!success) {
                     ret = 0; break;
                 }
                 p+=chunk.msg_length.data;
                 ret -= chunk.msg_length.data;
             }
                 break;
                 
             case RTMP_HEADER_TYPE_NO_MSG_STREAM_ID:
             {
                 RTMPChunk_1 chunk;
                 memcpy(&chunk, p, sizeof(RTMPChunk_1));
                 p+=sizeof(chunk);
                 ret -= sizeof(chunk);
                 chunk.msg_length.data = get_be24((uint8_t*)&chunk.msg_length);
                 
                 bool success = handleMessage(p, chunk.msg_type_id);
                 if(!success) {
                     ret = 0; break;
                 }
                 p+=chunk.msg_length.data;
                 ret -= chunk.msg_length.data;
                 
             }
                 break;
                 
             case RTMP_HEADER_TYPE_TIMESTAMP:
             {
                 RTMPChunk_2 chunk;
                 memcpy(&chunk, p, sizeof(RTMPChunk_2));
                 
                 p+=sizeof(chunk)+std::min(ret, long(m_inChunkSize));
                 ret -= sizeof(chunk)+std::min(ret, long(m_inChunkSize));
             }
                 break;
                 
             case RTMP_HEADER_TYPE_ONLY:
             {
                 p += std::min(ret, long(m_inChunkSize));
                 ret -= std::min(ret, long(m_inChunkSize));
             }
                 break;
                 
             default:
                 return false;
         }
     }
     
     return true;
 }