Code Example #1
File: MediaRecorder.cpp Project: 1981khj/rainbow
/*
 * Theora stream headers
 */
nsresult
MediaRecorder::SetupTheoraHeaders()
{
    int ret;
    nsresult rv;
    PRUint32 wr;

    /* Create rest of the theora headers */
    for (;;) {
        ret = th_encode_flushheader(
            vState->th, &vState->tc, &vState->op
        );
        if (ret < 0){
            PR_LOG(log, PR_LOG_NOTICE, ("Internal Theora library error\n"));
            return NS_ERROR_FAILURE;
        } else if (!ret) break;
        ogg_stream_packetin(&vState->os, &vState->op);
    }
    /* Flush the rest of theora headers. */
    for (;;) {
        ret = ogg_stream_flush(&vState->os, &vState->og);
        if (ret < 0){
            PR_LOG(log, PR_LOG_NOTICE, ("Internal Ogg library error\n"));
            return NS_ERROR_FAILURE;
        }
        if (ret == 0) break;
        rv = WriteData(vState->og.header, vState->og.header_len, &wr);
        NS_ENSURE_SUCCESS(rv, rv);
        rv = WriteData(vState->og.body, vState->og.body_len, &wr);
        NS_ENSURE_SUCCESS(rv, rv);
    }

    return NS_OK;
}
Code Example #2
File: MediaRecorder.cpp Project: 1981khj/rainbow
/*
 * Theora beginning of stream
 */
nsresult
MediaRecorder::SetupTheoraBOS()
{
    int i;
    nsresult rv;
    PRUint32 wr;
    ogg_uint32_t keyframe;

    if (ogg_stream_init(&vState->os, rand())) {
        PR_LOG(log, PR_LOG_NOTICE, ("Failed ogg_stream_init\n"));
        return NS_ERROR_FAILURE;
    }

    th_info_init(&vState->ti);

    /* Must be multiples of 16 */
    vState->ti.frame_width = (params->width + 15) & ~0xF;
    vState->ti.frame_height = (params->height + 15) & ~0xF;
    vState->ti.pic_width = params->width;
    vState->ti.pic_height = params->height;
    vState->ti.pic_x = (vState->ti.frame_width - params->width) >> 1 & ~1;
    vState->ti.pic_y = (vState->ti.frame_height - params->height) >> 1 & ~1;
    vState->ti.fps_numerator = params->fps_n;
    vState->ti.fps_denominator = params->fps_d;

    /* Are these the right values? */
    keyframe = 64 - 1;
    for (i = 0; keyframe; i++)
        keyframe >>= 1;
    vState->ti.quality = (int)(params->qual * 100);
    vState->ti.colorspace = TH_CS_ITU_REC_470M;
    vState->ti.pixel_fmt = TH_PF_420;
    vState->ti.keyframe_granule_shift = i;

    vState->th = th_encode_alloc(&vState->ti);
    th_info_clear(&vState->ti);

    /* Header init */
    th_comment_init(&vState->tc);
    th_comment_add_tag(&vState->tc, (char *)"ENCODER", (char *)"rainbow");
    if (th_encode_flushheader(
        vState->th, &vState->tc, &vState->op) <= 0) {
        PR_LOG(log, PR_LOG_NOTICE, ("Internal Theora library error\n"));
        return NS_ERROR_FAILURE;
    }
    th_comment_clear(&vState->tc);

    ogg_stream_packetin(&vState->os, &vState->op);
    if (ogg_stream_pageout(&vState->os, &vState->og) != 1) {
        PR_LOG(log, PR_LOG_NOTICE, ("Internal Ogg library error\n"));
        return NS_ERROR_FAILURE;
    }

    rv = WriteData(vState->og.header, vState->og.header_len, &wr);
    NS_ENSURE_SUCCESS(rv, rv);
    rv = WriteData(vState->og.body, vState->og.body_len, &wr);
    NS_ENSURE_SUCCESS(rv, rv);

    return NS_OK;
}
Code Example #3
File: enc.c Project: mgorlick/CRESTaceans
int theoraenc_foreach_header (TheoraEnc *enc, theoraenc_each_packet f) {
  ogg_packet p;

  if (!enc) return 0;

  while (th_encode_flushheader (enc->ctx, enc->comment, &p) > 0)
    f (&p);
  
  return 1;
}
Code Example #4
File: xx_theora.c Project: biddyweb/xwbot
static void
__gobee_theora_send_headers(int sock, struct sockaddr *addr, int addrlen,
    int stamp)
{
  size_t len = 0;
  ogg_packet op;

  th_encode_flushheader(tctx, &tcmnt, &op);

  pconf = (theora_packed_header_t*) realloc(pconf,
      sizeof(theora_packed_header_t) + 12 + op.bytes);

  memcpy(&pconf->data[12], op.packet, op.bytes);
  pconf->plen = op.bytes;
  ((int *) pconf->data)[0] = htonl(3);
  ((int *) pconf->data)[1] = htonl(op.bytes);

  th_encode_flushheader(tctx, &tcmnt, &op);
  //printf("op.bytes=%d\n", op.bytes);
  pconf = (theora_packed_header_t*) realloc(pconf,
      sizeof(theora_packed_header_t) + 12 + pconf->plen + op.bytes);

  memcpy(&pconf->data[12 + pconf->plen], op.packet, op.bytes);
  pconf->plen += op.bytes;
  ((int *) pconf->data)[2] = htonl(op.bytes);

  th_encode_flushheader(tctx, &tcmnt, &op);
  //printf("op.bytes=%d\n", op.bytes);
  pconf = (theora_packed_header_t*) realloc(pconf,
      sizeof(theora_packed_header_t) + 12 + pconf->plen + op.bytes);
  memcpy(&pconf->data[12 + pconf->plen], op.packet, op.bytes);
  pconf->plen += op.bytes;

  len = sizeof(*pconf) + 12 + pconf->plen;
  pconf->plen = htons(pconf->plen);
  pconf->ident = htonl((1 << 4) | 0xffff1100);

  rtp_send(sock, (void *) pconf, (int) len, addr, addrlen, stamp, THEORA_PTYPE,
      seqno++);

}
Code Example #5
File: encapiwrapper.c Project: 03050903/Torque3D
int theora_encode_tables(theora_state *_te,ogg_packet *_op){
  oc_enc_ctx     *enc;
  th_api_wrapper *api;
  int             ret;
  api=(th_api_wrapper *)_te->i->codec_setup;
  enc=api->encode;
  /*If we've already started encoding, fail.*/
  if(enc->packet_state>OC_PACKET_EMPTY||enc->state.granpos!=0){
    return TH_EINVAL;
  }
  /*Reset the state to make sure we output a setup packet.*/
  enc->packet_state=OC_PACKET_SETUP_HDR;
  ret=th_encode_flushheader(api->encode,NULL,_op);
  return ret>=0?0:ret;
}
Code Example #6
void CHolly_Theora_Video::WriteHeader()
{
	th_comment       tc;
	th_comment_init( &tc );
	tc.vendor = (char *)"Holly";

	ogg_packet header;
	ogg_packet header_comm;
	ogg_packet header_code;

	while ( th_encode_flushheader( m_Encoder, &tc, &header ) > 0 )
	{
		Encoder()->Container()->WriteHeader( this, &header );
	}
}
Code Example #7
File: libtheoraenc.c Project: Acidburn0zzz/libav
static av_cold int encode_init(AVCodecContext* avc_context)
{
    th_info t_info;
    th_comment t_comment;
    ogg_packet o_packet;
    unsigned int offset;
    TheoraContext *h = avc_context->priv_data;
    uint32_t gop_size = avc_context->gop_size;

    /* Set up the theora_info struct */
    th_info_init(&t_info);
    t_info.frame_width  = FFALIGN(avc_context->width,  16);
    t_info.frame_height = FFALIGN(avc_context->height, 16);
    t_info.pic_width    = avc_context->width;
    t_info.pic_height   = avc_context->height;
    t_info.pic_x        = 0;
    t_info.pic_y        = 0;
    /* Swap numerator and denominator as time_base in AVCodecContext gives the
     * time period between frames, but theora_info needs the framerate.  */
    t_info.fps_numerator   = avc_context->time_base.den;
    t_info.fps_denominator = avc_context->time_base.num;
    if (avc_context->sample_aspect_ratio.num) {
        t_info.aspect_numerator   = avc_context->sample_aspect_ratio.num;
        t_info.aspect_denominator = avc_context->sample_aspect_ratio.den;
    } else {
        t_info.aspect_numerator   = 1;
        t_info.aspect_denominator = 1;
    }

    if (avc_context->color_primaries == AVCOL_PRI_BT470M)
        t_info.colorspace = TH_CS_ITU_REC_470M;
    else if (avc_context->color_primaries == AVCOL_PRI_BT470BG)
        t_info.colorspace = TH_CS_ITU_REC_470BG;
    else
        t_info.colorspace = TH_CS_UNSPECIFIED;

    if (avc_context->pix_fmt == AV_PIX_FMT_YUV420P)
        t_info.pixel_fmt = TH_PF_420;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV422P)
        t_info.pixel_fmt = TH_PF_422;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV444P)
        t_info.pixel_fmt = TH_PF_444;
    else {
        av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
        return -1;
    }
    av_pix_fmt_get_chroma_sub_sample(avc_context->pix_fmt,
                                     &h->uv_hshift, &h->uv_vshift);

    if (avc_context->flags & CODEC_FLAG_QSCALE) {
        /* to be consistent with the libvorbis implementation, clip global_quality to 0 - 10
           Theora accepts a quality parameter p, which is:
                * 0 <= p <=63
                * an int value
         */
        t_info.quality        = av_clipf(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3;
        t_info.target_bitrate = 0;
    } else {
        t_info.target_bitrate = avc_context->bit_rate;
        t_info.quality        = 0;
    }

    /* Now initialise libtheora */
    h->t_state = th_encode_alloc(&t_info);
    if (!h->t_state) {
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n");
        return -1;
    }

    h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
    /* Clear up theora_info struct */
    th_info_clear(&t_info);

    if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
                      &gop_size, sizeof(gop_size))) {
        av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n");
        return -1;
    }

    // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
    if (avc_context->flags & CODEC_FLAG_PASS1) {
        if (get_stats(avc_context, 0))
            return -1;
    } else if (avc_context->flags & CODEC_FLAG_PASS2) {
        if (submit_stats(avc_context))
            return -1;
    }

    /*
        Output first header packet consisting of theora
        header, comment, and tables.

        Each one is prefixed with a 16bit size, then they
        are concatenated together into libavcodec's extradata.
    */
    offset = 0;

    /* Headers */
    th_comment_init(&t_comment);

    while (th_encode_flushheader(h->t_state, &t_comment, &o_packet))
        if (concatenate_packet(&offset, avc_context, &o_packet))
            return -1;

    th_comment_clear(&t_comment);

    /* Set up the output AVFrame */
    avc_context->coded_frame = av_frame_alloc();

    return 0;
}
Code Example #8
File: theora.cpp Project: kellyrowland/hertools
    void writeTheora(std::vector<std::vector<uint8_t>> vidframes, std::string writeTo, uint16_t vidWidth, uint16_t vidHeight) {
        // get paddings to the nearest multiple of 0x10 (zero if already aligned)
        uint32_t padW = (16 - vidWidth % 16) % 16;
        uint32_t padH = (16 - vidHeight % 16) % 16;
        uint32_t frmWidth = vidWidth + padW;
        uint32_t frmHeight = vidHeight + padH;

        // initialize theora stream
        th_info vidinfo;
        th_info_init(&vidinfo);
        vidinfo.frame_width     = frmWidth;
        vidinfo.frame_height    = frmHeight;
        vidinfo.pic_width       = vidWidth;
        vidinfo.pic_height      = vidHeight;
        vidinfo.pic_x           = 0;
        vidinfo.pic_y           = 0;
        vidinfo.colorspace      = TH_CS_ITU_REC_470M; // what our RGB->YCbCr function operates on
        vidinfo.pixel_fmt       = TH_PF_444;          // we want the bestest video possible, so no decimation
        vidinfo.target_bitrate  = 0;                  // prefer VBR with quality level...
        vidinfo.quality         = 63;                 // ...which we want as high as possible (since we aren't using photographic frames, lossy compression ruins things)
        vidinfo.fps_numerator   = 15;                 // framerate is 15 fps
        vidinfo.fps_denominator = 1;

        // initialize theora encoding context
        th_enc_ctx * videnc = th_encode_alloc(&vidinfo);

        // initialize theora comment
        th_comment vidcomment;
        th_comment_init(&vidcomment);

        // initialize ogg container
        ogg_stream_state vidcont;
        // serial number chosen by fair dice roll
        if (ogg_stream_init(&vidcont, 42)) { // returned -1, thus failed
            std::cerr << "Failed to initialize ogg container :(\n";
            throw 42;
        }

        // get generic ogg packet & page holders
        ogg_packet vidpacket;
        ogg_page vidpage;

        // generic YCbCr frame, and initial data
        const int Y = 0;
        const int Cb = 1;
        const int Cr = 2; // clarity bonuses
        th_ycbcr_buffer rawdata;
        for (auto & i : rawdata) {
            i.width = i.stride = frmWidth;
            i.height = frmHeight;
            i.data = new unsigned char [frmWidth * frmHeight];
        }
        

        // open file for writing
        std::ofstream vidfile; // because god forbid this thing supports an unsigned char unit
        vidfile.open(writeTo, std::ios::binary);

        // factor out the ogg page writing process a bit
        auto writePage = [&](){
            vidfile.write((char*)vidpage.header, vidpage.header_len);
            if (!vidfile) {
                std::cerr << "An error occured in writing Ogg page header to file. Exiting...\n";
                vidfile.close();
                throw 42;
            }

            vidfile.write((char*)vidpage.body, vidpage.body_len);
            if (!vidfile) {
                std::cerr << "An error occured in writing Ogg page body to file. Exiting...\n";
                vidfile.close();
                throw 42;
            }
        };




        // send header packets to ogg stream
        bool gotone = false;
        while (true) {
            int mkpacket = th_encode_flushheader(videnc, &vidcomment, &vidpacket);
            if (mkpacket == 0) {
                if (gotone) {
                    break;
                } else {
                    std::cerr << "Theora didn't return any header packets.\n";
                    throw 42;
                }
            }

            if (mkpacket < 0) {
                std::cerr << "Theora header flushing failed with error code " << mkpacket << ". Exiting...\n";
                throw 42;
            }

            if (ogg_stream_packetin(&vidcont, &vidpacket)) {
                std::cerr << "Giving packet to Ogg failed, sorry.\n";
                throw 42;
            }

            gotone = true;
        }

        // write ogg pages (and then the remainder via flush) to file
        while (ogg_stream_pageout(&vidcont, &vidpage)) {
            writePage();
        }

        while (ogg_stream_flush(&vidcont, &vidpage)) {
            writePage();
        }

        //////////////////////
        // WRITE THE FRAMES //
        //////////////////////

        for (int FRNO = 0; FRNO < vidframes.size(); FRNO++) {
            auto * VFR = &vidframes.at(FRNO);
            // since we set an offset of (0,0) for the picture, the padding
            // ends up along the bottom and right edges of the frame. This
            // fills the bottom rows with junk
            for (uint32_t i = vidHeight; i < frmHeight; i++) {
                for (uint32_t j = 0; j < frmWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j] = 0;
                    rawdata[Cb].data[i * frmWidth + j] = 0;
                    rawdata[Cr].data[i * frmWidth + j] = 0;
                }
            }

            // now for the picture itself (every row we add more junk to the right
            // of the image)
            int vecAt = 0; // where we are in the VFR vector
            for (int i = 0; i < vidHeight; i++) {
                for (int j = 0; j < vidWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j]  = VFR->at(vecAt); vecAt++;
                    rawdata[Cb].data[i * frmWidth + j] = VFR->at(vecAt); vecAt++;
                    rawdata[Cr].data[i * frmWidth + j] = VFR->at(vecAt); vecAt++;
                }

                // get right-side padding (fill with junk)
                for (int j = vidWidth; j < frmWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j]  = 0;
                    rawdata[Cb].data[i * frmWidth + j] = 0;
                    rawdata[Cr].data[i * frmWidth + j] = 0;
                }
            }

            // frame made, send through theora
            if (th_encode_ycbcr_in(videnc, rawdata)) {
                std::cerr << "Error in sending frame " << FRNO + 1 << " of " << vidframes.size() << " to Theora.\n";
                throw 42;
            }

            // send theora packets into ogg
            while (true) {
                int packok = th_encode_packetout(videnc, FRNO + 1 == vidframes.size(), &vidpacket);
                if (packok == 0) { break; }
                if (packok < 0) {
                    std::cerr << "Retrieving packet from Theora failed with error code " << packok << ".\n";
                    throw 42;
                }

                if (ogg_stream_packetin(&vidcont, &vidpacket)) {
                    std::cerr << "Giving frame packet to Ogg failed.\n";
                    throw 42;
                }
            }

            // send complete pages from frame to file (we won't flush until
            // after all frames are accounted for, to avoid an abundance of
            // undersized pages)

            while (ogg_stream_pageout(&vidcont, &vidpage)) {
                writePage();
            }
        }

        // take care of any remaining undersized page(s)

        while (ogg_stream_flush(&vidcont, &vidpage)) {
            writePage();
        }

        //// Free/close/etc all relevant structures

        // fstream
        vidfile.close();

        // theora items
        //th_encode_free(videnc); // causes a corrupted double-linked list, somehow, so you'll have to live with unfree'd memory :(
        th_info_clear(&vidinfo);
        th_comment_clear(&vidcomment);

        for (auto & i : rawdata) {
            delete[] i.data;
        }

        // ogg items
        ogg_packet_clear(&vidpacket);
        ogg_stream_clear(&vidcont);
    }
Code Example #9
File: demo_recorder.cpp Project: Kebein/teeworlds
void CDemoVideoRecorder::Init(int Width, int Height, int FPS, int Format, const char *pName)
{
    m_pSound = Kernel()->RequestInterface<ISound>();
    m_FPS = FPS;
    m_ScreenWidth = Width;
    m_ScreenHeight = Height;
    m_Format = Format;

    if (m_Format == IClient::DEMO_RECORD_FORMAT_OGV)
    {
        ogg_stream_init(&m_TheoraOggStreamState, rand());
        ogg_stream_init(&m_VorbisOggStreamState, rand());

        char aBuf[1024];
        if (str_find_rev(pName, "/"))
            str_format(aBuf, sizeof(aBuf), "%s.ogv", str_find_rev(pName, "/"));
        else if (str_find_rev(pName, "\\"))
            str_format(aBuf, sizeof(aBuf), "%s.ogv", str_find_rev(pName, "\\"));
        else
            str_format(aBuf, sizeof(aBuf), "%s.ogv", pName);
        m_OggFile = io_open(aBuf, IOFLAG_WRITE);

        //thread_sleep(10000);
        vorbis_info_init(&m_VorbisEncodingInfo);
        vorbis_encode_init_vbr(&m_VorbisEncodingInfo, 2, g_Config.m_SndRate, 1.0f); //2 ch - samplerate - quality 1
        vorbis_analysis_init(&m_VorbisState, &m_VorbisEncodingInfo);
        vorbis_block_init(&m_VorbisState, &m_VorbisBlock);

        vorbis_comment_init(&m_VorbisComment);
        ogg_packet header;
        ogg_packet header_comm;
        ogg_packet header_code;
        vorbis_analysis_headerout(&m_VorbisState, &m_VorbisComment, &header, &header_comm, &header_code);
        ogg_stream_packetin(&m_VorbisOggStreamState, &header);
        ogg_stream_packetin(&m_VorbisOggStreamState, &header_comm);
        ogg_stream_packetin(&m_VorbisOggStreamState, &header_code);


        th_info_init(&m_TheoraEncodingInfo);
        m_TheoraEncodingInfo.frame_width = (m_ScreenWidth + 15) & ~0xF;
        m_TheoraEncodingInfo.frame_height = (m_ScreenHeight + 15) & ~0xF;
        m_TheoraEncodingInfo.pic_width = m_ScreenWidth;
        m_TheoraEncodingInfo.pic_height = m_ScreenHeight;
        m_TheoraEncodingInfo.pic_x = ((m_TheoraEncodingInfo.frame_width - m_ScreenWidth) >> 1) & ~1;
        m_TheoraEncodingInfo.pic_y = ((m_TheoraEncodingInfo.frame_height - m_ScreenHeight) >> 1) & ~1;
        m_TheoraEncodingInfo.colorspace = TH_CS_UNSPECIFIED;
        m_TheoraEncodingInfo.fps_numerator = FPS; //fps
        m_TheoraEncodingInfo.fps_denominator = 1;
        m_TheoraEncodingInfo.aspect_numerator = -1;
        m_TheoraEncodingInfo.aspect_denominator = -1;
        m_TheoraEncodingInfo.pixel_fmt = TH_PF_444;
        m_TheoraEncodingInfo.target_bitrate = (int)(64870*(ogg_int64_t)48000>>16);
        m_TheoraEncodingInfo.quality = 32;
        m_TheoraEncodingInfo.keyframe_granule_shift = 0;
        m_pThreoraContext = th_encode_alloc(&m_TheoraEncodingInfo);
        int arg = TH_RATECTL_CAP_UNDERFLOW;
        th_encode_ctl(m_pThreoraContext, TH_ENCCTL_SET_RATE_FLAGS, &arg, sizeof(arg));
        th_comment CommentHeader;
        ogg_packet OggPacket;
        th_comment_init(&CommentHeader);
        mem_zero(&OggPacket, sizeof(OggPacket));


            //Flush
        //Step 1
        th_encode_flushheader(m_pThreoraContext, &CommentHeader, &OggPacket); // first header

        ogg_stream_packetin(&m_TheoraOggStreamState, &OggPacket);
        //
        ogg_page OggPage;
        ogg_stream_pageout(&m_TheoraOggStreamState, &OggPage);
        io_write(m_OggFile, OggPage.header, OggPage.header_len);
        io_write(m_OggFile, OggPage.body, OggPage.body_len);

        while(1)
        {
            ogg_page OggPage;
            if (ogg_stream_flush(&m_VorbisOggStreamState,&OggPage) == 0)
                break;
            io_write(m_OggFile, OggPage.header, OggPage.header_len);
            io_write(m_OggFile, OggPage.body, OggPage.body_len);
        }

        while(th_encode_flushheader(m_pThreoraContext, &CommentHeader, &OggPacket))
        {
            ogg_stream_packetin(&m_TheoraOggStreamState, &OggPacket);
        }

        ogg_stream_flush(&m_TheoraOggStreamState, &OggPage);
        io_write(m_OggFile, OggPage.header, OggPage.header_len);
        io_write(m_OggFile, OggPage.body, OggPage.body_len);
    }
Code Example #10
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
Code Example #11
File: avi_theora.c Project: cr88192/bgbtech_engine
static BGBBTJ_VidCodecCTX *theo_begin_compress(int fcc,
	BGBBTJ_BMPInfoHeader *in, BGBBTJ_BMPInfoHeader *out)
{
	byte tbuf[16384];
	ogg_packet op;
	BGBBTJ_VidCodecCTX *ctx;
	vfw_ctxinfo *info;
	byte *ct;
	int err, sz, xs, ys, xs1, ys1, px, py;

	if((fcc!=RIFF_TAG_THEO) && (fcc!=RIFF_TAG_theo) &&
			(fcc!=RIFF_TAG_ther))
		return(NULL);

	ctx=BGBBTJ_VidCodecCTX_New();
	info=gcalloc(sizeof(vfw_ctxinfo));
	ctx->data=info;

	info->ihead=gcalloc(sizeof(BGBBTJ_BMPInfoHeader));
	memset(info->ihead, 0, sizeof(BGBBTJ_BMPInfoHeader));
	info->ihead->biSize		= sizeof(BGBBTJ_BMPInfoHeader);
	info->ihead->biWidth		= in->biWidth;
	info->ihead->biHeight		= in->biHeight;
	info->ihead->biPlanes		= in->biPlanes;
	info->ihead->biBitCount		= in->biBitCount;
	info->ihead->biCompression	= in->biCompression;
	info->ihead->biSizeImage	=
		in->biWidth*in->biHeight*in->biBitCount/8;

//	out->biCompression=RIFF_TAG_ther;
	out->biCompression=RIFF_TAG_theo;

	info->ohead=gcalloc(sizeof(BGBBTJ_BMPInfoHeader));
	memset(info->ohead, 0, sizeof(BGBBTJ_BMPInfoHeader));
	info->ohead->biSize		= sizeof(BGBBTJ_BMPInfoHeader);
	info->ohead->biWidth		= out->biWidth;
//	info->ohead->biHeight		= -out->biHeight;
	info->ohead->biHeight		= out->biHeight;
	info->ohead->biPlanes		= out->biPlanes;
	info->ohead->biBitCount		= out->biBitCount;
	info->ohead->biCompression	= out->biCompression;
	info->ihead->biSizeImage	=
		out->biWidth*out->biHeight*out->biBitCount/8;

//	info->buffer=malloc(out->width*out->height*out->bpp/8);

	theora_info_init(&(info->ti));
//	if(err<0)printf("theo_begin_compress: A Err=%d\n");

	xs=(out->biWidth+15)&(~15);
	ys=(out->biHeight+15)&(~15);
	xs1=xs>>1;
	ys1=ys>>1;

    px=((xs-out->biWidth)>>1)&(~1);
    py=((ys-out->biHeight)>>1)&(~1);

	info->ti.width = out->biWidth;
	info->ti.height = out->biHeight;
	info->ti.frame_width = xs;
	info->ti.frame_height = ys;
    info->ti.offset_x=px;
	info->ti.offset_y=py;
//	info->ti.pixel_fmt=TH_PF_420;
	info->ti.pixelformat=TH_PF_420;
//	info->ti.quality=56;
	info->ti.quality=63;
	info->ti.colorspace=TH_CS_UNSPECIFIED;

	info->ti.fps_numerator=24;
	info->ti.fps_denominator=1;
	info->ti.aspect_numerator=1;
	info->ti.aspect_denominator=1;

	th_info_init(&(info->thi));
//	if(err<0)printf("theo_begin_compress: B Err=%d\n");

	info->thi.pic_width = out->biWidth;
	info->thi.pic_height = out->biHeight;
	info->thi.frame_width = xs;
	info->thi.frame_height = ys;
    info->thi.pic_x=px;
	info->thi.pic_y=py;
	info->thi.fps_numerator=24;
	info->thi.fps_denominator=1;

	info->thi.aspect_numerator=1;
	info->thi.aspect_denominator=1;
	
	info->thi.colorspace=TH_CS_UNSPECIFIED;
	info->thi.pixel_fmt=TH_PF_420;
//	info->thi.pixelformat=TH_PF_420;
//	info->thi.quality=48;
	info->thi.quality=63;

	if(theora_encode_init(&(info->th), &(info->ti)) != OC_DISABLED)
		{ theora_clear(&(info->th)); }

	info->td=th_encode_alloc(&(info->thi));
	th_info_clear(&(info->thi));
//	if(err<0)printf("theo_begin_compress: C Err=%d\n");

	th_comment_init(&(info->tc));
//	if(err<0)printf("theo_begin_compress: D Err=%d\n");

	ct=tbuf;
	
    err=th_encode_flushheader(info->td, &(info->tc), &op);
	if(err<0)printf("theo_begin_compress: E Err=%d\n");

	ct[0]=(op.bytes>>8)&255;
	ct[1]=op.bytes&255;
	ct+=2;

    memcpy(ct, op.packet, op.bytes);
    ct+=op.bytes;

    while(1)
    {
		err=th_encode_flushheader(info->td, &(info->tc), &op);
		if(err<=0)break;

		ct[0]=(op.bytes>>8)&255;
		ct[1]=op.bytes&255;
		ct+=2;

		memcpy(ct, op.packet, op.bytes);
		ct+=op.bytes;
    }

	if(err<0)printf("theo_begin_compress: F Err=%d\n");

	sz=ct-tbuf;
	ctx->vidStrd=gcalloc(sz);
	ctx->sz_vidStrd=sz;
	memcpy(ctx->vidStrd, tbuf, sz);

	info->ycbbuf[0].width=xs;
	info->ycbbuf[0].height=ys;
	info->ycbbuf[0].stride=xs;
	info->ycbbuf[0].data=malloc(xs*ys);

	info->ycbbuf[1].width=xs1;
	info->ycbbuf[1].height=ys1;
	info->ycbbuf[1].stride=xs1;
	info->ycbbuf[1].data=malloc(xs1*ys1);

	info->ycbbuf[2].width=xs1;
	info->ycbbuf[2].height=ys1;
	info->ycbbuf[2].stride=xs1;
	info->ycbbuf[2].data=malloc(xs1*ys1);

	ctx->compress_frame=&theo_compress_frame;

	return(ctx);
}
Code Example #12
File: gsttheoraenc.c Project: ChinnaSuhas/ossbuild
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;
  gboolean force_keyframe;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
  if ((gint64) running_time < 0) {
    GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    gst_buffer_unref (buffer);
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK (enc);
  if (enc->bitrate_changed) {
    long int bitrate = enc->video_bitrate;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
        sizeof (long int));
    enc->bitrate_changed = FALSE;
  }

  if (enc->quality_changed) {
    long int quality = enc->video_quality;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
        sizeof (long int));
    enc->quality_changed = FALSE;
  }

  /* see if we need to schedule a keyframe */
  force_keyframe = enc->force_keyframe;
  enc->force_keyframe = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (force_keyframe) {
    GstClockTime stream_time;
    GstStructure *s;

    stream_time = gst_segment_to_stream_time (&enc->segment,
        GST_FORMAT_TIME, timestamp);

    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    theora_enc_force_keyframe (enc);

    gst_pad_push_event (enc->srcpad,
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incomming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf;
    GSList *buffers = NULL;
    int result;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* create the remaining theora headers */
    th_comment_clear (&enc->comment);
    th_comment_init (&enc->comment);

    while ((result =
            th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
      ret =
          theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
      if (ret != GST_FLOW_OK) {
        goto header_buffer_alloc;
      }
      buffers = g_slist_prepend (buffers, buf);
    }
    if (result < 0) {
      g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
      g_slist_free (buffers);
      goto encoder_disabled;
    }

    buffers = g_slist_reverse (buffers);

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buffers);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    while (buffers) {
      buf = buffers->data;
      buffers = g_slist_delete_link (buffers, buffers);
      if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
        g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
        g_slist_free (buffers);
        goto header_push;
      }
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    th_ycbcr_buffer ycbcr;
    gint res;

    theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
      if (!theora_enc_read_multipass_cache (enc)) {
        ret = GST_FLOW_ERROR;
        goto multipass_read_failed;
      }
    }

    res = th_encode_ycbcr_in (enc->encoder, ycbcr);
    /* none of the failure cases can happen here */
    g_assert (res == 0);

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
      if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
        ret = GST_FLOW_ERROR;
        goto multipass_write_failed;
      }
    }

    ret = GST_FLOW_OK;
    while (th_encode_packetout (enc->encoder, 0, &op)) {
      GstClockTime next_time;

      next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
multipass_read_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
multipass_write_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
Code Example #13
File: theora.c Project: Jheengut/gmerlin
static gavl_video_sink_t *
init_theora(void * data, gavl_compression_info_t * ci,
            gavl_video_format_t * format,
            gavl_metadata_t * stream_metadata)
  {
  int sub_h, sub_v;
  int arg_i1, arg_i2;
  uint8_t * ptr;
  ogg_packet op;
  int header_packets;
  
  theora_t * theora = data;

  theora->format = format;

  bg_encoder_set_framerate(&theora->fr, format);
  
  /* Set video format */
  theora->ti.frame_width  = ((format->image_width  + 15)/16*16);
  theora->ti.frame_height = ((format->image_height + 15)/16*16);
  theora->ti.pic_width = format->image_width;
  theora->ti.pic_height = format->image_height;
  
  theora->ti.fps_numerator      = format->timescale;
  theora->ti.fps_denominator    = format->frame_duration;
  theora->ti.aspect_numerator   = format->pixel_width;
  theora->ti.aspect_denominator = format->pixel_height;

  format->interlace_mode = GAVL_INTERLACE_NONE;
    
  format->frame_width  = theora->ti.frame_width;
  format->frame_height = theora->ti.frame_height;
  
  if(theora->cbr)
    theora->ti.quality = 0;
  else
    theora->ti.target_bitrate = 0;

  /* Granule shift */
  theora->ti.keyframe_granule_shift = 0;

  while(1 << theora->ti.keyframe_granule_shift < theora->max_keyframe_interval)
    theora->ti.keyframe_granule_shift++;
  
  theora->ti.colorspace=TH_CS_UNSPECIFIED;

  format->pixelformat =
    gavl_pixelformat_get_best(format->pixelformat,
                              supported_pixelformats, NULL);
    
  switch(format->pixelformat)
    {
    case GAVL_YUV_420_P:
      theora->ti.pixel_fmt = TH_PF_420;
      break;
    case GAVL_YUV_422_P:
      theora->ti.pixel_fmt = TH_PF_422;
      break;
    case GAVL_YUV_444_P:
      theora->ti.pixel_fmt = TH_PF_444;
      break;
    default:
      return 0;
    }
  
  /* Initialize encoder */
  if(!(theora->ts = th_encode_alloc(&theora->ti)))
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,  "th_encode_alloc failed");
    return 0;
    }
  /* Build comment (comments are UTF-8, good for us :-) */

  // build_comment(&theora->tc, metadata);

  /* Call encode CTLs */
  
  // Keyframe frequency

  th_encode_ctl(theora->ts,
                TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
                &theora->max_keyframe_interval, sizeof(theora->max_keyframe_interval));

#ifdef THEORA_1_1  
  // Rate flags

  th_encode_ctl(theora->ts,
                TH_ENCCTL_SET_RATE_FLAGS,
                &theora->rate_flags,sizeof(theora->rate_flags));
#endif
  // Maximum speed

  if(th_encode_ctl(theora->ts,
                   TH_ENCCTL_GET_SPLEVEL_MAX,
                   &arg_i1, sizeof(arg_i1)) != TH_EIMPL)
    {
    arg_i2 = (int)((float)arg_i1 * theora->speed + 0.5);

    if(arg_i2 > arg_i1)
      arg_i2 = arg_i1;
    
    th_encode_ctl(theora->ts, TH_ENCCTL_SET_SPLEVEL,
                  &arg_i2, sizeof(arg_i2));
    }
  
  /* Encode initial packets */

  ci->id = GAVL_CODEC_ID_THEORA;
  ci->flags = GAVL_COMPRESSION_HAS_P_FRAMES;
  
  header_packets = 0;

  /* Build global header */
  while(th_encode_flushheader(theora->ts, &theora->tc, &op) > 0)
    {
    gavl_append_xiph_header(&ci->global_header,
                            (int*)&ci->global_header_len,
                            op.packet, op.bytes);
    
    if(header_packets == 1)
      {
      char * vendor;
      int vendor_len;
      
      /* Extract vendor ID */
      ptr = op.packet + 7;
      vendor_len = GAVL_PTR_2_32LE(ptr); ptr += 4;
      vendor = calloc(1, vendor_len + 1);
      memcpy(vendor, ptr, vendor_len);
      gavl_metadata_set_nocpy(stream_metadata, GAVL_META_SOFTWARE, vendor);
      }
    header_packets++;
    }
  
  if(header_packets < 3)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "Got %d header packets instead of 3", header_packets);
    return 0;
    }

  /* Initialize buffer */
  
  gavl_pixelformat_chroma_sub(theora->format->pixelformat, &sub_h, &sub_v);
  theora->buf[0].width  = theora->format->frame_width;
  theora->buf[0].height = theora->format->frame_height;
  theora->buf[1].width  = theora->format->frame_width  / sub_h;
  theora->buf[1].height = theora->format->frame_height / sub_v;
  theora->buf[2].width  = theora->format->frame_width  / sub_h;
  theora->buf[2].height = theora->format->frame_height / sub_v;
  
  return gavl_video_sink_create(NULL, write_video_frame_theora, theora,
                                theora->format);
  }
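
All of the examples above share the same basic pattern: call th_encode_flushheader() in a loop until it returns 0, hand each returned packet (identification header, comment header, setup header, in that order) to an Ogg stream or some other transport, and treat a negative return value as an error. For reference, the following is a minimal, self-contained sketch of that pattern against plain libtheora 1.x and libogg; the helper names (setup_headers, write_page) and the error handling are illustrative and not taken from any of the projects above.

#include <stdio.h>
#include <ogg/ogg.h>
#include <theora/theoraenc.h>

/* Write one Ogg page (header + body) to the output file. */
static int write_page(FILE *out, const ogg_page *og)
{
    if (fwrite(og->header, 1, og->header_len, out) != (size_t)og->header_len)
        return -1;
    if (fwrite(og->body, 1, og->body_len, out) != (size_t)og->body_len)
        return -1;
    return 0;
}

/* Flush the three Theora header packets from an allocated encoder into an
 * already-initialized ogg_stream_state and write the resulting pages. */
static int setup_headers(th_enc_ctx *enc, ogg_stream_state *os, FILE *out)
{
    th_comment tc;
    ogg_packet op;
    ogg_page og;
    int ret;

    th_comment_init(&tc);

    /* The first header packet (identification header) must sit on a page
     * of its own: it becomes the stream's BOS page. */
    if (th_encode_flushheader(enc, &tc, &op) <= 0)
        goto fail;
    if (ogg_stream_packetin(os, &op) != 0)
        goto fail;
    if (ogg_stream_flush(os, &og) == 0 || write_page(out, &og) < 0)
        goto fail;

    /* Remaining header packets (comment and setup): a positive return means
     * another packet was produced, 0 means all headers have been emitted,
     * and a negative value is an internal error. */
    while ((ret = th_encode_flushheader(enc, &tc, &op)) > 0) {
        if (ogg_stream_packetin(os, &op) != 0)
            goto fail;
    }
    if (ret < 0)
        goto fail;

    /* Force the remaining header packets onto their own page(s) before any
     * video packets are submitted. */
    while (ogg_stream_flush(os, &og) != 0) {
        if (write_page(out, &og) < 0)
            goto fail;
    }

    th_comment_clear(&tc);
    return 0;

fail:
    th_comment_clear(&tc);
    return -1;
}

A caller would allocate the encoder with th_encode_alloc() and initialize the ogg_stream_state with ogg_stream_init() before invoking setup_headers(), much as examples #2 and #9 do before their header-flush loops.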