Example #1
void
gobee_theora_push_frame(void *frame, int sock, struct sockaddr *addr,
    int addrlen)
{
  int err;
  ogg_packet op;
  time_t stamp = time(NULL);

  fprintf(stderr, "[YUV/JNI] %s() ENTER, framebuf=%p\n", __FUNCTION__, frame);

  __gobee_yuv_set_data(&s_ycbcr, frame);

  th_encode_ycbcr_in(tctx, &s_ycbcr);

  fprintf(stderr, "[YUV/JNI] %s() EXIT\n", __FUNCTION__);

  if ((err = th_encode_packetout(tctx, 0, &op)) > 0
  /* && op.bytes > 0*/)
    {
      gobee_theora_send_fragmented_rtp(&op, sock, addr, addrlen, stamp);
    }
  else
    {
      fprintf(stderr, "error encoding packet, %d\n", err);
    }
}
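
Example #1 pulls a single packet per frame and ignores the return value of th_encode_ycbcr_in(). A minimal, hedged sketch of the same submit/drain step with both return values checked (the helper name and the "send" callback are illustrative, not part of the gobee code):

#include <theora/theoraenc.h>

/* Hedged sketch: submit one frame and drain every packet the encoder has
 * ready. Today libtheora emits at most one packet per frame, but looping
 * keeps the code correct if that ever changes. */
static int
push_frame_checked(th_enc_ctx *enc, th_ycbcr_buffer ycbcr,
    void (*send)(ogg_packet *op))
{
  ogg_packet op;
  int err;

  if ((err = th_encode_ycbcr_in(enc, ycbcr)) != 0)
    return err;                     /* TH_EFAULT or TH_EINVAL */

  while ((err = th_encode_packetout(enc, 0, &op)) > 0)
    send(&op);

  return err;                       /* 0 when drained, negative on error */
}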
Example #2
int krad_theora_encoder_write (krad_theora_encoder_t *krad_theora,
                               uint8_t **packet, int *keyframe) {

    int ret;
    int key;

    if (krad_theora->update_config) {
        th_encode_ctl (krad_theora->encoder,
                       TH_ENCCTL_SET_QUALITY, &krad_theora->quality, sizeof(int));
        krad_theora->update_config = 0;
    }


    ret = th_encode_ycbcr_in (krad_theora->encoder, krad_theora->ycbcr);
    if (ret != 0) {
        failfast ("krad_theora_encoder_write th_encode_ycbcr_in failed! %d", ret);
    }

    // Note: Currently the encoder operates in a one-frame-in,
    // one-packet-out manner. However, this may be changed in the future.

    ret = th_encode_packetout (krad_theora->encoder,
                               krad_theora->finish,
                               &krad_theora->packet);
    if (ret < 1) {
        failfast ("krad_theora_encoder_write th_encode_packetout failed! %d",
                  ret);
    }

    *packet = krad_theora->packet.packet;

    key = th_packet_iskeyframe (&krad_theora->packet);
    *keyframe = key;
    if (*keyframe == -1) {
        failfast ("krad_theora_encoder_write th_packet_iskeyframe failed! %d",
                  *keyframe);
    }

    if (key) {
        //printk ("its a keyframe\n");
    }

    // Double check
    //ogg_packet test_packet;
    //ret = th_encode_packetout (krad_theora->encoder,
    // krad_theora->finish, &test_packet);
    //if (ret != 0) {
    //  printf("krad_theora_encoder_write th_encode_packetout
    //  offerd up an extra packet! %d\n", ret);
    //  exit(1);
    //}

    krad_theora->frames++;

    krad_theora->bytes += krad_theora->packet.bytes;

    return krad_theora->packet.bytes;
}
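
The commented-out "Double check" above guards the one-frame-in, one-packet-out assumption mentioned in the note. A hedged, standalone version of that check (fprintf/abort stand in for the example's failfast(); the helper name is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <theora/theoraenc.h>

/* Hedged sketch: after the expected packet has been retrieved, ask the
 * encoder for a second one and require the answer "no packet ready" (0). */
static void assert_no_extra_packet (th_enc_ctx *enc, int finishing) {
    ogg_packet extra;
    int ret = th_encode_packetout (enc, finishing, &extra);
    if (ret != 0) {
        /* ret > 0 means an unexpected extra packet, ret < 0 means an error */
        fprintf (stderr, "unexpected th_encode_packetout result: %d\n", ret);
        abort ();
    }
}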
Example #3
/*
 * Encode length bytes of video from the packet into Ogg stream
 */
PRBool
MediaRecorder::EncodeVideo(PRUint8 *v_frame, int len)
{
    nsresult rv;
    PRUint32 wr;
    th_ycbcr_buffer v_buffer;

    /* Map the packed I420 frame onto the three YCbCr planes */
    v_buffer[0].width = params->width;
    v_buffer[0].stride = params->width;
    v_buffer[0].height = params->height;

    v_buffer[1].width = (v_buffer[0].width >> 1);
    v_buffer[1].height = (v_buffer[0].height >> 1);
    v_buffer[1].stride = v_buffer[1].width;

    v_buffer[2].width = v_buffer[1].width;
    v_buffer[2].height = v_buffer[1].height;
    v_buffer[2].stride = v_buffer[1].stride;

    v_buffer[0].data = v_frame;
    v_buffer[1].data = v_frame + v_buffer[0].width * v_buffer[0].height;
    v_buffer[2].data =
        v_buffer[1].data + v_buffer[0].width * v_buffer[0].height / 4;

    /* Encode 'er up */
    if (th_encode_ycbcr_in(vState->th, v_buffer) != 0) {
        PR_LOG(log, PR_LOG_NOTICE, ("Could not encode frame\n"));
        return PR_FALSE;
    }
    if (!th_encode_packetout(vState->th, 0, &vState->op)) {
        PR_LOG(log, PR_LOG_NOTICE, ("Could not read packet\n"));
        return PR_FALSE;
    }

    ogg_stream_packetin(&vState->os, &vState->op);
    while (ogg_stream_pageout(&vState->os, &vState->og)) {
        rv = WriteData(vState->og.header, vState->og.header_len, &wr);
        rv = WriteData(vState->og.body, vState->og.body_len, &wr);
    }
    
    return PR_TRUE;
}
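
Example #3 only writes out Ogg pages that are already complete. When the recording stops, any partially filled page still buffered in the stream state has to be flushed or the tail of the stream is lost; a hedged fragment (not part of MediaRecorder) reusing vState, WriteData and wr from EncodeVideo() above:

    /* Hedged sketch: flush the final, possibly undersized Ogg page(s) */
    while (ogg_stream_flush(&vState->os, &vState->og)) {
        WriteData(vState->og.header, vState->og.header_len, &wr);
        WriteData(vState->og.body, vState->og.body_len, &wr);
    }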
Example #4
int theoraenc_data_in (TheoraEnc *enc, unsigned char *buffer,
                       long buffer_length,
                       theoraenc_each_packet f) {
  
  ogg_packet p;
  th_ycbcr_buffer y;
  
  if (!enc) return 0;

  yuv422_to_yuv420p (enc->postconv_buffer, buffer, enc_frame_width, enc_frame_height);
  init_ycbcr (y, enc->info, enc->postconv_buffer);
  
  if (0 == th_encode_ycbcr_in (enc->ctx, y))
    while (th_encode_packetout (enc->ctx, 0, &p)) f (&p);
  else
    return 0;
  
  return 1;
}
Example #5
void CHolly_Theora_Video::ProcessFrame( bool bLast )
{
	if ( !m_bFrameWaiting ) return;

	m_bFrameWaiting = false;
	Holly::IContainer* pContainer = Encoder()->Container();

	if ( th_encode_ycbcr_in( m_Encoder, m_Frame ) != 0 )
	{
		return;
	}

	ogg_packet Packet;

	while ( th_encode_packetout( m_Encoder, bLast ? 1 : 0, &Packet ) > 0 )
	{
		pContainer->AddFrame( this, &Packet, 0, 0, 0 );
	}

	
}
Example #6
int theora_encode_YUVin(theora_state *_te,yuv_buffer *_yuv){
  th_api_wrapper  *api;
  th_ycbcr_buffer  buf;
  int              ret;
  api=(th_api_wrapper *)_te->i->codec_setup;
  buf[0].width=_yuv->y_width;
  buf[0].height=_yuv->y_height;
  buf[0].stride=_yuv->y_stride;
  buf[0].data=_yuv->y;
  buf[1].width=_yuv->uv_width;
  buf[1].height=_yuv->uv_height;
  buf[1].stride=_yuv->uv_stride;
  buf[1].data=_yuv->u;
  buf[2].width=_yuv->uv_width;
  buf[2].height=_yuv->uv_height;
  buf[2].stride=_yuv->uv_stride;
  buf[2].data=_yuv->v;
  ret=th_encode_ycbcr_in(api->encode,buf);
  if(ret<0)return ret;
  _te->granulepos=api->encode->state.granpos;
  return ret;
}
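
Example #6 is a compatibility wrapper, which is why it reaches into the encoder's internal state for the granule position. Outside such a wrapper the granule position is normally taken from the ogg_packet returned by th_encode_packetout() and converted to seconds with th_granule_time(); a minimal hedged sketch (the helper name is illustrative):

#include <theora/theoraenc.h>

/* Hedged sketch: presentation time of an encoded packet, in seconds,
 * derived through the public API instead of the encoder internals. */
static double packet_time_seconds(th_enc_ctx *enc,const ogg_packet *op){
  return th_granule_time(enc,op->granulepos);
}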
Example #7
static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    th_ycbcr_buffer t_yuv_buffer;
    TheoraContext *h = avc_context->priv_data;
    ogg_packet o_packet;
    int result, i, ret;

    // EOS, finish and get 1st pass stats if applicable
    if (!frame) {
        th_encode_packetout(h->t_state, 1, &o_packet);
        if (avc_context->flags & CODEC_FLAG_PASS1)
            if (get_stats(avc_context, 1))
                return -1;
        return 0;
    }

    /* Copy planes to the theora yuv_buffer */
    for (i = 0; i < 3; i++) {
        t_yuv_buffer[i].width  = FFALIGN(avc_context->width,  16) >> (i && h->uv_hshift);
        t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> (i && h->uv_vshift);
        t_yuv_buffer[i].stride = frame->linesize[i];
        t_yuv_buffer[i].data   = frame->data[i];
    }

    if (avc_context->flags & CODEC_FLAG_PASS2)
        if (submit_stats(avc_context))
            return -1;

    /* Submit the frame to the encoder (th_encode_ycbcr_in) */
    result = th_encode_ycbcr_in(h->t_state, t_yuv_buffer);
    if (result) {
        const char* message;
        switch (result) {
        case -1:
            message = "differing frame sizes";
            break;
        case TH_EINVAL:
            message = "encoder is not ready or is finished";
            break;
        default:
            message = "unknown reason";
            break;
        }
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result);
        return -1;
    }

    if (avc_context->flags & CODEC_FLAG_PASS1)
        if (get_stats(avc_context, 0))
            return -1;

    /* Pick up returned ogg_packet */
    result = th_encode_packetout(h->t_state, 0, &o_packet);
    switch (result) {
    case 0:
        /* No packet is ready */
        return 0;
    case 1:
        /* Success, we have a packet */
        break;
    default:
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed [%d]\n", result);
        return -1;
    }

    /* Copy ogg_packet content out to buffer */
    if ((ret = ff_alloc_packet(pkt, o_packet.bytes)) < 0) {
        av_log(avc_context, AV_LOG_ERROR, "Error getting output packet of size %ld.\n", o_packet.bytes);
        return ret;
    }
    memcpy(pkt->data, o_packet.packet, o_packet.bytes);

    // HACK: assumes no encoder delay, this is true until libtheora becomes
    // multithreaded (which will be disabled unless explicitly requested)
    pkt->pts = pkt->dts = frame->pts;
    avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
    if (avc_context->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
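
Example #7 tests o_packet.granulepos against h->keyframe_mask. In the Theora granulepos layout, the low keyframe_granule_shift bits count frames since the last key frame, so they are all zero exactly on key frames. A hedged sketch of how such a mask can be derived from the stream's th_info (the helper name is illustrative):

#include <theora/theoraenc.h>

/* Hedged sketch: a data packet is a key frame iff
 * !(op.granulepos & theora_keyframe_mask(&info)). */
static ogg_int64_t theora_keyframe_mask(const th_info *info)
{
    return ((ogg_int64_t)1 << info->keyframe_granule_shift) - 1;
}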
Example #8
    void writeTheora(std::vector<std::vector<uint8_t>> vidframes, std::string writeTo, uint16_t vidWidth, uint16_t vidHeight) {
        // get paddings to the nearest multiple of 0x10 (zero when already aligned)
        uint32_t padW = (16 - vidWidth % 16) % 16;
        uint32_t padH = (16 - vidHeight % 16) % 16;
        uint32_t frmWidth = vidWidth + padW;
        uint32_t frmHeight = vidHeight + padH;

        // initialize theora stream
        th_info vidinfo;
        th_info_init(&vidinfo);
        vidinfo.frame_width     = frmWidth;
        vidinfo.frame_height    = frmHeight;
        vidinfo.pic_width       = vidWidth;
        vidinfo.pic_height      = vidHeight;
        vidinfo.pic_x           = 0;
        vidinfo.pic_y           = 0;
        vidinfo.colorspace      = TH_CS_ITU_REC_470M; // what our RGB->YCbCr function operates on
        vidinfo.pixel_fmt       = TH_PF_444;          // we want the bestest video possible, so no decimation
        vidinfo.target_bitrate  = 0;                  // prefer VBR with quality level...
        vidinfo.quality         = 63;                 // ...which we want as high as possible (since we aren't using photographic frames, lossy compression ruins things)
        vidinfo.fps_numerator   = 15;                 // framerate is 15 fps
        vidinfo.fps_denominator = 1;

        // initialize theora encoding context
        th_enc_ctx * videnc = th_encode_alloc(&vidinfo);

        // initialize theora comment
        th_comment vidcomment;
        th_comment_init(&vidcomment);

        // initialize ogg container
        ogg_stream_state vidcont;
        // serial number chosen by fair dice roll
        if (ogg_stream_init(&vidcont, 42)) { // returned -1, thus failed
            std::cerr << "Failed to initialize ogg container :(\n";
            throw 42;
        }

        // get generic ogg packet & page holders
        ogg_packet vidpacket;
        ogg_page vidpage;

        // generic YCbCr frame, and initial data
        const int Y = 0;
        const int Cb = 1;
        const int Cr = 2; // clarity bonuses
        th_ycbcr_buffer rawdata;
        for (auto & i : rawdata) {
            i.width = i.stride = frmWidth;
            i.height = frmHeight;
            i.data = new unsigned char [frmWidth * frmHeight];
        }
        

        // open file for writing
        std::ofstream vidfile; // because god forbid this thing supports an unsigned char unit
        vidfile.open(writeTo, std::ios::binary);

        // factor out the ogg page writing process a bit
        auto writePage = [&](){
            vidfile.write((char*)vidpage.header, vidpage.header_len);
            if (!vidfile) {
                std::cerr << "An error occured in writing Ogg page header to file. Exiting...\n";
                vidfile.close();
                throw 42;
            }

            vidfile.write((char*)vidpage.body, vidpage.body_len);
            if (!vidfile) {
                std::cerr << "An error occured in writing Ogg page body to file. Exiting...\n";
                vidfile.close();
                throw 42;
            }
        };




        // send header packets to ogg stream
        bool gotone = false;
        while (true) {
            int mkpacket = th_encode_flushheader(videnc, &vidcomment, &vidpacket);
            if (mkpacket == 0) {
                if (gotone) {
                    break;
                } else {
                    std::cerr << "Theora didn't return any header packets.\n";
                    throw 42;
                }
            }

            if (mkpacket < 0) {
                std::cerr << "Theora header flushing failed with error code " << mkpacket << ". Exiting...\n";
                throw 42;
            }

            if (ogg_stream_packetin(&vidcont, &vidpacket)) {
                std::cerr << "Giving packet to Ogg failed, sorry.\n";
                throw 42;
            }

            gotone = true;
        }

        // write ogg pages (and then the remainder via flush) to file
        while (ogg_stream_pageout(&vidcont, &vidpage)) {
            writePage();
        }

        while (ogg_stream_flush(&vidcont, &vidpage)) {
            writePage();
        }

        //////////////////////
        // WRITE THE FRAMES //
        //////////////////////

        for (int FRNO = 0; FRNO < vidframes.size(); FRNO++) {
            auto * VFR = &vidframes.at(FRNO);
            // since we set an offset of (0,0) for the picture, the padding ends
            // up along the bottom and right edges of the frame. Fill the bottom
            // padding rows here (the picture loop below writes rows 0..vidHeight-1)
            for (uint32_t i = vidHeight; i < frmHeight; i++) {
                for (uint32_t j = 0; j < frmWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j] = 0;
                    rawdata[Cb].data[i * frmWidth + j] = 0;
                    rawdata[Cr].data[i * frmWidth + j] = 0;
                }
            }

            // now for the picture itself (every row we add more junk to the right
            // of the image)
            int vecAt = 0; // where we are in the VFR vector
            for (int i = 0; i < vidHeight; i++) {
                for (int j = 0; j < vidWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j]  = VFR->at(vecAt); vecAt++;
                    rawdata[Cb].data[i * frmWidth + j] = VFR->at(vecAt); vecAt++;
                    rawdata[Cr].data[i * frmWidth + j] = VFR->at(vecAt); vecAt++;
                }

                // get right-side padding (fill with junk)
                for (int j = vidWidth; j < frmWidth; j++) {
                    rawdata[Y].data[i * frmWidth + j]  = 0;
                    rawdata[Cb].data[i * frmWidth + j] = 0;
                    rawdata[Cr].data[i * frmWidth + j] = 0;
                }
            }

            // frame made, send through theora
            if (th_encode_ycbcr_in(videnc, rawdata)) {
                std::cerr << "Error in sending frame " << FRNO + 1 << " of " << vidframes.size() << " to Theora.\n";
                throw 42;
            }

            // send theora packets into ogg
            while (true) {
                int packok = th_encode_packetout(videnc, FRNO + 1 == vidframes.size(), &vidpacket);
                if (packok == 0) { break; }
                if (packok < 0) {
                    std::cerr << "Retrieving packet from Theora failed with error code " << packok << ".\n";
                    throw 42;
                }

                if (ogg_stream_packetin(&vidcont, &vidpacket)) {
                    std::cerr << "Giving frame packet to Ogg failed.\n";
                    throw 42;
                }
            }

            // send complete pages from frame to file (we won't flush until
            // after all frames are accounted for, to avoid an abundance of
            // undersized pages)

            while (ogg_stream_pageout(&vidcont, &vidpage)) {
                writePage();
            }
        }

        // take care of any remaining undersized page(s)

        while (ogg_stream_flush(&vidcont, &vidpage)) {
            writePage();
        }

        //// Free/close/etc all relevant structures

        // fstream
        vidfile.close();

        // theora items (the "corrupted double-linked list" seen when freeing the
        // encoder most likely came from ogg_packet_clear() below freeing
        // encoder-owned packet data, not from th_encode_free() itself)
        th_encode_free(videnc);
        th_info_clear(&vidinfo);
        th_comment_clear(&vidcomment);

        for (auto & i : rawdata) {
            delete[] i.data;
        }

        // ogg items -- note: packets returned by libtheora point into
        // encoder-owned storage, so they must not be passed to ogg_packet_clear()
        ogg_stream_clear(&vidcont);
    }
Example #9
static GstFlowReturn
theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
    GstTheoraEnc *enc;
    ogg_packet op;
    GstClockTime timestamp, running_time;
    GstFlowReturn ret;
    gboolean force_keyframe;

    enc = GST_THEORA_ENC (benc);

    /* we keep track of two timelines.
     * - The timestamps from the incoming buffers, which we copy to the outgoing
     *   encoded buffers as-is. We need to do this as we simply forward the
     *   newsegment events.
     * - The running_time of the buffers, which we use to construct the granulepos
     *   in the packets.
     */
    timestamp = frame->pts;

    /* incoming buffers are clipped, so this should be positive */
    running_time =
        gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
                                     GST_FORMAT_TIME, timestamp);
    g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);

    GST_OBJECT_LOCK (enc);
    if (enc->bitrate_changed) {
        long int bitrate = enc->video_bitrate;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
                       sizeof (long int));
        enc->bitrate_changed = FALSE;
    }

    if (enc->quality_changed) {
        long int quality = enc->video_quality;

        th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
                       sizeof (long int));
        enc->quality_changed = FALSE;
    }

    /* see if we need to schedule a keyframe */
    force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
    GST_OBJECT_UNLOCK (enc);

    if (enc->packetno == 0) {
        /* no packets written yet, setup headers */
        GstCaps *caps;
        GstBuffer *buf;
        GList *buffers = NULL;
        int result;
        GstVideoCodecState *state;

        enc->granulepos_offset = 0;
        enc->timestamp_offset = 0;

        GST_DEBUG_OBJECT (enc, "output headers");
        /* Theora streams begin with three headers; the initial header (with
           most of the codec setup parameters) which is mandated by the Ogg
           bitstream spec.  The second header holds any comment fields.  The
           third header holds the bitstream codebook.  We merely need to
           make the headers, then pass them to libtheora one at a time;
           libtheora handles the additional Ogg bitstream constraints */

        /* create the remaining theora headers */
        th_comment_clear (&enc->comment);
        th_comment_init (&enc->comment);

        while ((result =
                    th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
            buf = theora_enc_buffer_from_header_packet (enc, &op);
            buffers = g_list_prepend (buffers, buf);
        }
        if (result < 0) {
            g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
            g_list_free (buffers);
            goto encoder_disabled;
        }

        buffers = g_list_reverse (buffers);

        /* mark buffers and put on caps */
        caps = gst_caps_new_empty_simple ("video/x-theora");
        caps = theora_set_header_on_caps (caps, buffers);
        state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);

        GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);

        gst_video_codec_state_unref (state);

        gst_video_encoder_negotiate (GST_VIDEO_ENCODER (enc));

        gst_video_encoder_set_headers (benc, buffers);

        theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
    }

    {
        th_ycbcr_buffer ycbcr;
        gint res;
        GstVideoFrame vframe;

        if (force_keyframe) {
            theora_enc_reset (enc);
            theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
        }

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
            if (!theora_enc_read_multipass_cache (enc)) {
                ret = GST_FLOW_ERROR;
                goto multipass_read_failed;
            }
        }

        gst_video_frame_map (&vframe, &enc->input_state->info, frame->input_buffer,
                             GST_MAP_READ);
        theora_enc_init_buffer (ycbcr, &vframe);

        res = th_encode_ycbcr_in (enc->encoder, ycbcr);
        gst_video_frame_unmap (&vframe);

        /* none of the failure cases can happen here */
        g_assert (res == 0);

        if (enc->multipass_cache_fd
                && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
            if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
                ret = GST_FLOW_ERROR;
                goto multipass_write_failed;
            }
        }

        ret = GST_FLOW_OK;
        while (th_encode_packetout (enc->encoder, 0, &op)) {
            ret = theora_push_packet (enc, &op);
            if (ret != GST_FLOW_OK)
                goto beach;
        }
    }

beach:
    gst_video_codec_frame_unref (frame);
    return ret;

    /* ERRORS */
multipass_read_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
multipass_write_failed:
    {
        gst_video_codec_frame_unref (frame);
        return ret;
    }
encoder_disabled:
    {
        gst_video_codec_frame_unref (frame);
        GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
                           ("libtheora has been compiled with the encoder disabled"));
        return GST_FLOW_ERROR;
    }
}
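
The comment block in the example above describes the three Theora headers (info, comment and setup) that must precede any video packets. Outside GStreamer the same step is just a flushheader/packetin loop; a minimal hedged sketch for a plain Ogg stream (the function name is illustrative):

#include <theora/theoraenc.h>

/* Hedged sketch: emit all Theora header packets into an already
 * initialized ogg_stream_state. Returns 0 on success, -1 on error. */
static int write_theora_headers (th_enc_ctx * enc, ogg_stream_state * os)
{
    th_comment tc;
    ogg_packet op;
    int ret;

    th_comment_init (&tc);
    while ((ret = th_encode_flushheader (enc, &tc, &op)) > 0) {
        if (ogg_stream_packetin (os, &op) != 0) {
            th_comment_clear (&tc);
            return -1;
        }
    }
    th_comment_clear (&tc);
    return ret < 0 ? -1 : 0;
}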
Example #10
int theo_compress_frame(BGBBTJ_VidCodecCTX *ctx,
	void *src, void *dst, int dsz, int qfl, int clrs, int *rfl)
{
	vfw_ctxinfo *info;
	ogg_packet op;
	int err, i, j, k, ib, ob, w, h, xs, ys, xs1, ys1;
	int cr0, cg0, cb0, ca0;
	int cr1, cg1, cb1, ca1;
	int cr2, cg2, cb2, ca2;
	int cr3, cg3, cb3, ca3;
	int i0, i1;
	int cy0, cy1, cy2, cy3;
	int cu0, cu1, cu2, cu3;
	int cv0, cv1, cv2, cv3;
	int cu, cv, ca;
	byte *sbuf, *tbuf, *ct;

	info=ctx->data;

//	printf("theo_compress_frame: Debug\n");

	if(!dst)
	{
		printf("theo_compress_frame: No Dest\n");
		return(0);
	}

	w=info->ohead->biWidth;
	h=info->ohead->biHeight;
	xs=(w+15)&(~15);
	ys=(h+15)&(~15);
	xs1=xs>>1;
	ys1=ys>>1;

	sbuf=src;
	for(i=0; i<ys1; i++)
		for(j=0; j<xs1; j++)
	{
//		i0=((i*2+0)*w;
//		i1=((i*2+1)*w;

		i0=(ys-i*2-1)*w;
		i1=(ys-i*2-2)*w;

		cr0=sbuf[(i0+(j*2+0))*4+0];	cr1=sbuf[(i0+(j*2+1))*4+0];
		cr2=sbuf[(i1+(j*2+0))*4+0];	cr3=sbuf[(i1+(j*2+1))*4+0];
		cg0=sbuf[(i0+(j*2+0))*4+1];	cg1=sbuf[(i0+(j*2+1))*4+1];
		cg2=sbuf[(i1+(j*2+0))*4+1];	cg3=sbuf[(i1+(j*2+1))*4+1];
		cb0=sbuf[(i0+(j*2+0))*4+2];	cb1=sbuf[(i0+(j*2+1))*4+2];
		cb2=sbuf[(i1+(j*2+0))*4+2];	cb3=sbuf[(i1+(j*2+1))*4+2];
		ca0=sbuf[(i0+(j*2+0))*4+3];	ca1=sbuf[(i0+(j*2+1))*4+3];
		ca2=sbuf[(i1+(j*2+0))*4+3];	ca3=sbuf[(i1+(j*2+1))*4+3];

		ca=(ca0+ca1+ca2+ca3)>>2;

		BGBBTJ_SuperMagenta8_ConvRGBA2YUV(cr0, cg0, cb0, ca,
			&cy0, &cu0, &cv0);
		BGBBTJ_SuperMagenta8_ConvRGBA2YUV(cr1, cg1, cb1, ca,
			&cy1, &cu1, &cv1);
		BGBBTJ_SuperMagenta8_ConvRGBA2YUV(cr2, cg2, cb2, ca,
			&cy2, &cu2, &cv2);
		BGBBTJ_SuperMagenta8_ConvRGBA2YUV(cr3, cg3, cb3, ca,
			&cy3, &cu3, &cv3);

#if 0
		cy0=0.299*cr0+0.587*cg0+0.114*cb0;
		cy1=0.299*cr1+0.587*cg1+0.114*cb1;
		cy2=0.299*cr2+0.587*cg2+0.114*cb2;
		cy3=0.299*cr3+0.587*cg3+0.114*cb3;

		cu0=-0.1687*cr0-0.3313*cg0+0.5*cb0+128;
		cu1=-0.1687*cr1-0.3313*cg1+0.5*cb1+128;
		cu2=-0.1687*cr2-0.3313*cg2+0.5*cb2+128;
		cu3=-0.1687*cr3-0.3313*cg3+0.5*cb3+128;

		cv0=0.5*cr0-0.4187*cg0-0.0813*cb0+128;
		cv1=0.5*cr1-0.4187*cg1-0.0813*cb1+128;
		cv2=0.5*cr2-0.4187*cg2-0.0813*cb2+128;
		cv3=0.5*cr3-0.4187*cg3-0.0813*cb3+128;
#endif

		cu=((cu0+cu1+cu2+cu3)>>2);
		cv=((cv0+cv1+cv2+cv3)>>2);

		cy0=(cy0<0)?0:((cy0>255)?255:cy0);
		cy1=(cy1<0)?0:((cy1>255)?255:cy1);
		cy2=(cy2<0)?0:((cy2>255)?255:cy2);
		cy3=(cy3<0)?0:((cy3>255)?255:cy3);

		cu=(cu<0)?0:((cu>255)?255:cu);
		cv=(cv<0)?0:((cv>255)?255:cv);

//		cy0=(cr0+2*cg0+cb0)>>2;
//		cy1=(cr1+2*cg1+cb1)>>2;
//		cy2=(cr2+2*cg2+cb2)>>2;
//		cy3=(cr3+2*cg3+cb3)>>2;
		
//		k=((cg0+cg1+cg2+cg3)>>2);
//		k=((cy0+cy1+cy2+cy3)>>2);
//		cu=(((((cb0+cb1+cb2+cb3)>>2)-k)*3+8)>>4)+128;
//		cv=(((((cr0+cr1+cr2+cr3)>>2)-k)*5+8)>>4)+128;

		info->ycbbuf[0].data[(i*2+0)*xs+j*2+0]=cy0;
		info->ycbbuf[0].data[(i*2+0)*xs+j*2+1]=cy1;
		info->ycbbuf[0].data[(i*2+1)*xs+j*2+0]=cy2;
		info->ycbbuf[0].data[(i*2+1)*xs+j*2+1]=cy3;
		info->ycbbuf[1].data[i*xs1+j]=cu;
		info->ycbbuf[2].data[i*xs1+j]=cv;
	}

	err=th_encode_ycbcr_in(info->td, info->ycbbuf);
	if(err)
	{
		printf("theo_compress_frame: Encode Error=%d\n", err);
	}
	
	tbuf=dst;
	ct=tbuf;
	while(1)
	{
		err=th_encode_packetout(info->td, 0, &op);
		if(err<=0)break;
		memcpy(ct, op.packet, op.bytes);
		ct+=op.bytes;
	}
	
	if(err<0)
	{
		printf("theo_compress_frame: Err=%d Sz=%d  \n", err, ct-tbuf);
	}

	if(rfl)
	{
		qfl&=(~(BGBBTJ_QFL_IFRAME|BGBBTJ_QFL_PFRAME));
		if(tbuf[0]&0x40)
			{ qfl|=BGBBTJ_QFL_PFRAME; }
		else { qfl|=BGBBTJ_QFL_IFRAME; }
		*rfl=qfl;
	}

//	printf("theo_compress_frame: Sz=%d  \n", ct-tbuf);
	
	return(ct-tbuf);
}
Example #11
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;
  gboolean force_keyframe;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);

  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
  if ((gint64) running_time < 0) {
    GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    gst_buffer_unref (buffer);
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK (enc);
  if (enc->bitrate_changed) {
    long int bitrate = enc->video_bitrate;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_BITRATE, &bitrate,
        sizeof (long int));
    enc->bitrate_changed = FALSE;
  }

  if (enc->quality_changed) {
    long int quality = enc->video_quality;

    th_encode_ctl (enc->encoder, TH_ENCCTL_SET_QUALITY, &quality,
        sizeof (long int));
    enc->quality_changed = FALSE;
  }

  /* see if we need to schedule a keyframe */
  force_keyframe = enc->force_keyframe;
  enc->force_keyframe = FALSE;
  GST_OBJECT_UNLOCK (enc);

  if (force_keyframe) {
    GstClockTime stream_time;
    GstStructure *s;

    stream_time = gst_segment_to_stream_time (&enc->segment,
        GST_FORMAT_TIME, timestamp);

    s = gst_structure_new ("GstForceKeyUnit",
        "timestamp", G_TYPE_UINT64, timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    theora_enc_force_keyframe (enc);

    gst_pad_push_event (enc->srcpad,
        gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
  }

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf;
    GSList *buffers = NULL;
    int result;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* create the remaining theora headers */
    th_comment_clear (&enc->comment);
    th_comment_init (&enc->comment);

    while ((result =
            th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
      ret =
          theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
      if (ret != GST_FLOW_OK) {
        goto header_buffer_alloc;
      }
      buffers = g_slist_prepend (buffers, buf);
    }
    if (result < 0) {
      g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
      g_slist_free (buffers);
      goto encoder_disabled;
    }

    buffers = g_slist_reverse (buffers);

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buffers);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    while (buffers) {
      buf = buffers->data;
      buffers = g_slist_delete_link (buffers, buffers);
      if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
        g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
        g_slist_free (buffers);
        goto header_push;
      }
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    th_ycbcr_buffer ycbcr;
    gint res;

    theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
      if (!theora_enc_read_multipass_cache (enc)) {
        ret = GST_FLOW_ERROR;
        goto multipass_read_failed;
      }
    }

    res = th_encode_ycbcr_in (enc->encoder, ycbcr);
    /* none of the failure cases can happen here */
    g_assert (res == 0);

    if (enc->multipass_cache_fd
        && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
      if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
        ret = GST_FLOW_ERROR;
        goto multipass_write_failed;
      }
    }

    ret = GST_FLOW_OK;
    while (th_encode_packetout (enc->encoder, 0, &op)) {
      GstClockTime next_time;

      next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
multipass_read_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
multipass_write_failed:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
Example #12
static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf,
                        int buf_size, void *data)
{
    th_ycbcr_buffer t_yuv_buffer;
    TheoraContext *h = avc_context->priv_data;
    AVFrame *frame = data;
    ogg_packet o_packet;
    int result, i;

    // EOS, finish and get 1st pass stats if applicable
    if (!frame) {
        th_encode_packetout(h->t_state, 1, &o_packet);
        if (avc_context->flags & CODEC_FLAG_PASS1)
            if (get_stats(avc_context, 1))
                return -1;
        return 0;
    }

    /* Copy planes to the theora yuv_buffer */
    for (i = 0; i < 3; i++) {
        t_yuv_buffer[i].width  = FFALIGN(avc_context->width,  16) >> (i && h->uv_hshift);
        t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> (i && h->uv_vshift);
        t_yuv_buffer[i].stride = frame->linesize[i];
        t_yuv_buffer[i].data   = frame->data[i];
    }

    if (avc_context->flags & CODEC_FLAG_PASS2)
        if (submit_stats(avc_context))
            return -1;

    /* Submit the frame to the encoder (th_encode_ycbcr_in) */
    result = th_encode_ycbcr_in(h->t_state, t_yuv_buffer);
    if (result) {
        const char* message;
        switch (result) {
        case -1:
            message = "differing frame sizes";
            break;
        case TH_EINVAL:
            message = "encoder is not ready or is finished";
            break;
        default:
            message = "unknown reason";
            break;
        }
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result);
        return -1;
    }

    if (avc_context->flags & CODEC_FLAG_PASS1)
        if (get_stats(avc_context, 0))
            return -1;

    /* Pick up returned ogg_packet */
    result = th_encode_packetout(h->t_state, 0, &o_packet);
    switch (result) {
    case 0:
        /* No packet is ready */
        return 0;
    case 1:
        /* Success, we have a packet */
        break;
    default:
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed [%d]\n", result);
        return -1;
    }

    /* Copy ogg_packet content out to buffer */
    if (buf_size < o_packet.bytes) {
        av_log(avc_context, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }
    memcpy(outbuf, o_packet.packet, o_packet.bytes);

    // HACK: does not take codec delay into account (neither does the decoder though)
    avc_context->coded_frame->pts = frame->pts;

    return o_packet.bytes;
}
Example #13
static gavl_sink_status_t
write_video_frame_theora(void * data, gavl_video_frame_t * frame)
  {
  theora_t * theora;
  int result;
  int i;
  ogg_packet op;
  gavl_packet_t gp;
  //  int64_t frame_index;

  //  fprintf(stderr, "Write frame theora\n");
  
  theora = data;
  
  for(i = 0; i < 3; i++)
    {
    theora->buf[i].stride = frame->strides[i];
    theora->buf[i].data   = frame->planes[i];
    }

#ifdef THEORA_1_1
  if(theora->pass == 2)
    {
    /* Input pass data */
    int ret;
    
    while(theora->stats_ptr - theora->stats_buf < theora->stats_size)
      {
      
      ret = th_encode_ctl(theora->ts,
                          TH_ENCCTL_2PASS_IN,
                          theora->stats_ptr,
                          theora->stats_size -
                          (theora->stats_ptr - theora->stats_buf));

      if(ret < 0)
        {
        bg_log(BG_LOG_ERROR, LOG_DOMAIN, "passing 2 pass data failed");
        return GAVL_SINK_ERROR;
        }
      else if(!ret)
        break;
      else
        {
        theora->stats_ptr += ret;
        }
      }
    }
#endif
  
  result = th_encode_ycbcr_in(theora->ts, theora->buf);

#ifdef THEORA_1_1
  /* Output pass data */
  if(theora->pass == 1)
    {
    int ret;
    char * buf;
    ret = th_encode_ctl(theora->ts,
                        TH_ENCCTL_2PASS_OUT,
                        &buf, sizeof(buf));
    if(ret < 0)
      {
      bg_log(BG_LOG_ERROR, LOG_DOMAIN, "getting 2 pass data failed");
      return GAVL_SINK_ERROR;
      }
    fwrite(buf, 1, ret, theora->stats_file);
    }
#endif

  /* Output packet */
  
  if(!th_encode_packetout(theora->ts, 0, &op))
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "Theora encoder produced no packet");
    return GAVL_SINK_ERROR;
    }
  
  gavl_packet_init(&gp);
  bg_ogg_packet_to_gavl(&op, &gp, NULL);
  
  gp.pts      = theora->pts;
  gp.duration = theora->format->frame_duration;

  theora->pts += theora->format->frame_duration;
  
  if(op.bytes && !(op.packet[0] & 0x40)) // Keyframe
    gp.flags |= GAVL_PACKET_TYPE_I | GAVL_PACKET_KEYFRAME;
  else
    gp.flags |= GAVL_PACKET_TYPE_P;
  
#if 0
  fprintf(stderr, "Encoding granulepos: %lld %lld / %d\n",
          op.granulepos,
          op.granulepos >> theora->ti.keyframe_granule_shift,
          op.granulepos & ((1<<theora->ti.keyframe_granule_shift)-1));
#endif
  //  fprintf(stderr, "Write frame theora done\n");
  //  gavl_packet_dump(&gp);
  return gavl_packet_sink_put_packet(theora->psink, &gp);
  }
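
Example #13 infers key frames from the first payload byte (op.packet[0] & 0x40). libtheora also exposes th_packet_iskeyframe(), which returns a positive value for key frames, 0 for delta frames and a negative value for header packets; a hedged drop-in alternative for the flag selection above, reusing op and gp from the function:

  /* Hedged sketch: keyframe detection via the public helper instead of the
   * raw 0x40 bit test */
  if(th_packet_iskeyframe(&op) > 0)
    gp.flags |= GAVL_PACKET_TYPE_I | GAVL_PACKET_KEYFRAME;
  else
    gp.flags |= GAVL_PACKET_TYPE_P;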