Example #1
 CX264Codec(DWORD nCodecContextId, const CCodecContextBase& CodecContext) :
     m_X264CodecContext(static_cast<const CX264CodecContext&>(CodecContext)),
     m_Context(sws_getContext(m_X264CodecContext.GetWidth(), m_X264CodecContext.GetHeight(), AV_PIX_FMT_RGBA,
                              m_X264CodecContext.GetWidth(), m_X264CodecContext.GetHeight(), AV_PIX_FMT_YUV420P, 0, nullptr, nullptr, nullptr)),
     m_pFrame(av_frame_alloc()),
     m_pYuvFrame(new unsigned char[m_X264CodecContext.GetWidth() * m_X264CodecContext.GetHeight() * 3 / 2])
 {
     auto pAVCodec = avcodec_find_encoder(AVCodecID::AV_CODEC_ID_H264);
     m_pAVCodecContext.reset(avcodec_alloc_context3(pAVCodec));
     AVDictionary* pOptions = nullptr;
     std::map<std::string, std::string> Options;
     Options["threads"] = "1";
     Options["preset"] = m_X264CodecContext.GetPreset();
     Options["tune"] = "zerolatency";
     for (auto& Option : Options)
     {
         av_dict_set(&pOptions, Option.first.c_str(), Option.second.c_str(), 0);
     }
     m_pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
     m_pAVCodecContext->width = m_X264CodecContext.GetWidth();
     m_pAVCodecContext->height = m_X264CodecContext.GetHeight();
     m_pAVCodecContext->gop_size = m_X264CodecContext.GetFrameCount();
     m_pAVCodecContext->time_base.num = 1;
     m_pAVCodecContext->time_base.den = m_X264CodecContext.GetFps();
     m_pAVCodecContext->field_order = AV_FIELD_PROGRESSIVE;
     CHECK_FFMPEG(avcodec_open2(m_pAVCodecContext.get(), nullptr, &pOptions));
     // av_packet_from_data() expects a buffer allocated with AV_INPUT_BUFFER_PADDING_SIZE extra bytes
     CHECK_FFMPEG(av_packet_from_data(&m_Packet, reinterpret_cast<uint8_t*>(av_malloc(2 * 1024 * 1024 + AV_INPUT_BUFFER_PADDING_SIZE)), 2 * 1024 * 1024));
     // lay out the three planes of the contiguous I420 buffer: Y, then U, then V
     m_pYuvPlanes[0] = m_pYuvFrame.get();
     m_pYuvPlanes[1] = m_pYuvFrame.get() + m_X264CodecContext.GetWidth() * m_X264CodecContext.GetHeight();
     m_pYuvPlanes[2] = m_pYuvFrame.get() + m_X264CodecContext.GetWidth() * m_X264CodecContext.GetHeight() + m_X264CodecContext.GetWidth() * m_X264CodecContext.GetHeight() / 4;
     m_nYuvStrides[0] = m_X264CodecContext.GetWidth();
     m_nYuvStrides[1] = m_X264CodecContext.GetWidth() / 2;
     m_nYuvStrides[2] = m_X264CodecContext.GetWidth() / 2;
     m_pFrame->format = AV_PIX_FMT_YUV420P;
     m_pFrame->width = m_X264CodecContext.GetWidth();
     m_pFrame->height = m_X264CodecContext.GetHeight();
     if (m_X264CodecContext.GetSaveOutputToFile())
     {
         char pFilename[MAX_PATH];
         sprintf_s(pFilename, MAX_PATH, "x264-%d.h264", nCodecContextId);
         if (fopen_s(&m_pOutputFile, pFilename, "wb") != 0)
         {
             throw std::runtime_error(std::string("could not open output file ").append(pFilename));
         }
     }
 }
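
Several of these call sites hand av_malloc'ed buffers straight to av_packet_from_data(). The function transfers ownership of the buffer to the packet, and FFmpeg requires the buffer to be allocated with AV_INPUT_BUFFER_PADDING_SIZE extra zeroed bytes at the end. A minimal sketch of that pattern in C (the function name wrap_buffer_in_packet is illustrative, not taken from the examples):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Allocate a payload buffer with the required zeroed padding and hand
 * ownership of it to an AVPacket via av_packet_from_data(). */
static int wrap_buffer_in_packet(AVPacket *pkt, int payload_size)
{
    uint8_t *buf = av_malloc(payload_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memset(buf + payload_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    int ret = av_packet_from_data(pkt, buf, payload_size);
    if (ret < 0) {
        av_free(buf);   /* on failure the buffer is still owned by the caller */
        return ret;
    }
    /* ... write up to payload_size bytes into pkt->data ... */
    /* later, av_packet_unref(pkt) releases buf through pkt->buf */
    return 0;
}
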
Example #2
        bool ffmpeg_save_flv::save_frame() {

// there is no av_packet_from_data in older versions
#if (LIBAVCODEC_VERSION_MAJOR >= 56)
            if (!save_done) {
                //TODO: timeout
                return false;
            }
            save_done = false;

            AVPacket *t_flv_packet = new AVPacket();

            t_flv_packet->data = NULL;
            t_flv_packet->size = 0;

            av_packet_from_data(t_flv_packet, frame->encoded_buffer, frame->encoded_buffer_size);

            t_flv_packet->flags = AV_PKT_FLAG_KEY;
            // set timestamps
            t_flv_packet->dts = frame->ts;
            t_flv_packet->pts = frame->ts;

            av_write_frame (flv_format_context, t_flv_packet);

            set_outer_stream_state(VSTR_OST_STATE_RUNNING, "");

            // clear the fields and delete the packet without av_packet_unref(),
            // so the caller-owned frame->encoded_buffer is not freed here
            t_flv_packet->data = NULL;
            t_flv_packet->size = 0;
            delete t_flv_packet;

            save_done = true;
            return true;
#else
            set_outer_stream_state(VSTR_OST_STATE_ERROR, "Streaming functionality is not available on this platform.");
            return false;
#endif
        }
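
Example #2 above clears the packet's data and size fields before deleting it so that the caller-owned frame->encoded_buffer is not freed. When the packet is meant to own the buffer, the simpler route is to let av_packet_unref() release it after muxing. A rough sketch under that assumption (write_encoded_frame, fmt_ctx, encoded_buf and ts are illustrative names, and encoded_buf is assumed to be av_malloc'ed with padding):

#include <libavformat/avformat.h>

/* Wrap an av_malloc'ed, padded buffer in a packet, mux it, and let
 * av_packet_unref() free the buffer through the packet's AVBufferRef. */
static int write_encoded_frame(AVFormatContext *fmt_ctx, uint8_t *encoded_buf,
                               int encoded_size, int64_t ts)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int ret = av_packet_from_data(&pkt, encoded_buf, encoded_size);
    if (ret < 0)
        return ret;               /* buffer is still owned by the caller */

    pkt.pts = pkt.dts = ts;
    pkt.flags |= AV_PKT_FLAG_KEY;

    ret = av_write_frame(fmt_ctx, &pkt);
    av_packet_unref(&pkt);        /* frees encoded_buf */
    return ret;
}
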
Example #3
static int mpeg4_unpack_bframes_filter(AVBSFContext *ctx, AVPacket *out)
{
    UnpackBFramesBSFContext *s = ctx->priv_data;
    int pos_p = -1, nb_vop = 0, pos_vop2 = -1, ret = 0;
    AVPacket *in;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    scan_buffer(in->data, in->size, &pos_p, &nb_vop, &pos_vop2);
    av_log(ctx, AV_LOG_DEBUG, "Found %d VOP startcode(s) in this packet.\n", nb_vop);

    if (pos_vop2 >= 0) {
        if (s->b_frame_buf) {
            av_log(ctx, AV_LOG_WARNING,
                   "Missing one N-VOP packet, discarding one B-frame.\n");
            av_freep(&s->b_frame_buf);
            s->b_frame_buf_size = 0;
        }
        /* store the packed B-frame in the BSFContext */
        s->b_frame_buf_size = in->size - pos_vop2;
        s->b_frame_buf      = create_new_buffer(in->data + pos_vop2, s->b_frame_buf_size);
        if (!s->b_frame_buf) {
            s->b_frame_buf_size = 0;
            av_packet_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (nb_vop > 2) {
        av_log(ctx, AV_LOG_WARNING,
       "Found %d VOP headers in one packet, only unpacking one.\n", nb_vop);
    }

    if (nb_vop == 1 && s->b_frame_buf) {
        /* use frame from BSFContext */
        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }

        ret = av_packet_from_data(out, s->b_frame_buf, s->b_frame_buf_size);
        if (ret < 0) {
            av_packet_free(&in);
            return ret;
        }
        if (in->size <= MAX_NVOP_SIZE) {
            /* N-VOP */
            av_log(ctx, AV_LOG_DEBUG, "Skipping N-VOP.\n");
            s->b_frame_buf      = NULL;
            s->b_frame_buf_size = 0;
        } else {
            /* copy packet into BSFContext */
            s->b_frame_buf_size = in->size;
            s->b_frame_buf      = create_new_buffer(in->data, in->size);
            if (!s->b_frame_buf) {
                s->b_frame_buf_size = 0;
                av_packet_unref(out);
                av_packet_free(&in);
                return AVERROR(ENOMEM);
            }
        }
    } else if (nb_vop >= 2) {
        /* use first frame of the packet */
        av_packet_move_ref(out, in);
        out->size = pos_vop2;
    } else if (pos_p >= 0) {
        av_log(ctx, AV_LOG_DEBUG, "Updating DivX userdata (remove trailing 'p').\n");
        av_packet_move_ref(out, in);
        /* remove 'p' (packed) from the end of the (DivX) userdata string */
        out->data[pos_p] = '\0';
    } else {
        /* copy packet */
        av_packet_move_ref(out, in);
    }

    av_packet_free(&in);

    return 0;
}
Example #4
static int omx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    OMXCodecContext *s = avctx->priv_data;
    int ret = 0;
    OMX_BUFFERHEADERTYPE* buffer;
    OMX_ERRORTYPE err;

    if (frame) {
        uint8_t *dst[4];
        int linesize[4];
        int need_copy;
        buffer = get_buffer(&s->input_mutex, &s->input_cond,
                            &s->num_free_in_buffers, s->free_in_buffers, 1);

        buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);

        if (s->input_zerocopy) {
            uint8_t *src[4] = { NULL };
            int src_linesize[4];
            av_image_fill_arrays(src, src_linesize, frame->data[0], avctx->pix_fmt, s->stride, s->plane_size, 1);
            if (frame->linesize[0] == src_linesize[0] &&
                frame->linesize[1] == src_linesize[1] &&
                frame->linesize[2] == src_linesize[2] &&
                frame->data[1] == src[1] &&
                frame->data[2] == src[2]) {
                // If the input frame happens to have all planes stored contiguously,
                // with the right strides, just clone the frame and set the OMX
                // buffer header to point to it
                AVFrame *local = av_frame_clone(frame);
                if (!local) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = local;
                    buffer->pOutputPortPrivate = NULL;
                    buffer->pBuffer = local->data[0];
                    need_copy = 0;
                }
            } else {
                // If not, we need to allocate a new buffer with the right
                // size and copy the input frame into it.
                uint8_t *buf = NULL;
                int image_buffer_size = av_image_get_buffer_size(avctx->pix_fmt, s->stride, s->plane_size, 1);
                if (image_buffer_size >= 0)
                    buf = av_malloc(image_buffer_size);
                if (!buf) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = buf;
                    // Mark that pAppPrivate is an av_malloc'ed buffer, not an AVFrame
                    buffer->pOutputPortPrivate = (void*) 1;
                    buffer->pBuffer = buf;
                    need_copy = 1;
                    buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);
                }
            }
        } else {
            need_copy = 1;
        }
        if (need_copy)
            av_image_copy(dst, linesize, (const uint8_t**) frame->data, frame->linesize, avctx->pix_fmt, avctx->width, avctx->height);
        buffer->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
        buffer->nOffset = 0;
        // Convert the timestamps to microseconds; some encoders can ignore
        // the framerate and do VFR bit allocation based on timestamps.
        buffer->nTimeStamp = to_omx_ticks(av_rescale_q(frame->pts, avctx->time_base, AV_TIME_BASE_Q));
        err = OMX_EmptyThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_EmptyThisBuffer failed: %x\n", err);
            return AVERROR_UNKNOWN;
        }
        s->num_in_frames++;
    }

    while (!*got_packet && ret == 0) {
        // Only wait for output if flushing and not all frames have been output
        buffer = get_buffer(&s->output_mutex, &s->output_cond,
                            &s->num_done_out_buffers, s->done_out_buffers,
                            !frame && s->num_out_frames < s->num_in_frames);
        if (!buffer)
            break;

        if (buffer->nFlags & OMX_BUFFERFLAG_CODECCONFIG && avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
            if ((ret = av_reallocp(&avctx->extradata, avctx->extradata_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE)) < 0) {
                avctx->extradata_size = 0;
                goto end;
            }
            memcpy(avctx->extradata + avctx->extradata_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
            avctx->extradata_size += buffer->nFilledLen;
            memset(avctx->extradata + avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        } else {
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME)
                s->num_out_frames++;
            if (!(buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) || !pkt->data) {
                // If the output packet isn't preallocated, just concatenate everything in our
                // own buffer
                int newsize = s->output_buf_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE;
                if ((ret = av_reallocp(&s->output_buf, newsize)) < 0) {
                    s->output_buf_size = 0;
                    goto end;
                }
                memcpy(s->output_buf + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                s->output_buf_size += buffer->nFilledLen;
                if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                    if ((ret = av_packet_from_data(pkt, s->output_buf, s->output_buf_size)) < 0) {
                        av_freep(&s->output_buf);
                        s->output_buf_size = 0;
                        goto end;
                    }
                    s->output_buf = NULL;
                    s->output_buf_size = 0;
                }
            } else {
                // End of frame, and the caller provided a preallocated frame
                if ((ret = ff_alloc_packet2(avctx, pkt, s->output_buf_size + buffer->nFilledLen, 0)) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
                           (int)(s->output_buf_size + buffer->nFilledLen));
                    goto end;
                }
                memcpy(pkt->data, s->output_buf, s->output_buf_size);
                memcpy(pkt->data + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                av_freep(&s->output_buf);
                s->output_buf_size = 0;
            }
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                pkt->pts = av_rescale_q(from_omx_ticks(buffer->nTimeStamp), AV_TIME_BASE_Q, avctx->time_base);
                // We don't currently enable B-frames for the encoders, so set
                // pkt->dts = pkt->pts. (The calling code behaves worse if the encoder
                // doesn't set the dts).
                pkt->dts = pkt->pts;
                if (buffer->nFlags & OMX_BUFFERFLAG_SYNCFRAME)
                    pkt->flags |= AV_PKT_FLAG_KEY;
                *got_packet = 1;
            }
        }
end:
        err = OMX_FillThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->output_mutex, &s->output_cond, &s->num_done_out_buffers, s->done_out_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_FillThisBuffer failed: %x\n", err);
            ret = AVERROR_UNKNOWN;
        }
    }
    return ret;
}
Example #5
JNIEXPORT jobject JNICALL Java_sender_FFmpeg_encodeFrame (JNIEnv *env, jclass thisclass){
	AVPacket dec_packet;
	av_init_packet(&dec_packet);
	av_init_packet(&packet);
	av_init_packet(&enc_packet);
	dec_packet.data = NULL;
	dec_packet.size = 0;
	packet.data = NULL;
	packet.size = 0;
	enc_packet.data = NULL;
	enc_packet.size = 0;
	int got_frame;
	do {
		fflush(stdout);
		while(av_read_frame(ifmt_ctx, &packet)>= 0) {
			ret = avcodec_decode_video2(codec_ctx, dec_frame,
					&got_frame, &packet);
			if(ret <= 0) {
				printf("Ret: %d\n", ret);
				fflush(stdout);
			}
			if(got_frame) {
				counter++;
				encode_frame(&enc_packet);
				// dec_packet now wraps the same data buffer as enc_packet
				av_packet_from_data(&dec_packet, enc_packet.data, enc_packet.size);
				break;
			}
		}
		//discard some frames according to framerate
	} while(((counter % 3 == 0) && (current_mode == 2)) || (0 && (current_mode == 3)) || ((counter % 3 != 0) && (current_mode == 1)));
	fflush(stdout);
	printf("Size: %d\n", enc_packet.size);
	if(enc_packet.size == 0) {
		fwrite(endcode, 1, sizeof(endcode), f);
		printf("Closing file.");
		fclose(f);
	} else {
		fwrite(save_packet.data, 1, save_packet.size, f);
	}

	//--------------------------------------------------------------------------------------------------
	//Create return object

	jclass cls = (*env)->FindClass(env, "sender/AVPacket");
	if(!cls) {
		printf("Class 'AVPacket' not found!\n");
		fflush(stdout);
	}

	jmethodID constructor = (*env)->GetMethodID(env, cls, "<init>", "([BIJI)V");
	if(!constructor) {
		printf("Constructor could not be found!\n");
		fflush(stdout);
	}

	jbyteArray data;
	jint size;
	jint mode;

	size = enc_packet.size;
	mode = current_mode;

	data = (*env)->NewByteArray(env, size);
	if (data == NULL) {
		return NULL; /* out of memory error thrown */
	}

	// move from the temp structure to the java structure
	(*env)->SetByteArrayRegion(env, data, 0, size, (const jbyte *) enc_packet.data);

	jobject return_packet = (*env)->NewObject(env, cls, constructor, data, size, (jlong) sequence, mode);
	sequence++;

	if(!return_packet) {
		printf("Couldn't create jobject!");
		fflush(stdout);
	}
	//	} else {
	//		printf("jobject created.");
	//		fflush(stdout);
	//	}

	//av_free_packet(&enc_packet);
	//av_free_packet(&dec_packet);
	return return_packet;

}
Example #6
int main() {
    // init accessory
    Accessory acc;
    if (!acc.init(VID, PID)) {
        fprintf(stderr, "Can't init accessory\n");
        return 1;
    }

    printf("AOA init succeed\n");

    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;

    // init ffmpeg codec
    avcodec_register_all();
    pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return 1;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->width = WIDTH;
    pCodecCtx->height = HEIGHT;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
        fprintf(stderr, "codec open failed\n");
        return 1;
    }

    pFrame=av_frame_alloc();

    // init SDL
    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }
    SDL_Surface *screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    SDL_Overlay *bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);

    // init swscaler
    struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                                SWS_BILINEAR, NULL, NULL, NULL);

    SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;

    AVPacket packet;
    int gotFrame;
    unsigned char buf[2];
    int r;
    while(1) {
        r = acc.readUsb(buf, 2);
        if (r < 0) {
            fprintf(stderr, "EOF\n");
            break;
        }

        int length = (((int)buf[0]) << 8) | buf[1];
        //printf("length = %d\n", length);

        // av_packet_from_data() expects the buffer to carry AV_INPUT_BUFFER_PADDING_SIZE extra bytes
        uint8_t *packet_buf = (uint8_t *)av_malloc(length + AV_INPUT_BUFFER_PADDING_SIZE);

        r = acc.readUsb(packet_buf, length);
        av_init_packet(&packet);
        av_packet_from_data(&packet, packet_buf, length);
        avcodec_decode_video2(pCodecCtx, pFrame, &gotFrame, &packet);
        //av_free(packet_buf); // not needed: av_free_packet() below releases the packet's buffer

        if(gotFrame) {
            SDL_LockYUVOverlay(bmp);

            AVPicture pict;
            pict.data[0] = bmp->pixels[0];
            pict.data[1] = bmp->pixels[2];
            pict.data[2] = bmp->pixels[1];

            pict.linesize[0] = bmp->pitches[0];
            pict.linesize[1] = bmp->pitches[2];
            pict.linesize[2] = bmp->pitches[1];

            // Convert the image into YUV format that SDL uses
            sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data,
                      pFrame->linesize, 0, pCodecCtx->height,
                      pict.data, pict.linesize);

            SDL_UnlockYUVOverlay(bmp);
            SDL_DisplayYUVOverlay(bmp, &rect);
        }

        av_free_packet(&packet);
    }

    return 0;
}