// Convert one decoded AVFrame to a packed RGB24 byte Sequence of
// width * height * 3 bytes.
//
// self           - owning IoAVCodec (used for the cached destination picture)
// avframe        - decoded source frame (data/linesize read only)
// srcPixelFormat - pixel format of avframe
// width, height  - frame dimensions in pixels
//
// Returns a new IoSeq holding a copy of the RGB24 pixel data, or nil if the
// scaler context could not be created.
IoSeq *IoAVCode_frameSeqForAVFrame_(IoAVCodec *self, AVFrame *avframe, int srcPixelFormat, int width, int height)
{
	AVPicture *rgbPicture = IoAVCode_allocDstPictureIfNeeded(self, PIX_FMT_RGB24, width, height);
	int result;
	struct SwsContext *img_convert_ctx;

	// NOTE: a fresh SwsContext per frame is wasteful but matches the original
	// behavior; caching it per (format, size) would be a separate change.
	img_convert_ctx = sws_getContext(width, height, srcPixelFormat,
	                                 width, height, PIX_FMT_RGB24,
	                                 SWS_BICUBIC, NULL, NULL, NULL);

	// FIX: sws_getContext can return NULL (unsupported format/size);
	// previously this was passed straight into sws_scale and crashed.
	if (!img_convert_ctx)
	{
		printf("AVCodec: sws_getContext failed\n");
		return IONIL(self);
	}

	result = sws_scale(img_convert_ctx,
	                   avframe->data, avframe->linesize, 0, height,
	                   rgbPicture->data, rgbPicture->linesize);

	sws_freeContext(img_convert_ctx);

	// FIX: sws_scale returns the output slice height (>= 0) on success;
	// only a negative value indicates an error. The old `if (result)`
	// check logged a spurious warning on every successful frame.
	if (result < 0)
	{
		printf("AVCodec: sws_scale error?\n");
	}

	UArray *data = UArray_newWithData_type_encoding_size_copy_(rgbPicture->data[0],
		CTYPE_uint8_t, CENCODING_NUMBER, width * height * 3, 1); // copy=1: rgbPicture buffer stays owned by self

	return IoSeq_newWithUArray_copy_(IOSTATE, data, 0);
}
// Lazily allocate the per-instance decode state: the shared AVPacket, the
// "frames" list slot, the "videoSize" vector slot, the reusable decoded
// AVFrame, and the audio output buffer. Safe to call repeatedly.
void IoAVCodec_createContextIfNeeded(IoAVCodec *self)
{
	IoAVCodecData *d = DATA(self);

	// shared packet used while demuxing
	if (d->packet == NULL)
	{
		d->packet = calloc(1, sizeof(AVPacket));
	}

	// --- video state ---

	// list that accumulates decoded video frames, exposed as "frames"
	if (d->frames == NULL)
	{
		d->frames = IoList_new(IOSTATE);
		IoObject_setSlot_to_(self, IOSYMBOL("frames"), d->frames);
	}

	// "videoSize" slot: a fresh 2-element float32 vector.
	// NOTE(review): unlike the other members this is rebuilt on every call,
	// not guarded by a NULL check — presumably intentional (resets the slot);
	// confirm before changing.
	{
		UArray *sizeVec = UArray_newWithData_type_encoding_size_copy_("", CTYPE_float32_t, CENCODING_NUMBER, 2, 1);
		IoSeq *sizeSlot = IoSeq_newWithUArray_copy_(IOSTATE, sizeVec, 0);
		IoObject_setSlot_to_(self, IOSYMBOL("videoSize"), sizeSlot);
	}

	// reusable frame for avcodec_decode_* output
	if (d->decodedFrame == NULL)
	{
		d->decodedFrame = avcodec_alloc_frame();
	}

	// --- audio state ---

	// scratch buffer for decoded PCM
	if (d->audioOutBuffer == NULL)
	{
		d->audioOutBuffer = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	}
}
// Serialize a Date into a 4-element int32 array:
//   [0] seconds, [1] microseconds, [2] tz minutes west of GMT, [3] tz DST flag.
//
// Returns a new UArray that takes ownership of the malloc'd buffer
// (copy flag 0), or NULL if allocation fails.
UArray *Date_asSerialization(Date *self)
{
	int32_t *data = malloc(4 * sizeof(int32_t));

	// FIX: the malloc result was dereferenced unchecked — UB on allocation
	// failure. Callers should treat a NULL return as out-of-memory.
	if (data == NULL)
	{
		return NULL;
	}

	data[0] = self->tv.tv_sec;
	data[1] = self->tv.tv_usec;
	data[2] = self->tz.tz_minuteswest;
	data[3] = self->tz.tz_dsttime;

	return UArray_newWithData_type_encoding_size_copy_(data, CTYPE_int32_t, CENCODING_NUMBER, 4, 0);
}
// img_convert-based variant (pre-swscale FFmpeg) of the AVFrame -> RGB24
// Sequence conversion. Same contract as the swscale variant: returns a new
// IoSeq holding width * height * 3 bytes of packed RGB24 pixel data.
IoSeq *IoAVCode_frameSeqForAVFrame_(IoAVCodec *self, AVFrame *avframe, int srcPixelFormat, int width, int height)
{
	// FIX: the destination picture was allocated with srcPixelFormat, but the
	// conversion target below is PIX_FMT_RGB24 — for smaller source formats
	// the buffer could be undersized for the RGB24 output (heap overflow).
	// Allocate for the *destination* format, as the swscale variant does.
	AVPicture *rgbPicture = IoAVCode_allocDstPictureIfNeeded(self, PIX_FMT_RGB24, width, height);
	AVPicture srcPicture;
	int result;

	// Wrap the decoded frame's planes in an AVPicture for img_convert.
	memcpy(srcPicture.data,     avframe->data,     sizeof(uint8_t *) * 4);
	memcpy(srcPicture.linesize, avframe->linesize, sizeof(int) * 4);

	result = img_convert(rgbPicture, PIX_FMT_RGB24, &srcPicture, srcPixelFormat, width, height);

	// img_convert returns 0 on success, negative on failure.
	if (result)
	{
		printf("AVCodec: img_convert error?\n");
	}

	UArray *data = UArray_newWithData_type_encoding_size_copy_(rgbPicture->data[0],
		CTYPE_uint8_t, CENCODING_NUMBER, width * height * 3, 1); // copy=1: rgbPicture buffer stays owned by self

	return IoSeq_newWithUArray_copy_(IOSTATE, data, 0);
}
// Scan the opened format context, open decoders for the audio and video
// streams found, and publish stream metadata (channels, sample rate,
// frame period, video size, durations, frame counts) as slots on self.
//
// Returns 0 on success, -1 if stream info could not be read.
int IoAVCodec_findStreams(IoAVCodec *self)
{
	AVFormatContext *formatContext = DATA(self)->formatContext;
	int i;

	// FIX: the av_find_stream_info result was ignored; if it fails the
	// stream array is unreliable, so bail out instead of reading garbage.
	if (av_find_stream_info(formatContext) < 0)
	{
		return -1;
	}

	for (i = 0; i < formatContext->nb_streams; i++)
	{
		AVStream *stream = formatContext->streams[i];
		AVCodecContext *codecContext = stream->codec;

		switch (codecContext->codec_type)
		{
			case CODEC_TYPE_AUDIO:
				DATA(self)->audioStreamIndex = i;

				{
					AVCodec *codec = avcodec_find_decoder(codecContext->codec_id);

					// Only record the context if the decoder actually opened.
					if (codec)
					{
						int err = avcodec_open(codecContext, codec);

						if (err == 0)
						{
							DATA(self)->audioContext = codecContext;
						}
					}
				}

				IoObject_setSlot_to_(self, IOSYMBOL("audioChannels"),   IONUMBER(codecContext->channels));
				IoObject_setSlot_to_(self, IOSYMBOL("audioSampleRate"), IONUMBER(codecContext->sample_rate));
				IoObject_setSlot_to_(self, IOSYMBOL("audioBitRate"),    IONUMBER(codecContext->bit_rate));
				IoObject_setSlot_to_(self, IOSYMBOL("audioDuration"),   IONUMBER(stream->duration));
				IoObject_setSlot_to_(self, IOSYMBOL("audioFrameCount"), IONUMBER(stream->nb_frames));
				break;

			case CODEC_TYPE_VIDEO:
			{
				DATA(self)->videoStreamIndex = i;

				{
					AVCodec *codec = avcodec_find_decoder(codecContext->codec_id);

					if (codec)
					{
						int err = avcodec_open(codecContext, codec);

						if (err == 0)
						{
							DATA(self)->videoContext = codecContext;
						}
					}
				}

				{
					// seconds per frame, from the codec time base
					float framePeriod = ((float)codecContext->time_base.num) / ((float)codecContext->time_base.den);

					IoObject_setSlot_to_(self, IOSYMBOL("framePeriod"),     IONUMBER(framePeriod));
					IoObject_setSlot_to_(self, IOSYMBOL("videoDuration"),   IONUMBER(stream->duration));
					IoObject_setSlot_to_(self, IOSYMBOL("videoFrameCount"), IONUMBER(stream->nb_frames));
				}

				{
					// "videoSize" slot: [width, height] as a float32 vector;
					// copy=0, so writes to sizeUArray are visible through sizeSeq.
					UArray *sizeUArray = UArray_newWithData_type_encoding_size_copy_("", CTYPE_float32_t, CENCODING_NUMBER, 2, 1);
					IoSeq *sizeSeq = IoSeq_newWithUArray_copy_(IOSTATE, sizeUArray, 0);

					UArray_at_putDouble_(sizeUArray, 0, codecContext->width);
					UArray_at_putDouble_(sizeUArray, 1, codecContext->height);
					IoObject_setSlot_to_(self, IOSYMBOL("videoSize"), sizeSeq);
				}

				break;
			}

			// Stream types we don't handle — skip. (Collapsed from five
			// identical `continue` cases; behavior is unchanged.)
			case CODEC_TYPE_UNKNOWN:
			case CODEC_TYPE_DATA:
			case CODEC_TYPE_SUBTITLE:
			case CODEC_TYPE_NB:
			default:
				continue;
		}
	}

	return 0;
}