static int read_video( lsmash_handler_t *h, int sample_number, void *buf )
{
    avs_handler_t *hp = (avs_handler_t *)h->video_private;
    int buf_linesize = MAKE_AVIUTL_PITCH( hp->vi->width * avs_bits_per_pixel( hp->vi ) );
    AVS_VideoFrame *frame = hp->func.avs_get_frame( hp->clip, sample_number );
    if( hp->func.avs_clip_get_error( hp->clip ) )
        return 0;
    hp->func.avs_bit_blt( hp->env, buf, buf_linesize, avs_get_read_ptr( frame ), avs_get_pitch( frame ),
                          avs_get_row_size( frame ), hp->vi->height );
    hp->func.avs_release_video_frame( frame );
    return avs_bmp_size( hp->vi );
}
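/* The destination stride above comes from MAKE_AVIUTL_PITCH(), which is not
 * shown in this excerpt. A minimal sketch of what it likely does, assuming
 * AviUtl wants each row padded to a 32-bit boundary (hypothetical
 * reconstruction, not the verified definition from the plugin headers): */
#define MAKE_AVIUTL_PITCH( x ) ((((x) + 31) & ~31) >> 3)  /* bits in, padded bytes out */
/* Example: a 719-pixel-wide YUY2 row is 719 * 16 = 11504 bits, which pads up
 * to 11520 bits = 1440 bytes of pitch. */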
static int prepare_video_decoding( lsmash_handler_t *h, video_option_t *opt )
{
    avs_handler_t *hp = (avs_handler_t *)h->video_private;
    h->video_sample_count = hp->vi->num_frames;
    h->framerate_num      = hp->vi->fps_numerator;
    h->framerate_den      = hp->vi->fps_denominator;
    /* BITMAPINFOHEADER */
    h->video_format.biSize        = sizeof( BITMAPINFOHEADER );
    h->video_format.biWidth       = hp->vi->width;
    h->video_format.biHeight      = hp->vi->height;
    h->video_format.biBitCount    = avs_bits_per_pixel( hp->vi );
    h->video_format.biCompression = avs_is_rgb( hp->vi ) ? OUTPUT_TAG_RGB : OUTPUT_TAG_YUY2;
    return 0;
}
/* Copy AviSynth clip data into an AVPacket. */
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    /* This must happen even if the stream is discarded to prevent desync. */
    n = avs->curr_frame++;
    if (discard)
        return 0;

#ifdef USING_AVISYNTH
    /* Define the bpp values for the new AviSynth 2.6 colorspaces.
     * Since AvxSynth doesn't have these functions, special-case
     * it in order to avoid implicit declaration errors. */
    if (avs_library.avs_is_yv24(avs->vi))
        bits = 24;
    else if (avs_library.avs_is_yv16(avs->vi))
        bits = 16;
    else if (avs_library.avs_is_yv411(avs->vi))
        bits = 12;
    else if (avs_library.avs_is_y8(avs->vi))
        bits = 8;
    else
        bits = avs_library.avs_bits_per_pixel(avs->vi);
#else
    bits = avs_bits_per_pixel(avs->vi);
#endif

    /* Without the cast to int64_t, calculation overflows at about 9k x 9k
     * resolution. */
    pkt->size = (((int64_t)avs->vi->width *
                  (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    if (av_new_packet(pkt, pkt->size) < 0)
        return AVERROR(ENOMEM);

    pkt->pts          = n;
    pkt->dts          = n;
    pkt->duration     = 1;
    pkt->stream_index = avs->curr_stream;

    frame = avs_library.avs_get_frame(avs->clip, n);
    error = avs_library.avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }

    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane = avs->planes[i];
#ifdef USING_AVISYNTH
        src_p       = avs_library.avs_get_read_ptr_p(frame, plane);
        pitch       = avs_library.avs_get_pitch_p(frame, plane);
        rowsize     = avs_library.avs_get_row_size_p(frame, plane);
        planeheight = avs_library.avs_get_height_p(frame, plane);
#else
        src_p       = avs_get_read_ptr_p(frame, plane);
        pitch       = avs_get_pitch_p(frame, plane);
        rowsize     = avs_get_row_size_p(frame, plane);
        planeheight = avs_get_height_p(frame, plane);
#endif

        /* Flip RGB video. */
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                rowsize, planeheight);
        dst_p += rowsize * planeheight;
    }

    avs_library.avs_release_video_frame(frame);
    return 0;
}
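/* Why the int64_t cast in the pkt->size computation above matters:
 * width * height * bits is evaluated before the division by 8, so in plain
 * 32-bit int arithmetic it wraps long before the final byte count would.
 * A standalone check of the arithmetic (plain C, independent of the demuxer): */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 9000 x 9000 at 32 bpp is 2,592,000,000 bits, already above
     * INT_MAX (2,147,483,647), even though the resulting byte count
     * (324,000,000) would still fit in an int. */
    int64_t bits64 = (int64_t)9000 * 9000 * 32;
    printf("bits = %lld, bytes = %lld\n",
           (long long)bits64, (long long)(bits64 / 8));
    return 0;
}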
static demuxer_t* demux_open_avs(demuxer_t* demuxer)
{
    int found = 0;
    AVS_T *AVS = demuxer->priv;
    int audio_samplesize = 0;
    AVS->frameno  = 0;
    AVS->sampleno = 0;

    mp_msg(MSGT_DEMUX, MSGL_V, "AVS: demux_open_avs()\n");
    demuxer->seekable = 1;

    AVS->clip = AVS->avs_take_clip(AVS->handler, AVS->avs_env);
    if(!AVS->clip) {
        mp_msg(MSGT_DEMUX, MSGL_V, "AVS: avs_take_clip() failed\n");
        return NULL;
    }

    AVS->video_info = AVS->avs_get_video_info(AVS->clip);
    if (!AVS->video_info) {
        mp_msg(MSGT_DEMUX, MSGL_V, "AVS: avs_get_video_info() call failed\n");
        return NULL;
    }

    if (!avs_is_yv12(AVS->video_info)) {
        AVS->handler = AVS->avs_invoke(AVS->avs_env, "ConvertToYV12",
                                       avs_new_value_array(&AVS->handler, 1), 0);
        if (avs_is_error(AVS->handler)) {
            mp_msg(MSGT_DEMUX, MSGL_V, "AVS: Cannot convert input video to YV12: %s\n",
                   avs_as_string(AVS->handler));
            return NULL;
        }

        AVS->clip = AVS->avs_take_clip(AVS->handler, AVS->avs_env);
        if(!AVS->clip) {
            mp_msg(MSGT_DEMUX, MSGL_V, "AVS: avs_take_clip() failed\n");
            return NULL;
        }

        AVS->video_info = AVS->avs_get_video_info(AVS->clip);
        if (!AVS->video_info) {
            mp_msg(MSGT_DEMUX, MSGL_V, "AVS: avs_get_video_info() call failed\n");
            return NULL;
        }
    }

    // TODO check field-based ??

    /* Video */
    if (avs_has_video(AVS->video_info)) {
        sh_video_t *sh_video = new_sh_video(demuxer, 0);
        found = 1;

        if (demuxer->video->id == -1) demuxer->video->id = 0;
        if (demuxer->video->id == 0)
            demuxer->video->sh = sh_video;
        sh_video->ds = demuxer->video;

        sh_video->disp_w = AVS->video_info->width;
        sh_video->disp_h = AVS->video_info->height;

        //sh_video->format = get_mmioFOURCC(AVS->video_info);
        sh_video->format = mmioFOURCC('Y', 'V', '1', '2');
        sh_video->fps = (double)AVS->video_info->fps_numerator /
                        (double)AVS->video_info->fps_denominator;
        sh_video->frametime = 1.0 / sh_video->fps;

        sh_video->bih = malloc(sizeof(BITMAPINFOHEADER) + (256 * 4));
        sh_video->bih->biCompression = sh_video->format;
        sh_video->bih->biBitCount    = avs_bits_per_pixel(AVS->video_info);
        //sh_video->bih->biPlanes    = 2;
        sh_video->bih->biWidth       = AVS->video_info->width;
        sh_video->bih->biHeight      = AVS->video_info->height;

        sh_video->num_frames         = 0;
        sh_video->num_frames_decoded = 0;
    }

    /* Audio */
    if (avs_has_audio(AVS->video_info))
        switch (AVS->video_info->sample_type) {
            case AVS_SAMPLE_INT8:  audio_samplesize = 1; break;
            case AVS_SAMPLE_INT16: audio_samplesize = 2; break;
            case AVS_SAMPLE_INT24: audio_samplesize = 3; break;
            case AVS_SAMPLE_INT32:
            case AVS_SAMPLE_FLOAT: audio_samplesize = 4; break;
            default:
                mp_msg(MSGT_DEMUX, MSGL_ERR, "AVS: unknown audio type, disabling\n");
        }
    if (audio_samplesize) {
        sh_audio_t *sh_audio = new_sh_audio(demuxer, 0);
        found = 1;

        mp_msg(MSGT_DEMUX, MSGL_V, "AVS: Clip has audio -> Channels = %d - Freq = %d\n",
               AVS->video_info->nchannels, AVS->video_info->audio_samples_per_second);

        if (demuxer->audio->id == -1) demuxer->audio->id = 0;
        if (demuxer->audio->id == 0)
            demuxer->audio->sh = sh_audio;
        sh_audio->ds = demuxer->audio;

        sh_audio->wf = malloc(sizeof(WAVEFORMATEX));
        sh_audio->wf->wFormatTag = sh_audio->format =
            (AVS->video_info->sample_type == AVS_SAMPLE_FLOAT) ? 0x3 : 0x1;
        sh_audio->wf->nChannels = sh_audio->channels = AVS->video_info->nchannels;
        sh_audio->wf->nSamplesPerSec = sh_audio->samplerate =
            AVS->video_info->audio_samples_per_second;
        sh_audio->samplesize = audio_samplesize;
        sh_audio->wf->nAvgBytesPerSec = sh_audio->channels *
                                        sh_audio->samplesize * sh_audio->samplerate;
        sh_audio->wf->nBlockAlign = sh_audio->channels * sh_audio->samplesize;
        sh_audio->wf->wBitsPerSample = sh_audio->samplesize * 8;
        sh_audio->wf->cbSize = 0;
        sh_audio->i_bps = sh_audio->wf->nAvgBytesPerSec;
    }

    AVS->init = 1;
    if (found)
        return demuxer;
    else
        return NULL;
}
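/* For reference, the 0x3 / 0x1 magic numbers written to wFormatTag above are
 * the standard WAVE format tags (as defined in the Windows mmreg.h headers): */
#define WAVE_FORMAT_PCM        0x0001  /* integer PCM samples           */
#define WAVE_FORMAT_IEEE_FLOAT 0x0003  /* 32-bit floating-point samples */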
// Copy AviSynth clip data into an AVPacket.
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    // This must happen even if the stream is discarded to prevent desync.
    n = avs->curr_frame++;
    if (discard)
        return 0;

    pkt->pts      = n;
    pkt->dts      = n;
    pkt->duration = 1;

    // Define the bpp values for the new AviSynth 2.6 colorspaces
    if (avs_is_yv24(avs->vi)) {
        bits = 24;
    } else if (avs_is_yv16(avs->vi)) {
        bits = 16;
    } else if (avs_is_yv411(avs->vi)) {
        bits = 12;
    } else if (avs_is_y8(avs->vi)) {
        bits = 8;
    } else {
        bits = avs_bits_per_pixel(avs->vi);
    }

    // Without cast to int64_t, calculation overflows at about 9k x 9k resolution.
    pkt->size = (((int64_t)avs->vi->width * (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    pkt->data = av_malloc(pkt->size);
    if (!pkt->data)
        return AVERROR_UNKNOWN;

    frame = avs_library->avs_get_frame(avs->clip, n);
    error = avs_library->avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_freep(&pkt->data);
        return AVERROR_UNKNOWN;
    }

    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane       = avs->planes[i];
        src_p       = avs_get_read_ptr_p(frame, plane);
        rowsize     = avs_get_row_size_p(frame, plane);
        planeheight = avs_get_height_p(frame, plane);
        pitch       = avs_get_pitch_p(frame, plane);

        // Flip RGB video.
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        // An issue with avs_bit_blt on 2.5.8 prevents video from working correctly.
        // This problem doesn't exist for 2.6 and AvxSynth, so enable the workaround
        // for 2.5.8 only. This only displays the warning and exits if the script has
        // video. 2.5.8's internal interface version is 3, so avs_get_version allows
        // it to work only in the circumstance that the interface is 5 or higher (4 is
        // unused). There's a strong chance that AvxSynth, having been based on 2.5.8,
        // would also be identified as interface version 3, but since AvxSynth doesn't
        // suffer from this problem, special-case it.
#ifdef _WIN32
        if (avs_library->avs_get_version(avs->clip) > 3) {
            avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                     rowsize, planeheight);
        } else {
            av_log(s, AV_LOG_ERROR,
                   "Video input from AviSynth 2.5.8 is not supported. Please upgrade to 2.6.\n");
            avs->error = 1;
            av_freep(&pkt->data);
            return AVERROR_UNKNOWN;
        }
#else
        avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                 rowsize, planeheight);
#endif

        dst_p += rowsize * planeheight;
    }

    avs_library->avs_release_video_frame(frame);
    return 0;
}
// Copy AviSynth clip data into an AVPacket.
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    // This must happen even if the stream is discarded to prevent desync.
    n = avs->curr_frame++;
    if (discard)
        return 0;

    pkt->pts      = n;
    pkt->dts      = n;
    pkt->duration = 1;

    // Define the bpp values for the new AviSynth 2.6 colorspaces
    if (avs_is_yv24(avs->vi)) {
        bits = 24;
    } else if (avs_is_yv16(avs->vi)) {
        bits = 16;
    } else if (avs_is_yv411(avs->vi)) {
        bits = 12;
    } else if (avs_is_y8(avs->vi)) {
        bits = 8;
    } else {
        bits = avs_bits_per_pixel(avs->vi);
    }

    // Without cast to int64_t, calculation overflows at about 9k x 9k resolution.
    pkt->size = (((int64_t)avs->vi->width * (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    pkt->data = av_malloc(pkt->size);
    if (!pkt->data)
        return AVERROR_UNKNOWN;

    frame = avs_library->avs_get_frame(avs->clip, n);
    error = avs_library->avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_freep(&pkt->data);
        return AVERROR_UNKNOWN;
    }

    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane       = avs->planes[i];
        src_p       = avs_get_read_ptr_p(frame, plane);
        rowsize     = avs_get_row_size_p(frame, plane);
        planeheight = avs_get_height_p(frame, plane);
        pitch       = avs_get_pitch_p(frame, plane);

        // Flip RGB video.
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                 rowsize, planeheight);
        dst_p += rowsize * planeheight;
    }

    avs_library->avs_release_video_frame(frame);
    return 0;
}