/* Convert an RGB frame buffer to one MJPEG chunk for the AVI writer
 * (variant with int sizes and unchecked MEM_mallocN allocations). */
void *avi_converter_to_mjpeg (AviMovie *movie, int stream, unsigned char *buffer, int *size)
{
	unsigned char *buf;
	int bufsize = *size;

	numbytes = 0;
	*size = 0;

	buf = MEM_mallocN (movie->header->Height * movie->header->Width * 3, "avi.avi_converter_to_mjpeg 1");

	if (!movie->interlace) {
		check_and_compress_jpeg(movie->streams[stream].sh.Quality / 100, buf, buffer,
		                        movie->header->Width, movie->header->Height, bufsize);
	}
	else {
		/* Separate the two fields, then compress each one as a
		 * half-height JPEG. */
		deinterlace (movie->odd_fields, buf, buffer, movie->header->Width, movie->header->Height);
		MEM_freeN (buffer);
		buffer = buf;
		buf = MEM_mallocN (movie->header->Height * movie->header->Width * 3, "avi.avi_converter_to_mjpeg 2");

		check_and_compress_jpeg(movie->streams[stream].sh.Quality / 100, buf, buffer,
		                        movie->header->Width, movie->header->Height / 2, bufsize / 2);
		*size += numbytes;
		numbytes = 0;
		check_and_compress_jpeg(movie->streams[stream].sh.Quality / 100, buf + *size,
		                        buffer + (movie->header->Height / 2) * movie->header->Width * 3,
		                        movie->header->Width, movie->header->Height / 2, bufsize / 2);
	}
	*size += numbytes;

	MEM_freeN (buffer);
	return buf;
}
/* Hardened variant of the converter above: size_t sizes, overflow-checked
 * imb_alloc_pixels() allocation with NULL checks, and size_t casts on the
 * second-field offset. */
void *avi_converter_to_mjpeg(AviMovie *movie, int stream, unsigned char *buffer, size_t *size)
{
	unsigned char *buf;
	size_t bufsize = *size;

	numbytes = 0;
	*size = 0;

	buf = imb_alloc_pixels(movie->header->Height, movie->header->Width, 3, sizeof(unsigned char), "avi.avi_converter_to_mjpeg 1");
	if (!buf) {
		return NULL;
	}

	if (!movie->interlace) {
		Compress_JPEG(movie->streams[stream].sh.Quality / 100, buf, buffer,
		              movie->header->Width, movie->header->Height, bufsize);
		*size += numbytes;
	}
	else {
		deinterlace(movie->odd_fields, buf, buffer, movie->header->Width, movie->header->Height);
		MEM_freeN(buffer);
		buffer = buf;

		buf = imb_alloc_pixels(movie->header->Height, movie->header->Width, 3, sizeof(unsigned char), "avi.avi_converter_to_mjpeg 1");
		if (buf) {
			/* Compress each field as its own half-height JPEG. */
			Compress_JPEG(movie->streams[stream].sh.Quality / 100, buf, buffer,
			              movie->header->Width, movie->header->Height / 2, bufsize / 2);
			*size += numbytes;
			numbytes = 0;
			Compress_JPEG(movie->streams[stream].sh.Quality / 100, buf + *size,
			              buffer + (size_t)(movie->header->Height / 2) * (size_t)movie->header->Width * 3,
			              movie->header->Width, movie->header->Height / 2, bufsize / 2);
			*size += numbytes;
		}
	}

	MEM_freeN(buffer);
	return buf;
}
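/*
 * Both converters above rely on deinterlace() to separate the two fields of
 * an interlaced frame before each is JPEG-compressed at half height.  A
 * minimal sketch of such a field split, assuming packed 24-bit RGB rows; the
 * name and the odd-fields flag follow the call sites above, but this is an
 * illustrative stand-in, not the library's actual routine.
 */
#include <string.h>

static void deinterlace_fields_sketch(int odd_fields, unsigned char *dst,
                                      const unsigned char *src, int width, int height)
{
	size_t row = (size_t)width * 3; /* packed RGB24 row */
	int y;

	/* Gather one field into the top half of dst and the other into the
	 * bottom half, so each half can be compressed as its own image. */
	for (y = 0; y < height / 2; y++) {
		memcpy(dst + (size_t)y * row, src + ((size_t)2 * y + odd_fields) * row, row);
		memcpy(dst + ((size_t)(height / 2) + y) * row, src + ((size_t)2 * y + !odd_fields) * row, row);
	}
}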
static GIF_ANIMATION *load_object (PACKFILE *file, long size)
{
    int version;
    BITMAP *bmp = NULL;
    int i, j;
    GIF_ANIMATION *gif = calloc (1, sizeof *gif);
    GIF_FRAME frame;
    int have_global_palette = 0;

    (void) size;

    if (!gif)
        goto error;
    gif->frames_count = 0;

    /* is it really a GIF? */
    if (pack_getc (file) != 'G')
        goto error;
    if (pack_getc (file) != 'I')
        goto error;
    if (pack_getc (file) != 'F')
        goto error;
    if (pack_getc (file) != '8')
        goto error;
    /* '7' or '9', for 87a or 89a. */
    version = pack_getc (file);
    if (version != '7' && version != '9')
        goto error;
    if (pack_getc (file) != 'a')
        goto error;

    gif->width = pack_igetw (file);
    gif->height = pack_igetw (file);
    i = pack_getc (file);
    /* Global color table? */
    if (i & 128)
        gif->palette.colors_count = 1 << ((i & 7) + 1);
    else
        gif->palette.colors_count = 0;
    /* Background color is only valid with a global palette. */
    gif->background_index = pack_getc (file);
    /* Skip aspect ratio. */
    pack_fseek (file, 1);

    if (gif->palette.colors_count)
    {
        read_palette (file, &gif->palette);
        have_global_palette = 1;
    }

    memset (&frame, 0, sizeof frame); /* For first frame. */
    frame.transparent_index = -1;

    do
    {
        i = pack_getc (file);

        switch (i)
        {
            case 0x2c: /* Image Descriptor */
            {
                int w, h;
                int interlaced = 0;

                frame.xoff = pack_igetw (file);
                frame.yoff = pack_igetw (file);
                w = pack_igetw (file);
                h = pack_igetw (file);
                bmp = create_bitmap_ex (8, w, h);
                if (!bmp)
                    goto error;

                i = pack_getc (file);

                /* Local palette. */
                if (i & 128)
                {
                    frame.palette.colors_count = 1 << ((i & 7) + 1);
                    read_palette (file, &frame.palette);
                }
                else
                {
                    frame.palette.colors_count = 0;
                }

                if (i & 64)
                    interlaced = 1;

                if (LZW_decode (file, bmp))
                    goto error;

                if (interlaced)
                    deinterlace (bmp);

                frame.bitmap_8_bit = bmp;
                bmp = NULL;

                gif->frames_count++;
                gif->frames = realloc (gif->frames, gif->frames_count * sizeof *gif->frames);
                gif->frames[gif->frames_count - 1] = frame;

                memset (&frame, 0, sizeof frame); /* For next frame. */
                frame.transparent_index = -1;
                break;
            }
            case 0x21: /* Extension Introducer. */
                j = pack_getc (file); /* Extension Type. */
                i = pack_getc (file); /* Size. */

                if (j == 0xf9) /* Graphic Control Extension. */
                {
                    /* size must be 4 */
                    if (i != 4)
                        goto error;
                    i = pack_getc (file);
                    frame.disposal_method = (i >> 2) & 7;
                    frame.duration = pack_igetw (file);
                    if (i & 1) /* Transparency? */
                    {
                        frame.transparent_index = pack_getc (file);
                    }
                    else
                    {
                        pack_fseek (file, 1);
                        frame.transparent_index = -1;
                    }
                    i = pack_getc (file); /* Size. */
                }
                else if (j == 0xff) /* Application Extension. */
                {
                    if (i == 11)
                    {
                        char name[12];
                        pack_fread (name, 11, file);
                        i = pack_getc (file); /* Size. */
                        name[11] = '\0';
                        if (!strcmp (name, "NETSCAPE2.0"))
                        {
                            if (i == 3)
                            {
                                j = pack_getc (file);
                                gif->loop = pack_igetw (file);
                                if (j != 1)
                                    gif->loop = 0;
                                i = pack_getc (file); /* Size. */
                            }
                        }
                    }
                }

                /* Possibly more blocks until terminator block (0). */
                while (i)
                {
                    pack_fseek (file, i);
                    i = pack_getc (file);
                }
                break;
            case 0x3b:
                /* GIF Trailer. */
                pack_fclose (file);
                return gif;
        }
    }
    while (TRUE);

  error:
    if (file)
        pack_fclose (file);
    if (gif)
        algif_destroy_raw_animation (gif);
    if (bmp)
        destroy_bitmap (bmp);
    return NULL;
}
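/*
 * deinterlace(bmp) above undoes GIF's four-pass row interlacing: the file
 * stores every 8th row starting at row 0, every 8th starting at 4, every 4th
 * starting at 2, then every 2nd starting at 1.  A minimal sketch of that
 * reordering on a bare array of row pointers; the real helper works on an
 * Allegro BITMAP, so this standalone version is only illustrative.
 */
#include <stdlib.h>
#include <string.h>

static void gif_deinterlace_rows (unsigned char **rows, int height)
{
    static const int start[4] = { 0, 4, 2, 1 };
    static const int step[4] = { 8, 8, 4, 2 };
    unsigned char **ordered = malloc (height * sizeof *ordered);
    int pass, y, stored = 0;

    if (!ordered)
        return;
    /* Stored row `stored` is the next row of the current pass; map it to
     * its display position `y`. */
    for (pass = 0; pass < 4; pass++)
        for (y = start[pass]; y < height; y += step[pass])
            ordered[y] = rows[stored++];
    memcpy (rows, ordered, height * sizeof *ordered);
    free (ordered);
}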
HRESULT ofxBlackmagicGrabber::VideoInputFrameArrived(IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame){
	IDeckLinkVideoFrame *rightEyeFrame = NULL;
	IDeckLinkVideoFrame3DExtensions *threeDExtensions = NULL;

	// Handle Video Frame
	if(videoFrame) {
		// If 3D mode is enabled we retrieve the 3D extensions interface, which
		// gives us access to the right eye frame by calling GetFrameForRightEye().
		if ((videoFrame->QueryInterface(IID_IDeckLinkVideoFrame3DExtensions, (void **) &threeDExtensions) != S_OK)
		    || (threeDExtensions->GetFrameForRightEye(&rightEyeFrame) != S_OK)) {
			rightEyeFrame = NULL;
		}
		if (threeDExtensions)
			threeDExtensions->Release();

		if (videoFrame->GetFlags() & bmdFrameHasNoInputSource){
			ofLogError(LOG_NAME) << "Frame received (#" << frameCount << ") - No input signal detected";
		}
		/*else {*/
		const char *timecodeString = NULL;
		if (g_timecodeFormat != 0) {
			IDeckLinkTimecode *timecode;
			if (videoFrame->GetTimecode(g_timecodeFormat, &timecode) == S_OK) {
				// GetString() allocates the returned string; it is freed below.
				timecode->GetString(&timecodeString);
				timecode->Release();
			}
		}

		// ofLogVerbose(LOG_NAME) << "Frame received (#" << frameCount
		//	<< ") [" << (timecodeString != NULL ? timecodeString : "No timecode")
		//	<< "] -" << (rightEyeFrame != NULL ? "Valid Frame (3D left/right)" : "Valid Frame")
		//	<< "- Size: " << (videoFrame->GetRowBytes() * videoFrame->GetHeight()) << "bytes";

		if (timecodeString)
			free((void*)timecodeString);

		yuvToRGB(videoFrame);

		if(bDeinterlace)
			deinterlace();

		// Swap the freshly converted back buffer with the front buffer.
		pixelsMutex.lock();
		bNewFrameArrived = true;
		ofPixels *aux = currentPixels;
		currentPixels = backPixels;
		backPixels = aux;
		pixelsMutex.unlock();
		//}

		if (rightEyeFrame)
			rightEyeFrame->Release();

		frameCount++;
	}

#if 0 // No audio
	// Handle Audio Frame
	void *audioFrameBytes;
	if (audioFrame) {
		if (audioOutputFile != -1) {
			audioFrame->GetBytes(&audioFrameBytes);
			write(audioOutputFile, audioFrameBytes,
			      audioFrame->GetSampleFrameCount() * g_audioChannels * (g_audioSampleDepth / 8));
		}
	}
#endif

	return S_OK;
}
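// deinterlace() above takes no arguments and operates on the grabber's own
// buffers; its body is not shown here. A minimal sketch of one common
// approach, a line-averaging pass over backPixels, assuming the ofPixels
// accessors of that openFrameworks era; the blending strategy is an
// assumption, not necessarily what the addon actually ships.
void ofxBlackmagicGrabber::deinterlace(){
	unsigned char *pix = backPixels->getPixels();
	int w = backPixels->getWidth();
	int h = backPixels->getHeight();
	size_t row = (size_t)w * 3; // packed RGB rows

	// Rebuild each odd-numbered line (the second field) as the average of
	// the lines above and below it, suppressing interlace combing.
	for(int y = 1; y + 1 < h; y += 2) {
		const unsigned char *above = pix + (size_t)(y - 1) * row;
		const unsigned char *below = pix + (size_t)(y + 1) * row;
		unsigned char *line = pix + (size_t)y * row;
		for(size_t x = 0; x < row; x++) {
			line[x] = (unsigned char)((above[x] + below[x]) / 2);
		}
	}
}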
void denoise_frame(void)
{
  uint16_t x, y;
  uint32_t bad_vector = 0;

  /* adjust contrast for luma and chroma */
  contrast_frame();

  switch (denoiser.mode)
  {
  case 0: /* progressive mode */
  {
    /* deinterlacing wanted ? */
    if (denoiser.deinterlace)
      deinterlace();

    /* Generate subsampled images */
    subsample_frame (denoiser.frame.sub2ref, denoiser.frame.ref);
    subsample_frame (denoiser.frame.sub4ref, denoiser.frame.sub2ref);
    subsample_frame (denoiser.frame.sub2avg, denoiser.frame.avg);
    subsample_frame (denoiser.frame.sub4avg, denoiser.frame.sub2avg);

    for (y = 32; y < (denoiser.frame.h + 32); y += 8)
    {
      for (x = 0; x < denoiser.frame.w; x += 8)
      {
        vector.x = 0;
        vector.y = 0;

        if (!low_contrast_block(x, y) &&
            x > (denoiser.border.x) &&
            y > (denoiser.border.y + 32) &&
            x < (denoiser.border.x + denoiser.border.w) &&
            y < (denoiser.border.y + 32 + denoiser.border.h))
        {
          mb_search_44(x, y);
          mb_search_22(x, y);
          mb_search_11(x, y);
          if (mb_search_00(x, y) > denoiser.block_thres)
            bad_vector++;
        }

        if ((vector.x + x) > 0 && (vector.x + x) < W &&
            (vector.y + y) > 32 && (vector.y + y) < (32 + H))
        {
          move_block(x, y);
        }
        else
        {
          vector.x = 0;
          vector.y = 0;
          move_block(x, y);
        }
      }
    }

    /* scene change? */
    if (denoiser.do_reset &&
        denoiser.frame.w * denoiser.frame.h * denoiser.scene_thres / (64 * 100) < bad_vector)
    {
      denoiser.reset = denoiser.do_reset;
    }
    bad_vector = 0;

    average_frame();
    correct_frame2();
    denoise_frame_pass2();
    sharpen_frame();
    black_border();

    ac_memcpy(denoiser.frame.avg[Yy], denoiser.frame.tmp[Yy], denoiser.frame.w * (denoiser.frame.h + 64));
    ac_memcpy(denoiser.frame.avg[Cr], denoiser.frame.tmp[Cr], denoiser.frame.w * (denoiser.frame.h + 64) / 4);
    ac_memcpy(denoiser.frame.avg[Cb], denoiser.frame.tmp[Cb], denoiser.frame.w * (denoiser.frame.h + 64) / 4);
    break;
  }
  case 1: /* interlaced mode */
  {
    /* Generate subsampled images */
    subsample_frame (denoiser.frame.sub2ref, denoiser.frame.ref);
    subsample_frame (denoiser.frame.sub4ref, denoiser.frame.sub2ref);
    subsample_frame (denoiser.frame.sub2avg, denoiser.frame.avg);
    subsample_frame (denoiser.frame.sub4avg, denoiser.frame.sub2avg);

    /* process the fields as two separate images */
    denoiser.frame.h /= 2;
    denoiser.frame.w *= 2;

    /* if lines are twice as wide as normal the offset is only 16 lines
     * despite 32 in progressive mode...
     */
    for (y = 16; y < (denoiser.frame.h + 16); y += 8)
      for (x = 0; x < denoiser.frame.w; x += 8)
      {
        vector.x = 0;
        vector.y = 0;

        if (!low_contrast_block(x, y) &&
            x > (denoiser.border.x) &&
            y > (denoiser.border.y + 32) &&
            x < (denoiser.border.x + denoiser.border.w) &&
            y < (denoiser.border.y + 32 + denoiser.border.h))
        {
          mb_search_44(x, y);
          mb_search_22(x, y);
          mb_search_11(x, y);
          mb_search_00(x, y);
        }

        if ((vector.x + x) > 0 && (vector.x + x) < W &&
            (vector.y + y) > 32 && (vector.y + y) < (32 + H))
        {
          move_block(x, y);
        }
        else
        {
          vector.x = 0;
          vector.y = 0;
          move_block(x, y);
        }
      }

    /* process the fields in one image again */
    denoiser.frame.h *= 2;
    denoiser.frame.w /= 2;

    average_frame();
    correct_frame2();
    denoise_frame_pass2();
    sharpen_frame();
    black_border();

    ac_memcpy(denoiser.frame.avg[0], denoiser.frame.tmp[0], denoiser.frame.w * (denoiser.frame.h + 64));
    ac_memcpy(denoiser.frame.avg[1], denoiser.frame.tmp[1], denoiser.frame.w * (denoiser.frame.h + 64) / 4);
    ac_memcpy(denoiser.frame.avg[2], denoiser.frame.tmp[2], denoiser.frame.w * (denoiser.frame.h + 64) / 4);
    break;
  }
  case 2: /* PASS II only mode */
  {
    /* deinterlacing wanted ? */
    if (denoiser.deinterlace)
      deinterlace();

    /* as the normal denoising functions are not used we need to copy ...
     */
    ac_memcpy(denoiser.frame.tmp[0], denoiser.frame.ref[0], denoiser.frame.w * (denoiser.frame.h + 64));
    ac_memcpy(denoiser.frame.tmp[1], denoiser.frame.ref[1], denoiser.frame.w * (denoiser.frame.h + 64) / 4);
    ac_memcpy(denoiser.frame.tmp[2], denoiser.frame.ref[2], denoiser.frame.w * (denoiser.frame.h + 64) / 4);

    denoise_frame_pass2();
    sharpen_frame();
    black_border();
    break;
  }
  }
}
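/*
 * The mb_search_44/22/11/00 cascade above runs a hierarchical motion search
 * over the pyramid built by the subsample_frame() calls.  A minimal sketch of
 * the 2x2 box averaging that produces one pyramid level, assuming plain
 * contiguous 8-bit planes; the real routine also has to cope with the 32-line
 * guard borders the denoiser keeps around each frame, which this sketch
 * ignores.
 */
#include <stdint.h>

static void subsample_plane_sketch(uint8_t *dst, const uint8_t *src, int w, int h)
{
  int x, y;

  /* Each destination pixel averages a 2x2 source block, halving both
   * dimensions per pyramid level. */
  for (y = 0; y < h / 2; y++)
    for (x = 0; x < w / 2; x++)
      dst[y * (w / 2) + x] = (uint8_t)
        ((src[(2 * y) * w + (2 * x)] + src[(2 * y) * w + (2 * x + 1)] +
          src[(2 * y + 1) * w + (2 * x)] + src[(2 * y + 1) * w + (2 * x + 1)] + 2) / 4);
}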
void *VideoLayer::feed() {
	int got_picture = 0;
	int len1 = 0;
	int ret = 0;
	bool got_it = false;

	double now = get_master_clock();

	if(paused)
		return rgba_picture->data[0];

	/**
	 * follow user video loop
	 */
	if(mark_in != NO_MARK && mark_out != NO_MARK && seekable) {
		if (now >= mark_out)
			seek((int64_t)mark_in * AV_TIME_BASE);
	}

	// operate seek if it was requested
	if(to_seek >= 0) {
		seek(to_seek);
		to_seek = -1;
	}

	got_it = false;
	while (!got_it) {

		if(packet_len <= 0) {
			/**
			 * Read one packet from the media and put it in pkt
			 */
			while(1) {
#ifdef DEBUG
				func("av_read_frame ...");
#endif
				ret = av_read_frame(avformat_context, &pkt);
#ifdef DEBUG
				if(pkt.stream_index == video_index)
					std::cout << "video read packet";
				else if(pkt.stream_index == audio_index)
					std::cout << "audio read packet";
				std::cout << " pkt.data=" << (void*)pkt.data;
				std::cout << " pkt.size=" << pkt.size;
				std::cout << " pkt.pts/dts=" << pkt.pts << "/" << pkt.dts << std::endl;
				std::cout << "pkt.duration=" << pkt.duration;
				std::cout << " avformat_context->start_time=" << avformat_context->start_time;
				std::cout << " avformat_context->duration=" << avformat_context->duration / AV_TIME_BASE << std::endl;
				std::cout << "avformat_context->duration=" << avformat_context->duration << std::endl;
#endif
				/* TODO(shammash): this may be good for streams but breaks
				 * looping in files, needs fixing. */
				// if(!pkt.duration) continue;
				// if(!pkt.size || !pkt.data) {
				//   return NULL;
				// }

				/**
				 * check eof and loop
				 */
				if(ret != 0) { // does not enter if data are available
					eos->notify();
					// eos->dispatcher->do_jobs(); /// XXX hack hack hack
					ret = seek(avformat_context->start_time);
					if (ret < 0) {
						error("VideoLayer::could not loop file");
						return rgba_picture->data[0];
					}
					continue;
				} else if( (pkt.stream_index == video_index)
				           || (pkt.stream_index == audio_index) )
					break; /* exit loop once a known stream index is found */
			}
		}

		frame_number++;
		//std::cout << "frame_number :" << frame_number << std::endl;

		/**
		 * Decode video
		 */
		if(pkt.stream_index == video_index) {
			len1 = decode_video_packet(&got_picture);
			AVFrame *yuv_picture = &av_frame;

			if(len1 < 0) {
				// error("VideoLayer::Error while decoding frame");
				func("one frame only?");
				return NULL;
			} else if (len1 == 0) {
				packet_len = 0;
				return NULL;
			}

			/**
			 * We've found a picture
			 */
			ptr += len1;
			packet_len -= len1;
			if (got_picture != 0) {
				got_it = true;
				avformat_stream = avformat_context->streams[video_index];

				/** Deinterlace input if requested */
				if(deinterlaced)
					deinterlace((AVPicture *)yuv_picture);

#ifdef WITH_SWSCALE
				sws_scale(img_convert_ctx, yuv_picture->data, yuv_picture->linesize,
				          0, video_codec_ctx->height,
				          rgba_picture->data, rgba_picture->linesize);
#else
				/**
				 * yuv2rgb
				 */
				img_convert(rgba_picture, PIX_FMT_RGB32,
				            (AVPicture *)yuv_picture, video_codec_ctx->pix_fmt,
				            //avformat_stream.codec->pix_fmt,
				            video_codec_ctx->width,
				            video_codec_ctx->height);
#endif

				// memcpy(frame_fifo.picture[fifo_position % FIFO_SIZE]->data[0], rgba_picture->data[0], geo.size);
				/* TODO move */
				if(fifo_position == FIFO_SIZE)
					fifo_position = 0;

				/* workaround since sws_scale conversion from YUV returns
				 * an RGBA buffer with alpha set to 0x0 */
				{
					register int bufsize = (rgba_picture->linesize[0] * video_codec_ctx->height) / 4;
					int32_t *pbuf = (int32_t *)rgba_picture->data[0];
					// force the alpha channel to fully opaque
					for(; bufsize > 0; bufsize--) {
						*pbuf = (*pbuf | alpha_bitmask);
						pbuf++;
					}
				}

				jmemcpy(frame_fifo.picture[fifo_position]->data[0],
				        rgba_picture->data[0],
				        rgba_picture->linesize[0] * video_codec_ctx->height);
				// avpicture_get_size(PIX_FMT_RGBA32, enc->width, enc->height));

				fifo_position++;
			}
		} // end of video packet decoding

		////////////////////////
		// audio packet decoding
		else if(pkt.stream_index == audio_index) {
			// XXX(shammash): audio decoding seems to depend on screen properties,
			// so we skip decoding audio frames if there's no screen
			if(use_audio && screen) {
				int data_size;
				len1 = decode_audio_packet(&data_size);
				if (len1 > 0) {
					int samples = data_size / sizeof(uint16_t);
					long unsigned int m_SampleRate = screen->m_SampleRate ? *(screen->m_SampleRate) : 48000;
					double m_ResampleRatio = (double)(m_SampleRate) / (double)audio_samplerate;
					long unsigned max_buf = ceil(AVCODEC_MAX_AUDIO_FRAME_SIZE * m_ResampleRatio * audio_channels);

					if (audio_resampled_buf_len < max_buf) {
						if (audio_resampled_buf)
							free(audio_resampled_buf);
						audio_resampled_buf = (float *)malloc(max_buf * sizeof(float));
						audio_resampled_buf_len = max_buf;
					}

					src_short_to_float_array((const short *)audio_buf, audio_float_buf, samples);

					if (m_ResampleRatio == 1.0) {
						ringbuffer_write(screen->audio, (const char *)audio_float_buf, samples * sizeof(float));
						// time_t *tm = (time_t *)malloc(sizeof(time_t)); time(tm);
						// std::cerr << "-- VL:" << asctime(localtime(tm));
					} else {
						SRC_DATA src_data;
						int offset = 0;
						do {
							src_data.input_frames = samples / audio_channels;
							src_data.output_frames = audio_resampled_buf_len / audio_channels - offset;
							src_data.end_of_input = 0;
							src_data.src_ratio = m_ResampleRatio;
							src_data.input_frames_used = 0;
							src_data.output_frames_gen = 0;
							src_data.data_in = audio_float_buf + offset;
							src_data.data_out = audio_resampled_buf + offset;
							src_simple(&src_data, SRC_SINC_MEDIUM_QUALITY, audio_channels);
							ringbuffer_write(screen->audio, (const char *)audio_resampled_buf,
							                 src_data.output_frames_gen * audio_channels * sizeof(float));
							offset += src_data.input_frames_used * audio_channels;
							samples -= src_data.input_frames_used * audio_channels;
							if (samples > 0)
								warning("resampling left: %i < %i", src_data.input_frames_used, samples / audio_channels);
						} while (samples > audio_channels);
					}
				}
			}
		}

		av_free_packet(&pkt); /* sun's good. love's bad */

	} // end of while(!got_it)

	return frame_fifo.picture[fifo_position - 1]->data[0];
}
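// The deinterlace((AVPicture *)yuv_picture) call in feed() matches the
// avpicture_deinterlace() helper FFmpeg shipped in this era (since deprecated
// and removed in favour of the yadif filter). A minimal sketch of such a
// wrapper under that assumption; the actual method body is not shown above.
void VideoLayer::deinterlace(AVPicture *picture) {
	// avpicture_deinterlace() supports in-place operation when src == dst;
	// on failure the frame is simply left interlaced.
	if(avpicture_deinterlace(picture, picture,
	                         video_codec_ctx->pix_fmt,
	                         video_codec_ctx->width,
	                         video_codec_ctx->height) < 0)
		warning("VideoLayer::deinterlace() failed, leaving frame interlaced");
}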