/**
    \fn Pump
    \brief Read one chunk of the child process' stdout into our buffer.

    Reads up to 10 KB per call — clamped to the actual buffer size —
    from the read end of the child's stdout pipe, then notifies the
    owner via DataAdded().
    \return FALSE when ReadFile fails (e.g. the pipe is broken because
            the child exited), TRUE otherwise.
*/
BOOL CStreamVideoSource::Pump()
{
    DWORD dwBytesRead, dwBytesToRead;
    unsigned char *pBuf = GetWritePtr();
    unsigned int uiBufLen = GetBufferSize();

    // Read at most 10 KB per pump, but never more than the write buffer
    // can hold (the original ignored uiBufLen, risking an overrun on
    // buffers smaller than 10 KB).
    dwBytesToRead = 10 * 1024;
    if (dwBytesToRead > uiBufLen)
        dwBytesToRead = uiBufLen;

    if (!ReadFile(m_hChildStdOut_R, pBuf, dwBytesToRead, &dwBytesRead, NULL))
    {
        TRACE("CStreamVideoSource: Unable to ReadFile\n");
        return FALSE;
    }

    // Tell the owner how much fresh data landed in the buffer.
    DataAdded(dwBytesRead, NULL);
    return TRUE;
}
/**
    \fn blend
    \brief Blend src1 and src2 into our image.

    Dispatches to the fastest available blend kernel (C fallback,
    MMX or SSE on x86) and blends plane by plane (Y, U, V).
    \param src1 First source image.
    \param src2 Second source image.
    \return true (geometry mismatches are caught by assertions).
*/
bool ADMImage::blend(ADMImage *src1, ADMImage *src2)
{
    // Pick the best kernel the CPU supports; plain C is the fallback.
    blendFunction *myBlend = blendC;
#ifdef ADM_CPU_X86
    if (CpuCaps::hasMMX()) myBlend = blendMMX;
    if (CpuCaps::hasSSE()) myBlend = blendSSE;
#endif
    // All three images must share the same geometry.
    ADM_assert(src1->_width == src2->_width);
    ADM_assert(_width == src2->_width);
    ADM_assert(src1->_height == src2->_height);
    ADM_assert(_height == src2->_height); // was missing: destination height never checked

    // Blend each of the three planes (0=Y, 1=U, 2=V).
    for (int x = 0; x < 3; x++)
    {
        ADM_PLANE plane = (ADM_PLANE)x;
        myBlend(GetWidth(plane), GetHeight(plane),
                GetWritePtr(plane), GetPitch(plane),
                src1->GetReadPtr(plane), src1->GetPitch(plane),
                src2->GetReadPtr(plane), src2->GetPitch(plane));
    }
    return true;
}
/**
 * Produce frame n: run the luma plane through the waifu2x CNN models
 * (optional noise reduction, then optional iterated 2x upscaling),
 * upscale chroma with a plain bicubic resize, and blit the results
 * into a freshly allocated output frame.
 *
 * NOTE(review): the cv::Mat "width" is built from GetRowSize(), which
 * equals the pixel width only for 8-bit planar formats — confirm if
 * other bit depths are ever fed in.
 */
PVideoFrame Waifu2xVideoFilter::GetFrame(int n, IScriptEnvironment* env) {
    int percent = (int)((n / (double)vi.num_frames) * 100);
    outputDebug([&](std::ostringstream& s) { s << "Waifu2x GetFrame Starting: " << n << "/" << vi.num_frames << "(" << percent << "%)"; });

    PVideoFrame src = child->GetFrame(n, env);
    // Assume YV12, YV16 or YV24 (with chroma, planar)
    // Process Y at first.
    // yImg aliases the source frame's Y plane (no copy) until convertTo
    // reallocates it as a float image in [0,1] — the models work in float.
    cv::Mat yImg(src->GetHeight(PLANAR_Y), src->GetRowSize(PLANAR_Y), CV_8U, (void *)src->GetReadPtr(PLANAR_Y), src->GetPitch(PLANAR_Y));
    yImg.convertTo(yImg, CV_32F, 1.0 / 255.0);

    // Optional noise-reduction pass, applied in place on the float luma.
    if (this->nrLevel > 0) {
        OutputDebugStringA("Waifu2x NR Start.");
        if (!filterWithModels(this->modelsNR, yImg, yImg)) {
            // ThrowError normally does not return; src is a fallback.
            env->ThrowError("Waifu2x NR Failed.");
            return src;
        }
        OutputDebugStringA("Waifu2x NR Finished.");
    }

    // Optional scaling: each iteration doubles the luma dimensions with a
    // nearest-neighbor resize, then lets the scale models reconstruct detail.
    if (this->enableScaling) {
        OutputDebugStringA("Waifu2x Scaling Start.");
        int curRowSize = src->GetRowSize(PLANAR_Y);
        int curHeight = src->GetHeight(PLANAR_Y);
        for (int i = 0; i < iterTimesTwiceScaling; i++) {
            curRowSize *= 2;
            curHeight *= 2;
            cv::resize(yImg, yImg, cv::Size(curRowSize, curHeight), 0, 0, cv::INTER_NEAREST);
            if (!filterWithModels(this->modelsScale, yImg, yImg)) {
                env->ThrowError("Waifu2x filtering failed.");
                return src;
            }
        }
        OutputDebugStringA("Waifu2x Scaling Finished.");
    }

    // Back to 8-bit for the BitBlt into the output frame.
    yImg.convertTo(yImg, CV_8U, 255.0);

    // Finally process U, V
    // Chroma planes also alias the source frame memory (no copy here).
    cv::Mat uImg(src->GetHeight(PLANAR_U), src->GetRowSize(PLANAR_U), CV_8U, (void *)src->GetReadPtr(PLANAR_U), src->GetPitch(PLANAR_U));
    cv::Mat vImg(src->GetHeight(PLANAR_V), src->GetRowSize(PLANAR_V), CV_8U, (void *)src->GetReadPtr(PLANAR_V), src->GetPitch(PLANAR_V));
    if (this->enableScaling) {
        // process U and V at first (just INTER_CUBIC resize).
        // Chroma gets no CNN treatment — a single bicubic resize by the
        // full adjusted ratio (reallocates uImg/vImg, detaching them
        // from the source frame).
        cv::resize(uImg, uImg, cv::Size(uImg.cols * this->scaleRatioAdjusted, uImg.rows * this->scaleRatioAdjusted), 0, 0, cv::INTER_CUBIC);
        cv::resize(vImg, vImg, cv::Size(vImg.cols * this->scaleRatioAdjusted, vImg.rows * this->scaleRatioAdjusted), 0, 0, cv::INTER_CUBIC);
    }

    // Copy the processed mats into a new frame; cv::Mat::step supplies the
    // source pitch for each plane.
    auto dst = env->NewVideoFrame(vi);
    env->BitBlt(dst->GetWritePtr(PLANAR_Y), dst->GetPitch(PLANAR_Y), yImg.data, yImg.step, yImg.cols, yImg.rows);
    env->BitBlt(dst->GetWritePtr(PLANAR_U), dst->GetPitch(PLANAR_U), uImg.data, uImg.step, uImg.cols, uImg.rows);
    env->BitBlt(dst->GetWritePtr(PLANAR_V), dst->GetPitch(PLANAR_V), vImg.data, vImg.step, vImg.cols, vImg.rows);

    OutputDebugStringA("Waifu2x GetFrame Finished.");
    return dst;
}
/**
 * Build the motion mask for frame n.
 *
 * Gathers a temporal window of top- and bottom-field clips around n,
 * then for every pixel of the field being built evaluates occurrence
 * lookup tables (plut0/plut1) over the window and writes a mask value
 * from tmmlut. Rows of the opposite field are filled with 10; pixels
 * whose three central neighbors are all 0 are marked 60.
 * NOTE(review): 10/60 look like the project's mask codes — semantics
 * not visible from this block; confirm against the consumers.
 */
PVideoFrame __stdcall BuildMM::GetFrame(int n, ise_t* env)
{
    int ft = field;
    if (mode == 1) {
        // mode 1: clip is double-rate; even/odd output frames alternate
        // fields according to 'order', and n maps back to the source frame.
        ft = (n & 1) ? 1 - order : order;
        n /= 2;
    }

    const int* tmmlut = tmmlutf[ft];
    // Temporal window bounds and clip counts for this field parity.
    const int tstart = n + vals[ft].tstart,
        tstop = n + vals[ft].tstop,
        bstart = n + vals[ft].bstart,
        bstop = n + vals[ft].bstop,
        ocount = vals[ft].ocount,
        ccount = vals[ft].ccount;

    // One scratch buffer carved into pitch arrays, the two pixel-luts and
    // the source-pointer arrays.
    auto b = BMMBuffer(ocount, ccount, length, tstop - tstart + 1);
    int* opitch = b.intVals.data();
    int* cpitch = opitch + ocount;
    int* plut0 = cpitch + ccount;
    int* plut1 = plut0 + 2 * length - 1;
    const uint8_t** srcp0 = b.ptrs.data();
    const uint8_t** srcp1 = srcp0 + ocount;
    const uint8_t** srcp2 = srcp1 + ccount;

    // (removed: unused local 'nf = vi.num_frames - 1'; nfSrc is the bound used)

    // Fetch the window of top/bottom field frames; out-of-range indices
    // get the pre-built 'black' frame.
    for (int i = tstop; i >= tstart; --i) {
        if (i < 0 || i >= nfSrc) {
            b.tops[i - tstart] = black;
            continue;
        }
        b.tops[i - tstart] = child->GetFrame(i, env);
    }
    for (int i = bstop; i >= bstart; --i) {
        if (i < 0 || i >= nfSrc) {
            b.btms[i - bstart] = black;
            continue;
        }
        b.btms[i - bstart] = btmf->GetFrame(i, env);
    }

    // "Other" vs "current" clips depend on which field we are building.
    PVideoFrame* oclips = ft == 0 ? b.btms : b.tops;
    PVideoFrame* cclips = ft == 0 ? b.tops : b.btms;

    auto dst = env->NewVideoFrame(vi);

    // Interleave offsets into the luts: parity of 'length' decides which
    // slots hold other-field vs current-field samples.
    const int offo = (length & 1) ? 0 : 1;
    const int offc = !offo;

    for (int p = 0; p < numPlanes; ++p) {
        const int plane = planes[p];
        uint8_t* dstp = dst->GetWritePtr(plane);
        const int dpitch = dst->GetPitch(plane);
        const int width = dst->GetRowSize(plane);
        const int height = dst->GetHeight(plane);

        // Rows belonging to the opposite field are filled with 10.
        for (int i = !ft; i < height; i += 2) {
            memset(dstp + dpitch * i, 10, width);
        }
        dstp += dpitch * ft;

        // srcp0 walks the other-field rows, srcp2 the row below (offset by
        // ft), srcp1 the current-field rows.
        for (int i = 0; i < ocount; ++i) {
            opitch[i] = oclips[i]->GetPitch(plane);
            srcp0[i] = oclips[i]->GetReadPtr(plane);
            srcp2[i] = srcp0[i] + opitch[i] * ft;
        }
        for (int i = 0; i < ccount; ++i) {
            srcp1[i] = cclips[i]->GetReadPtr(plane);
            cpitch[i] = cclips[i]->GetPitch(plane);
        }
        const int ct = ccount / 2;

        for (int y = ft; y < height; y += 2) {
            for (int x = 0; x < width; ++x) {
                // All three central neighbors zero -> mask value 60.
                if (srcp1[ct - 2][x] == 0 && srcp1[ct][x] == 0 && srcp1[ct + 1][x] == 0) {
                    dstp[x] = 60;
                    continue;
                }
                // Interleave current-field samples into both luts, and the
                // two other-field rows into plut0/plut1 respectively.
                for (int i = 0; i < ccount; ++i) {
                    plut0[i * 2 + offc] = plut1[i * 2 + offc] = srcp1[i][x];
                }
                for (int i = 0; i < ocount; ++i) {
                    plut0[i * 2 + offo] = srcp0[i][x];
                    plut1[i * 2 + offo] = srcp2[i][x];
                }
                // Accumulate occurrence bits: a run of (length-4) non-zero
                // samples starting at i sets the gvlut bit for that lut.
                int val = 0;
                for (int i = 0; i < length; ++i) {
                    for (int j = 0; j < length - 4; ++j) {
                        if (plut0[i + j] == 0) goto j1;
                    }
                    val |= (gvlut[i] << 3);
                j1:
                    for (int j = 0; j < length - 4; ++j) {
                        if (plut1[i + j] == 0) goto j2;
                    }
                    val |= gvlut[i];
                j2:
                    if (vlut[val] == 2) break; // early out: result decided
                }
                dstp[x] = tmmlut[val];
            }
            dstp += dpitch * 2;

            // Advance source rows; srcp0/srcp2 are pinned at the top and
            // bottom edges respectively.
            for (int i = 0; i < ccount; ++i) {
                srcp1[i] += cpitch[i];
            }
            int y0 = y == 0 ? 0 : 1;
            int y1 = y == height - 3 ? 0 : 1;
            for (int i = 0; i < ocount; ++i) {
                srcp0[i] += opitch[i] * y0;
                srcp2[i] += opitch[i] * y1;
            }
        }
    }
    return dst;
}
/** \fn saveAsJpg \brief save current image into filename, into jpg format */ bool ADMImage::saveAsJpg(const char *filename) { AVCodecContext *context=NULL; AVFrame frame; bool result=false; AVCodec *codec=NULL; int sz=0,r=0; ADM_byteBuffer byteBuffer; context=avcodec_alloc_context(); if(!context) { printf("[saveAsJpg] Cannot allocate context\n"); return false; } codec=avcodec_find_encoder(CODEC_ID_MJPEG); if(!codec) { printf("[saveAsJpg] Cannot allocate codec\n"); goto jpgCleanup; } context->pix_fmt =PIX_FMT_YUV420P; context->strict_std_compliance = -1; context->time_base.den=1; context->time_base.num=1; context->width=_width; context->height=_height; context->flags |= CODEC_FLAG_QSCALE; r=avcodec_open(context, codec); if(r<0) { printf("[saveAsJpg] Cannot mix codec and context\n"); ADM_dealloc (context); return false; } // Setup our image & stuff.... frame.linesize[0] = GetPitch(PLANAR_Y); frame.linesize[1] = GetPitch(PLANAR_U); frame.linesize[2] = GetPitch(PLANAR_V); frame.data[0] = GetWritePtr(PLANAR_Y); frame.data[2] = GetWritePtr(PLANAR_U); frame.data[1] = GetWritePtr(PLANAR_V); // Grab a temp buffer // Encode! frame.quality = (int) floor (FF_QP2LAMBDA * 2+ 0.5); byteBuffer.setSize(_width*_height*4); if ((sz = avcodec_encode_video (context, byteBuffer.at(0), _width*_height*4, &frame)) < 0) { printf("[jpeg] Error %d encoding video\n",sz); goto jpgCleanup; } // Ok now write our file... { FILE *f=ADM_fopen(filename,"wb"); if(f) { fwrite(byteBuffer.at(0),sz,1,f); fclose(f); result=true; }else { printf("[saveAsJpeg] Cannot open %s for writing!\n",filename); } } // Cleanup jpgCleanup: if(context) { avcodec_close (context); av_free (context); } context=NULL; return result; }
/** \fn saveAsJpg \brief save current image into filename, into jpg format */ bool ADMImage::saveAsJpgInternal(const char *filename) { AVCodecContext *context=NULL; AVFrame *frame=NULL; bool result=false; AVCodec *codec=NULL; int r=0; ADM_byteBuffer byteBuffer; frame=av_frame_alloc(); if(!frame) { printf("[saveAsJpg] Cannot allocate frame\n"); goto jpgCleanup; } codec=avcodec_find_encoder(AV_CODEC_ID_MJPEG); if(!codec) { printf("[saveAsJpg] Cannot allocate codec\n"); goto jpgCleanup; } context=avcodec_alloc_context3(codec); if(!context) { printf("[saveAsJpg] Cannot allocate context\n"); goto jpgCleanup; } context->pix_fmt =AV_PIX_FMT_YUV420P; context->strict_std_compliance = -1; context->time_base.den=1; context->time_base.num=1; context->width=_width; context->height=_height; context->flags |= CODEC_FLAG_QSCALE; r=avcodec_open2(context, codec, NULL); if(r<0) { printf("[saveAsJpg] Cannot mix codec and context\n"); ADM_dealloc (context); return false; } // Setup our image & stuff.... frame->width=_width; frame->height=_height; frame->format=AV_PIX_FMT_YUV420P; frame->linesize[0] = GetPitch(PLANAR_Y); frame->linesize[2] = GetPitch(PLANAR_U); frame->linesize[1] = GetPitch(PLANAR_V); frame->data[0] = GetWritePtr(PLANAR_Y); frame->data[2] = GetWritePtr(PLANAR_U); frame->data[1] = GetWritePtr(PLANAR_V); // Grab a temp buffer // Encode! frame->quality = (int) floor (FF_QP2LAMBDA * 2+ 0.5); byteBuffer.setSize(_width*_height*4); AVPacket pkt; av_init_packet(&pkt); int gotSomething; pkt.size=_width*_height*4; pkt.data=byteBuffer.at(0); r=avcodec_encode_video2(context,&pkt,frame,&gotSomething); if(r || !gotSomething) { ADM_error("[jpeg] Error %d encoding video\n",r); goto jpgCleanup; } // Ok now write our file... 
{ FILE *f=ADM_fopen(filename,"wb"); if(f) { fwrite(byteBuffer.at(0),pkt.size,1,f); fclose(f); result=true; }else { printf("[saveAsJpeg] Cannot open %s for writing!\n",filename); } } // Cleanup jpgCleanup: if(context) { avcodec_close (context); av_free (context); context=NULL; } if(frame) { av_frame_free(&frame); frame=NULL; } return result; }