// Handles WM_SIZE for the skinned window: forwards the message to the original
// window procedure, re-applies the rounded-corner window region, re-positions
// the caption buttons, and repaints the non-client area.
//
// wParam: the SIZE_* resize type; lParam: new client width/height (unused here).
// Returns the result of the original window procedure.
LRESULT CSkinWndHelper::OnSize(WPARAM wParam, LPARAM lParam)
{
    // Let the subclassed (original) window procedure process WM_SIZE first.
    LRESULT lResult = CallWindowProc(m_oldWndProc, m_hWnd, WM_SIZE, wParam, lParam);

    const UINT nType = static_cast<UINT>(wParam);

    if (nType != SIZE_MINIMIZED && nType != SIZE_MAXHIDE)
    {
        // Free any region handle we still own. After a successful SetWindowRgn
        // the handle is detached below, so this only fires if a previous
        // SetWindowRgn call failed and left us holding the handle.
        if (m_Rgn.m_hRgn)
        {
            m_Rgn.DeleteObject();
            m_Rgn.m_hRgn = NULL;
        }

        CRect rc;
        GetWindowRect(m_hWnd, &rc);   // window rectangle in screen coordinates
        rc -= rc.TopLeft();           // translate so the rectangle starts at (0,0)

        // Create a rounded rectangle matching the window and use it as the
        // window region to give the window rounded corners.
        m_Rgn.CreateRoundRectRgn(rc.left, rc.top, rc.right + 1, rc.bottom + 1, 5, 5);
        if (SetWindowRgn(m_hWnd, m_Rgn, TRUE))
        {
            // BUGFIX: after a successful SetWindowRgn the system owns the
            // region handle and the application must not delete it (MSDN,
            // SetWindowRgn remarks). Detach so the DeleteObject above never
            // runs on a system-owned handle.
            m_Rgn.Detach();
        }
    }

    // Re-position the caption buttons relative to the new window width.
    CRect rcWnd;
    GetWindowRect(m_hWnd, &rcWnd);
    rcWnd.OffsetRect(-rcWnd.left, -rcWnd.top);

    if (m_bHaveMaxBox || m_bHaveMinBox)
    {
        CRect rMin(rcWnd.right - 74, 8, rcWnd.right - 54, 30);
        m_TitleBtn[ID_MIN_BTN].SetRect(rMin);
        CRect rMax(rcWnd.right - 52, 8, rcWnd.right - 32, 30);
        m_TitleBtn[ID_MAX_BTN].SetRect(rMax);
    }
    CRect rClose(rcWnd.right - 30, 8, rcWnd.right - 10, 30);
    m_TitleBtn[ID_CLOSE_BTN].SetRect(rClose);

    // Repaint the non-client area for user-visible size changes.
    if (nType == SIZE_MAXIMIZED || nType == SIZE_RESTORED)
    {
        DoNcPaint();
    }
    return lResult;
}
/*! \brief Tests the **reduce_min** kernel.
 *  \details The kernel computes the minimum element of each row of an array.
 */
TEST (Reduce, reduce_min)
{
    try
    {
        // Problem size: a rows x cols float array reduced to one minimum per row.
        const unsigned int rows = 1024;
        const unsigned int cols = 1024;
        const unsigned int bufferInSize = cols * rows * sizeof (cl_float);
        const unsigned int bufferOutSize = rows * sizeof (cl_float);

        // Setup the OpenCL environment
        clutils::CLEnv clEnv;
        clEnv.addContext (0);
        clEnv.addQueue (0, 0, CL_QUEUE_PROFILING_ENABLE);
        clEnv.addProgram (0, kernel_filename_reduce);

        // Configure kernel execution parameters
        clutils::CLEnvInfo<1> info (0, 0, 0, { 0 }, 0);
        cl_algo::RBC::Reduce<cl_algo::RBC::ReduceConfig::MIN, cl_float> rMin (clEnv, info);
        rMin.init (cols, rows);

        // Initialize data (writes on staging buffer directly)
        std::generate (rMin.hPtrIn, rMin.hPtrIn + bufferInSize / sizeof (cl_float), RBC::rNum_R_0_1);
        // RBC::printBufferF ("Original:", rMin.hPtrIn, cols, rows, 3);

        rMin.write ();  // Copy data to device
        rMin.run ();    // Execute kernels (~ 45 us)
        cl_float *results = (cl_float *) rMin.read ();  // Copy results to host
        // RBC::printBufferF ("Received:", results, 1, rows, 3);

        // Produce the reference per-row minima on the CPU
        cl_float *refMin = new cl_float[rows];
        auto func = [](cl_float a, cl_float b) -> bool { return a < b; };
        RBC::cpuReduce<cl_float> (rMin.hPtrIn, refMin, cols, rows, func);
        // RBC::printBufferF ("Expected:", refMin, 1, rows, 3);

        // Verify the reduced output against the CPU reference
        // (NOTE: comment previously said "blurred output" — copy-paste slip)
        float eps = std::numeric_limits<float>::epsilon ();  // 1.19209e-07
        for (uint i = 0; i < rows; ++i)
            ASSERT_LT (std::abs (refMin[i] - results[i]), eps);

        // Profiling ===========================================================
        if (profiling)
        {
            const int nRepeat = 1;  /* Number of times to perform the tests. */

            // CPU
            clutils::CPUTimer<double, std::milli> cTimer;
            clutils::ProfilingInfo<nRepeat> pCPU ("CPU");
            for (int i = 0; i < nRepeat; ++i)
            {
                cTimer.start ();
                RBC::cpuReduce<cl_float> (rMin.hPtrIn, refMin, cols, rows, func);
                pCPU[i] = cTimer.stop ();
            }

            // GPU
            clutils::GPUTimer<std::milli> gTimer (clEnv.devices[0][0]);
            clutils::ProfilingInfo<nRepeat> pGPU ("GPU");
            for (int i = 0; i < nRepeat; ++i)
                pGPU[i] = rMin.run (gTimer);

            // Benchmark
            pGPU.print (pCPU, "Reduce<MIN>");
        }

        // BUGFIX: refMin was allocated with new[] but never released (leak).
        delete[] refMin;
    }
    catch (const cl::Error &error)
    {
        std::cerr << error.what ()
                  << " (" << clutils::getOpenCLErrorCodeString (error.err ())
                  << ")" << std::endl;
        exit (EXIT_FAILURE);
    }
}
// Dynamic-programming computation over descending ladder epochs of a random
// walk on the given score distribution. Accumulates the expectations
// E[sum(alpha)] and E[1 - exp(lambda * sum(alpha))], optionally per epoch
// length w, until the contribution falls below REL_TOL or time_ is exceeded.
void LocalMaxStatUtil::descendingLadderEpochRepeat (
    size_t dimension_,                 // #(distinct values)
    const Int4 *score_,                // values
    const double *prob_,               // probability of corresponding value
    double *eSumAlpha_,                // expectation (sum [alpha])
    double *eOneMinusExpSumAlpha_,     // expectation [1.0 - exp (sum [alpha])]
    bool isStrict_,                    // ? is this a strict descending ladder epoch
    double lambda_,                    // lambda for repeats : default is lambda0_ below
    size_t endW_,                      // maximum w plus 1
    double *pAlphaW_,                  // probability {alpha = w} : pAlphaW_ [0, wEnd)
    double *eOneMinusExpSumAlphaW_,    // expectation [1.0 - exp (sum [alpha]); alpha = w] : eOneMinusExpSumAlphaW_ [0, wEnd)
    double lambda0_,                   // lambda for flattened distribution (avoid recomputation)
    double mu0_,                       // mean of flattened distribution (avoid recomputation)
    double muAssoc0_,                  // mean of associated flattened distribution (avoid recomputation)
    double thetaMin0_,                 // thetaMin of flattened distribution (avoid recomputation)
    double rMin0_,                     // rMin of flattened distribution (avoid recomputation)
    double time_,                      // time budget for the dynamic programming computation (<= 0.0 : unlimited)
    bool *terminated_)                 // ? Was the dynamic programming computation terminated prematurely ?
// assumes logarithmic regime
{
    // Start dynamic programming probability calculation using notation in
    //
    // Mott R. and Tribe R. (1999)
    // J. Computational Biology 6(1):91-112
    //
    // Karlin S. and Taylor H.M.(1981)
    // A Second Course in Stochastic Processes, p. 480
    //
    // Note there is an error in Eq (6.19) there, which is corrected in Eq (6.20)
    //
    // This program uses departure into (-Inf, 0] not (-Inf, 0)

    // avoid recomputation: a parameter value of 0.0 means "compute it here"
    double mu0 = 0.0 == mu0_ ? mu (dimension_, score_, prob_) : mu0_;
    assert (mu0 < 0.0);
    double lambda0 = 0.0 == lambda0_ ? lambda (dimension_, score_, prob_) : lambda0_;
    assert (0.0 < lambda0);
    if (lambda_ == 0.0) lambda_ = lambda0;   // default lambda for repeats
    assert (0.0 < lambda_);
    double muAssoc0 = 0.0 == muAssoc0_ ?
        muAssoc (dimension_, score_, prob_, lambda0) : muAssoc0_;
    assert (0.0 < muAssoc0);
    double thetaMin0 = 0.0 == thetaMin0_ ? thetaMin (dimension_, score_, prob_, lambda0) : thetaMin0_;
    assert (0.0 < thetaMin0);
    double rMin0 = 0.0 == rMin0_ ? rMin (dimension_, score_, prob_, lambda0, thetaMin0) : rMin0_;
    assert (0.0 < rMin0 && rMin0 < 1.0);

    // Minimum iteration count so the geometric tail (ratio rMin0) of the
    // omitted probability stays below REL_TOL.
    const Int4 ITER_MIN = static_cast <Int4> ((log (REL_TOL * (1.0 - rMin0)) / log (rMin0)));
    assert (0 < ITER_MIN);
    const Int4 ITER = static_cast <Int4> (endW_) < ITER_MIN ? ITER_MIN : static_cast <Int4> (endW_);
    assert (0 < ITER);
    // Upper truncation bound for walk values kept by the DP lattice.
    const Int4 Y_MAX = static_cast <Int4> (-log (REL_TOL) / lambda0);

    // Strict epochs depart into (-Inf, -1]; non-strict into (-Inf, 0].
    Int4 entry = isStrict_ ? -1 : 0;
    n_setParameters (dimension_, score_, prob_, entry);

    double time0 = 0.0;
    double time1 = 0.0;
    if (time_ > 0.0) Sls::alp_data::get_current_time (time0);   // start the clock only if a budget was given

    DynProgProbLim dynProgProb (n_step, dimension_, prob_, score_ [0] - 1, Y_MAX);

    // An epoch of length w = 0 is impossible.
    if (pAlphaW_) pAlphaW_ [0] = 0.0;
    if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [0] = 0.0;

    dynProgProb.update ();   // iterate random walk

    Int4 value = 0;

    if (eSumAlpha_) *eSumAlpha_ = 0.0;
    if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ = 0.0;

    for (size_t w = 1; w < static_cast <size_t> (ITER); w++) {

        if (w < endW_) { // sum pAlphaW_ [w] and eOneMinusExpSumAlphaW_ [w]

            if (pAlphaW_) pAlphaW_ [w] = 0.0;
            if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] = 0.0;

            // accumulate probability mass of walks that have departed
            // into (score_[0], entry] at step w
            for (value = score_ [0]; value <= entry; value++) {
                if (pAlphaW_) pAlphaW_ [w] += dynProgProb.getProb (value);
                if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] +=
                    dynProgProb.getProb (value) *
                    (1.0 - exp (lambda_ * static_cast <double> (value)));
            }
        }

        // accumulate the overall (w-independent) expectations
        for (value = score_ [0]; value <= entry; value++) {
            if (eSumAlpha_) *eSumAlpha_ +=
                dynProgProb.getProb (value) * static_cast <double> (value);
            if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ +=
                dynProgProb.getProb (value) *
                (1.0 - exp (lambda_ * static_cast <double> (value)));
        }

        // absorb the departed probability, then advance the walk one step
        dynProgProb.setValueFct (n_bury);
        dynProgProb.update ();   // put probability into the morgue
        dynProgProb.setValueFct (n_step);
        dynProgProb.update ();   // iterate random walk

        // abort if the optional time budget has been exhausted
        if (time_ > 0.0) {
            Sls::alp_data::get_current_time (time1);
            if (time1 - time0 > time_) {
                *terminated_ = true;
                return;
            }
        }
    }

    // final accumulation for the last iteration of the walk
    for (value = score_ [0]; value <= entry; value++) {
        if (eSumAlpha_) *eSumAlpha_ +=
            dynProgProb.getProb (value) * static_cast <double> (value);
        if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ +=
            dynProgProb.getProb (value) *
            (1.0 - exp (lambda_ * static_cast <double> (value)));
    }

    // check that not too much probability has been omitted
    double prob = 0.0;
    for (value = entry + 1; value < dynProgProb.getValueUpper (); value++) {
        prob += dynProgProb.getProb (value);
    }
    prob += dynProgProb.getProbLost ();

    const double FUDGE = 2.0;   // slack factor on the omitted-probability bound
    assert (prob <= FUDGE * static_cast <double> (dimension_) * REL_TOL);
}