/*
 * Sweep the input buffer looking for repeated substrings ("tokens") of
 * exactly `length` bytes.  Candidate tokens are tracked in the hash table
 * cc_htab and the LRU queues cc_q1a/cc_q1b; tokens whose repeat count (or
 * weight, when weighting is compiled in) crosses the threshold are appended
 * to `tokens`.  Returns the number of entries written to `tokens` plus one
 * for the terminating NULL, or 0 if none qualified.
 *
 * buffer/bufsize  raw input bytes to scan
 * tokens          output array of qualifying token pointers (NULL-terminated)
 * length          substring length this sweep considers
 */
int cc_sweep(char *buffer, int bufsize, struct cc **tokens, int length) {
    struct cc *p;
    char *cp;
    int i;
    short *hc;
    short *places = cc_places[length];      /* per-position chain of prior occurrences */
    struct cc **pp = tokens;                /* next output slot */
    short threshold = thresh(length);       /* repeat count needed to emit a token */
#ifndef cc_weight
    short wthreshold = wthresh(length);     /* weight needed to emit a token */
    short limit = wlimit(length);           /* weight saturation cap */
#endif
    int time0;
    short pc = tt.tt_padc;                  /* pad character: never part of a token */
    /* Position at the end of the first window of `length` bytes. */
    i = length - 1;
    bufsize -= i;
    cp = buffer + i;
    hc = cc_hashcodes;
    time0 = cc_time0;
    for (i = 0; i < bufsize; i++, time0++) {
        struct cc **h;
        {
            /* Extend the rolling hash by one byte; a negative stored code
               or a pad character invalidates this window. */
            short *hc1 = hc;
            short c = *cp++;
            short hh;
            if ((hh = *hc1) < 0 || c == pc) {
                *hc1++ = -1;
                hc = hc1;
                continue;
            }
            h = cc_htab + (*hc1++ = hash(hh, c));
            hc = hc1;
        }
        /* Look for an existing token with identical bytes in this bucket. */
        for (p = *h; p != 0; p = p->hforw)
            if (p->length == (char) length) {
                char *p1 = p->string;
                char *p2 = cp - length;
                int n = length;
                do
                    if (*p1++ != *p2++)
                        goto fail;
                while (--n);
                break;
            fail:
                ;
            }
        if (p == 0) {
            /* Not found: recycle the LRU entry of queue q1a, unless it is
               still fresh for this very sweep (same time window + length). */
            p = cc_q1a.qback;
            if (p == &cc_q1a ||
                (p->time >= cc_time0 && p->length == (char) length))
                continue;
            /* Unlink the recycled entry from its old hash chain. */
            if (p->hback != 0)
                if ((*p->hback = p->hforw) != 0)
                    p->hforw->hback = p->hback;
            {
                /* Copy the current window into the token. */
                char *p1 = p->string;
                char *p2 = cp - length;
                int n = length;
                do
                    *p1++ = *p2++;
                while (--n);
            }
            p->length = length;
#ifndef cc_weight
            p->weight = cc_weight;
#endif
            p->time = time0;
            p->bcount = 1;
            p->ccount = 0;
            p->flag = 0;
            /* Insert at the head of the hash chain and onto queue q1a. */
            if ((p->hforw = *h) != 0)
                p->hforw->hback = &p->hforw;
            *h = p;
            p->hback = h;
            qinsert(p, &cc_q1a);
            places[i] = -1;
            p->places = i;
#ifdef STATS
            ntoken_stat++;
#endif
        } else if (p->time < cc_time0) {
            /* Seen before, but not in this sweep: restart its counters. */
#ifndef cc_weight
            /* Age the weight by elapsed time, then credit this hit,
               saturating at `limit`. */
            if ((p->weight += p->time - time0) < 0)
                p->weight = cc_weight;
            else if ((p->weight += cc_weight) > limit)
                p->weight = limit;
#endif
            p->time = time0;
            p->bcount = 1;
            p->ccount = 0;
            if (p->code >= 0) {
                /* Already has an output code assigned: emit immediately. */
                p->flag = 1;
                *pp++ = p;
            } else
#ifndef cc_weight
            if (p->weight >= wthreshold) {
                p->flag = 1;
                *pp++ = p;
                qinsert(p, &cc_q1b);
            } else
#endif
            {
                p->flag = 0;
                qinsert(p, &cc_q1a);
            }
            places[i] = -1;
            p->places = i;
#ifdef STATS
            ntoken_stat++;
#endif
        } else if (p->time + length > time0) {
            /*
             * Overlapping token: don't count as two and don't update
             * time0, but do adjust weight to offset the difference.
             */
#ifndef cc_weight
            if (cc_weight != 0) {   /* XXX */
                p->weight += time0 - p->time;
                if (!p->flag && p->weight >= wthreshold) {
                    p->flag = 1;
                    *pp++ = p;
                    qinsert(p, &cc_q1b);
                }
            }
#endif
            places[i] = p->places;
            p->places = i;
        } else {
            /* Repeat hit within this sweep: bump counters and emit once
               the count (or weight) crosses its threshold. */
#ifndef cc_weight
            if ((p->weight += p->time - time0) < 0)
                p->weight = cc_weight;
            else if ((p->weight += cc_weight) > limit)
                p->weight = limit;
#endif
            p->time = time0;
            p->bcount++;
            if (!p->flag &&
                /* code must be < 0 if flag false here */
                (p->bcount >= threshold
#ifndef cc_weight
                 || p->weight >= wthreshold
#endif
                )) {
                p->flag = 1;
                *pp++ = p;
                qinsert(p, &cc_q1b);
            }
            places[i] = p->places;
            p->places = i;
        }
    }
    /* Post-process the emitted list: NULL-terminate, optionally reverse,
       sort, and chop to a percentage of the candidates. */
    if ((i = pp - tokens) > 0) {
        *pp = 0;
        if (cc_reverse)
            cc_sweep_reverse(tokens, places);
        if (cc_sort && i > 1) {
            qsort((char *) tokens, i, sizeof *tokens, cc_token_compare);
        }
        if (cc_chop) {
            if ((i = i * cc_chop / 100) == 0)
                i = 1;
            tokens[i] = 0;
        }
        i++;    /* account for the terminating NULL slot */
    }
    return i;
}
std::vector<std::vector<std::vector<cv::Point>>> MultiContourObjectDetector::findApproxContours( cv::Mat image, bool performOpening, bool findBaseShape) { // CREATE ACTIVE ZONE 80% AND 50% --------------------- Point centre(image.size().width / 2, image.size().height / 2); int deleteHeight = image.size().height * _deleteFocus; int deleteWidth = image.size().width * _deleteFocus; int deleteX = centre.x - deleteWidth / 2; int deleteY = centre.y - deleteHeight / 2; int attenuationHeight = image.size().height * _attenuationFocus; int attenuationWidth = image.size().width * _attenuationFocus; int attenuationX = centre.x - attenuationWidth / 2; int attenuationY = centre.y - attenuationHeight / 2; Rect erase(deleteX, deleteY, deleteWidth, deleteHeight); _deleteRect = erase; Rect ease(attenuationX, attenuationY, attenuationWidth, attenuationHeight); _attenuationRect = ease; // ---------------------------------------- bool imageTooBig = false; Mat newImage; if (image.size().height <= 400 || image.size().width <= 400) { Mat pickColor = image(Rect((image.size().width / 2) - 1, image.size().height - 2, 2, 2)); Scalar color = mean(pickColor); int increment = 2; newImage = Mat(Size(image.size().width + increment, image.size().height + increment), image.type()); newImage = color; Point nc(newImage.size().width / 2, newImage.size().height / 2); int incH = image.size().height; int incW = image.size().width; int incX = nc.x - incW / 2; int incY = nc.y - incH / 2; image.copyTo(newImage(Rect(incX, incY, incW, incH))); } else { imageTooBig = true; newImage = image; } Size imgSize = newImage.size(); Mat gray(imgSize, CV_8UC1); Mat thresh(imgSize, CV_8UC1); if (newImage.channels() >= 3) cvtColor(newImage, gray, CV_BGR2GRAY); else newImage.copyTo(gray); int minThreshold; if (performOpening) { // PERFORM OPENING (Erosion --> Dilation) int erosion_size = 3; int dilation_size = 3; if (imageTooBig) { erosion_size = 5; dilation_size = 5; } Mat element = getStructuringElement(0, Size(2 * 
erosion_size, 2 * erosion_size), Point(erosion_size, erosion_size)); erode(gray, gray, element); dilate(gray, gray, element); minThreshold = mean(gray)[0]; if (minThreshold < 90) minThreshold = 60; else if (minThreshold >= 90 && minThreshold < 125) minThreshold = 100; } threshold(gray, thresh, minThreshold, 255, THRESH_BINARY); #ifdef DEBUG_MODE imshow("Threshold", thresh); #endif vector<vector<Point>> contours; vector<Vec4i> hierarchy; vector<Point> hull, approx; map<int, vector<vector<Point>>> hierachedContours; map<int, vector<vector<Point>>> approxHContours; findContours(thresh, contours, hierarchy, CV_RETR_TREE, CHAIN_APPROX_NONE); #ifdef DEBUG_MODE Mat tempI(image.size(), CV_8UC1); tempI = Scalar(0); drawContours(tempI, contours, -1, cv::Scalar(255), 1, CV_AA); imshow("Contours", tempI); #endif vector<vector<Point>> temp; // CATALOG BY HIERARCHY LOOP for (int i = 0; i < contours.size(); i++) { #ifdef DEBUG_MODE tempI = Scalar(0); temp.clear(); temp.push_back(contours[i]); drawContours(tempI, temp, -1, cv::Scalar(255), 1, CV_AA); #endif int parent = hierarchy[i][3]; if (parent == -1) { if (hierachedContours.count(i) == 0) { // me not found hierachedContours.insert(pair<int, vector<vector<Point>>>(i, vector<vector<Point>>())); hierachedContours[i].push_back(contours[i]); } else { // me found continue; } } else { if (hierachedContours.count(parent) == 0) { // dad not found hierachedContours.insert(pair<int, vector<vector<Point>>>(parent, vector<vector<Point>>())); hierachedContours[parent].push_back(contours[parent]); } hierachedContours[parent].push_back(contours[i]); } } int minPoint, maxPoint; minPoint = _minContourPoints - _minContourPoints / 2.1; maxPoint = _minContourPoints + _minContourPoints / 1.5; // APPROX LOOP for (map<int, vector<vector<Point>>>::iterator it = hierachedContours.begin(); it != hierachedContours.end(); it++) { if (it->second[0].size() < 400) continue; #ifdef DEBUG_MODE tempI = Scalar(0); drawContours(tempI, it->second, -1, 
cv::Scalar(255), 1, CV_AA); #endif if (it == hierachedContours.begin() && it->second.size() < _aspectedContours) continue; for (int k = 0; k < it->second.size(); k++) { if (it->second[k].size() < _minContourPoints) { if (k == 0) // padre break; else // figlio continue; } convexHull(it->second[k], hull, false); double epsilon = it->second[k].size() * 0.003; approxPolyDP(it->second[k], approx, epsilon, true); #ifdef DEBUG_MODE tempI = Scalar(0); vector<vector<Point>> temp; temp.push_back(approx); drawContours(tempI, temp, -1, cv::Scalar(255), 1, CV_AA); #endif // REMOVE TOO EXTERNAL SHAPES ------------- if (imageTooBig) { Rect bounding = boundingRect(it->second[k]); #ifdef DEBUG_MODE rectangle(tempI, _deleteRect, Scalar(255)); rectangle(tempI, bounding, Scalar(255)); #endif bool isInternal = bounding.x > _deleteRect.x && bounding.y > _deleteRect.y && bounding.x + bounding.width < _deleteRect.x + _deleteRect.width && bounding.y + bounding.height < _deleteRect.y + _deleteRect.height; if (!isInternal) { if (k == 0) break; } } // -------------------------------------------------- if (!findBaseShape) { if (hull.size() < minPoint || hull.size() > maxPoint) { if (k == 0) // padre break; else // figlio continue; } } if (k == 0) { approxHContours.insert(pair<int, vector<vector<Point>>>(it->first, vector<vector<Point>>())); approxHContours.at(it->first).push_back(approx); } else { approxHContours[it->first].push_back(approx); } } } int maxSize = 0, maxID = 0; vector<vector<vector<Point>>> lookupVector; for (map<int, vector<vector<Point>>>::iterator it = approxHContours.begin(); it != approxHContours.end(); it++) { if (it->second.size() <= 1) continue; if (findBaseShape) { int totSize = 0; for (int k = 0; k < it->second.size(); k++) { totSize += it->second[k].size(); } if (totSize > maxSize) { maxSize = totSize; maxID = it->first; } } else { lookupVector.push_back(it->second); } } if (findBaseShape) { lookupVector.push_back(approxHContours.at(maxID)); } return lookupVector; }
int main( int argc, /* arg count */ char * argv[] /* arg vector */ ){ static char * context = "main(chain)"; char * stem = NULL; /* dump filename stem */ char * suffix = NULL; /* dump filename suffix */ char * suff2 = NULL; /* last half of suffix */ int nr, nc; /* integer matrix sizes */ int n; /* square matrix/vector size */ real base_x, base_y; /* base of Mandelbrot */ real ext_x, ext_y; /* extent of Mandelbrot */ int limit, seed; /* randmat controls */ real fraction; /* invperc/thresh filling */ int itersLife; /* life iterations */ int itersElastic, relax; /* elastic controls */ int2D i2D; /* integer matrix */ bool2D b2D; /* boolean matrix */ pt1D cities; /* cities point vector */ int n_cities; /* number of cities */ pt1D net; /* net point vector */ int n_net; /* number of net points */ real2D r2D_gauss; /* real matrix for Gaussian */ real2D r2D_sor; /* real matrix for SOR */ real1D r1D_gauss_v; /* real vector input for Gaussian */ real1D r1D_sor_v; /* real vector input for SOR */ real1D r1D_gauss_a; /* real vector answer for Gaussian */ real1D r1D_sor_a; /* real vector answer for SOR */ real1D r1D_gauss_c; /* real vector check for Gaussian */ real1D r1D_sor_c; /* real vector check for SOR */ real tol; /* SOR tolerance */ real realDiff; /* vector difference */ bool choicesSet = FALSE; /* path choices set? */ bool doMandel = TRUE; /* mandel vs. randmat */ bool doInvperc = TRUE; /* invperc vs. thresholding */ bool doDump = FALSE; /* dump intermediate results? 
*/ int argd = 1; /* argument index */ /* arguments */ #if NUMA MAIN_INITENV(,32000000) #endif while (argd < argc){ CHECK(argv[argd][0] == '-', fail(context, "bad argument", "index", "%d", argd, NULL)); switch(argv[argd][1]){ case 'E' : /* elastic */ itersElastic = arg_int(context, argc, argv, argd+1, argv[argd]); relax = arg_int(context, argc, argv, argd+2, argv[argd]); argd += 3; break; case 'F' : /* fraction (invperc/thresh) */ fraction = arg_real(context, argc, argv, argd+1, argv[argd]); argd += 2; break; case 'L' : /* life */ itersLife = arg_int(context, argc, argv, argd+1, argv[argd]); argd += 2; break; case 'M' : /* mandel */ base_x = arg_real(context, argc, argv, argd+1, argv[argd]); base_y = arg_real(context, argc, argv, argd+2, argv[argd]); ext_x = arg_real(context, argc, argv, argd+3, argv[argd]); ext_y = arg_real(context, argc, argv, argd+4, argv[argd]); argd += 5; break; case 'N' : /* winnow */ n_cities = arg_int(context, argc, argv, argd+1, argv[argd]); argd += 2; break; case 'R' : /* randmat */ limit = arg_int(context, argc, argv, argd+1, argv[argd]); seed = arg_int(context, argc, argv, argd+2, argv[argd]); argd += 3; break; case 'S' : /* matrix size */ nr = arg_int(context, argc, argv, argd+1, argv[argd]); nc = arg_int(context, argc, argv, argd+2, argv[argd]); argd += 3; break; case 'T' : /* SOR tolerance */ tol = arg_real(context, argc, argv, argd+1, argv[argd]); argd += 2; break; case 'c' : /* choice */ CHECK(!choicesSet, fail(context, "choices already set", NULL)); suffix = arg_str(context, argc, argv, argd+1, argv[argd]); argd += 2; switch(suffix[0]){ case 'i' : doInvperc = TRUE; break; case 't' : doInvperc = FALSE; break; default : fail(context, "unknown choice(s)", "choice", "%s", suffix, NULL); } switch(suffix[1]){ case 'm' : doMandel = TRUE; break; case 'r' : doMandel = FALSE; break; default : fail(context, "unknown choice(s)", "choice", "%s", suffix, NULL); } suff2 = suffix+1; choicesSet = TRUE; break; case 'd' : /* dump */ doDump = TRUE; 
argd += 1; if ((argd < argc) && (argv[argd][0] != '-')){ stem = arg_str(context, argc, argv, argd, argv[argd-1]); argd += 1; } break; #if GRAPHICS case 'g' : gfx_open(app_chain, arg_gfxCtrl(context, argc, argv, argd+1, argv[argd])); argd += 2; break; #endif #if MIMD case 'p' : DataDist = arg_dataDist(context, argc, argv, argd+1, argv[argd]); ParWidth = arg_int(context, argc, argv, argd+2, argv[argd]); argd += 3; break; #endif case 'u' : io_init(FALSE); argd += 1; break; default : fail(context, "unknown flag", "flag", "%s", argv[argd], NULL); break; } } CHECK(choicesSet, fail("context", "choices not set using -c flag", NULL)); /* initialize */ #if MIMD sch_init(DataDist); #endif /* mandel vs. randmat */ if (doMandel){ mandel(i2D, nr, nc, base_x, base_y, ext_x, ext_y); if (doDump) io_wrInt2D(context, mkfname(stem, NULL, suff2, "i2"), i2D, nr, nc); } else { randmat(i2D, nr, nc, limit, seed); if (doDump) io_wrInt2D(context, mkfname(stem, NULL, suff2, "i2"), i2D, nr, nc); } /* half */ half(i2D, nr, nc); if (doDump) io_wrInt2D(context, mkfname(stem, "h", suff2, "i2"), i2D, nr, nc); /* invperc vs. 
thresh */ if (doInvperc){ invperc(i2D, b2D, nr, nc, fraction); if (doDump) io_wrBool2D(context, mkfname(stem, NULL, suffix, "b2"), b2D, nr, nc); } else { thresh(i2D, b2D, nr, nc, fraction); if (doDump) io_wrBool2D(context, mkfname(stem, NULL, suffix, "b2"), b2D, nr, nc); } /* life */ life(b2D, nr, nc, itersLife); if (doDump) io_wrBool2D(context, mkfname(stem, "l", suffix, "b2"), b2D, nr, nc); /* winnow */ winnow(i2D, b2D, nr, nc, cities, n_cities); if (doDump) io_wrPt1D(context, mkfname(stem, "w", suffix, "p1"), cities, n_cities); /* norm */ norm(cities, n_cities); if (doDump) io_wrPt1D(context, mkfname(stem, "n", suffix, "p1"), cities, n_cities); /* elastic */ n_net = (int)(ELASTIC_RATIO * n_cities); CHECK(n_net <= MAXEXT, fail(context, "too many net points required", "number of net points", "%d", n_net, NULL)); elastic(cities, n_cities, net, n_net, itersElastic, relax); if (doDump) io_wrPt1D(context, mkfname(stem, "e", suffix, "p1"), net, n_net); /* outer */ n = n_net; outer(net, r2D_gauss, r1D_gauss_v, n); if (doDump){ io_wrReal2D(context, mkfname(stem, "o", suffix, "r2"), r2D_gauss, n, n); io_wrReal1D(context, mkfname(stem, "o", suffix, "r1"), r1D_gauss_v, n); } cpReal2D(r2D_gauss, r2D_sor, n, n); cpReal1D(r1D_gauss_v, r1D_sor_v, n); /* gauss */ gauss(r2D_gauss, r1D_gauss_v, r1D_gauss_a, n); if (doDump) io_wrReal1D(context, mkfname(stem, "g", suffix, "r1"), r1D_gauss_a, n); /* product (gauss) */ product(r2D_gauss, r1D_gauss_a, r1D_gauss_c, n, n); if (doDump) io_wrReal1D(context, mkfname(stem, "pg", suffix, "r1"), r1D_gauss_c, n); /* sor */ sor(r2D_sor, r1D_sor_v, r1D_sor_a, n, tol); if (doDump) io_wrReal1D(context, mkfname(stem, "s", suffix, "r1"), r1D_gauss_a, n); /* product (sor) */ product(r2D_sor, r1D_sor_a, r1D_sor_c, n, n); if (doDump) io_wrReal1D(context, mkfname(stem, "ps", suffix, "r1"), r1D_gauss_c, n); /* difference */ vecdiff(r1D_gauss_a, r1D_sor_a, n, &realDiff); if (doDump) io_wrReal0D(context, mkfname(stem, "v", suffix, "r0"), realDiff); #if IEEE 
ieee_retrospective(stderr); #endif #if NUMA MAIN_END; #endif return 0; }
/** * Iterative Soft Thresholding * * @param maxiter maximum number of iterations * @param epsilon stop criterion * @param tau (step size) weighting on the residual term, A^H (b - Ax) * @param lambda_start initial regularization weighting * @param lambda_end final regularization weighting (for continuation) * @param N size of input, x * @param data structure, e.g. sense_data * @param vops vector ops definition * @param op linear operator, e.g. A * @param thresh threshold function, e.g. complex soft threshold * @param x initial estimate * @param b observations */ void ist(unsigned int maxiter, float epsilon, float tau, float continuation, bool hogwild, long N, void* data, const struct vec_iter_s* vops, void (*op)(void* data, float* dst, const float* src), void (*thresh)(void* data, float lambda, float* dst, const float* src), void* tdata, float* x, const float* b, const float* x_truth, void* obj_eval_data, float (*obj_eval)(const void*, const float*)) { struct iter_data itrdata = { .rsnew = 1., .rsnot = 1., .iter = 0, .maxiter = maxiter, }; float* r = vops->allocate(N); float* x_err = NULL; if (NULL != x_truth) x_err = vops->allocate(N); itrdata.rsnot = vops->norm(N, b); float ls_old = 1.; float lambda_scale = 1.; int hogwild_k = 0; int hogwild_K = 10; for (itrdata.iter = 0; itrdata.iter < maxiter; itrdata.iter++) { if (NULL != x_truth) { vops->sub(N, x_err, x, x_truth); debug_printf(DP_DEBUG3, "relMSE = %f\n", vops->norm(N, x_err) / vops->norm(N, x_truth)); } if (NULL != obj_eval) { float objval = obj_eval(obj_eval_data, x); debug_printf(DP_DEBUG3, "#%d OBJVAL= %f\n", itrdata.iter, objval); } ls_old = lambda_scale; lambda_scale = ist_continuation(&itrdata, continuation); if (lambda_scale != ls_old) debug_printf(DP_DEBUG3, "##lambda_scale = %f\n", lambda_scale); thresh(tdata, tau, x, x); op(data, r, x); // r = A x vops->xpay(N, -1., r, b); // r = b - r = b - A x itrdata.rsnew = vops->norm(N, r); debug_printf(DP_DEBUG3, "#It %03d: %f \n", itrdata.iter, itrdata.rsnew 
/ itrdata.rsnot); if (itrdata.rsnew < epsilon) break; vops->axpy(N, x, tau * lambda_scale, r); if (hogwild) hogwild_k++; if (hogwild_k == hogwild_K) { hogwild_K *= 2; hogwild_k = 0; tau /= 2; } } debug_printf(DP_DEBUG3, "\n"); if (NULL != x_truth) vops->del(x_err); vops->del(r); } /** * Iterative Soft Thresholding/FISTA to solve min || b - Ax ||_2 + lambda || T x ||_1 * * @param maxiter maximum number of iterations * @param epsilon stop criterion * @param tau (step size) weighting on the residual term, A^H (b - Ax) * @param lambda_start initial regularization weighting * @param lambda_end final regularization weighting (for continuation) * @param N size of input, x * @param data structure, e.g. sense_data * @param vops vector ops definition * @param op linear operator, e.g. A * @param thresh threshold function, e.g. complex soft threshold * @param x initial estimate * @param b observations */ void fista(unsigned int maxiter, float epsilon, float tau, float continuation, bool hogwild, long N, void* data, const struct vec_iter_s* vops, void (*op)(void* data, float* dst, const float* src), void (*thresh)(void* data, float lambda, float* dst, const float* src), void* tdata, float* x, const float* b, const float* x_truth, void* obj_eval_data, float (*obj_eval)(const void*, const float*)) { struct iter_data itrdata = { .rsnew = 1., .rsnot = 1., .iter = 0, .maxiter = maxiter, }; float* r = vops->allocate(N); float* o = vops->allocate(N); float* x_err = NULL; if (NULL != x_truth) x_err = vops->allocate(N); float ra = 1.; vops->copy(N, o, x); itrdata.rsnot = vops->norm(N, b); float ls_old = 1.; float lambda_scale = 1.; int hogwild_k = 0; int hogwild_K = 10; for (itrdata.iter = 0; itrdata.iter < maxiter; itrdata.iter++) { if (NULL != x_truth) { vops->sub(N, x_err, x, x_truth); debug_printf(DP_DEBUG3, "relMSE = %f\n", vops->norm(N, x_err) / vops->norm(N, x_truth)); } if (NULL != obj_eval) { float objval = obj_eval(obj_eval_data, x); debug_printf(DP_DEBUG3, "#%d OBJVAL= %f\n", 
itrdata.iter, objval); } ls_old = lambda_scale; lambda_scale = ist_continuation(&itrdata, continuation); if (lambda_scale != ls_old) debug_printf(DP_DEBUG3, "##lambda_scale = %f\n", lambda_scale); thresh(tdata, lambda_scale * tau, x, x); ravine(vops, N, &ra, x, o); // FISTA op(data, r, x); // r = A x vops->xpay(N, -1., r, b); // r = b - r = b - A x itrdata.rsnew = vops->norm(N, r); debug_printf(DP_DEBUG3, "#It %03d: %f \n", itrdata.iter, itrdata.rsnew / itrdata.rsnot); if (itrdata.rsnew < epsilon) break; vops->axpy(N, x, tau, r); if (hogwild) hogwild_k++; if (hogwild_k == hogwild_K) { hogwild_K *= 2; hogwild_k = 0; tau /= 2; } } debug_printf(DP_DEBUG3, "\n"); vops->del(o); vops->del(r); if (NULL != x_truth) vops->del(x_err); } /** * Landweber L. An iteration formula for Fredholm integral equations of the * first kind. Amer. J. Math. 1951; 73, 615-624. */ void landweber(unsigned int maxiter, float epsilon, float alpha, long N, long M, void* data, const struct vec_iter_s* vops, void (*op)(void* data, float* dst, const float* src), void (*adj)(void* data, float* dst, const float* src), float* x, const float* b, float (*obj_eval)(const void*, const float*)) { float* r = vops->allocate(M); float* p = vops->allocate(N); double rsnot = vops->norm(M, b); UNUSED(obj_eval); for (unsigned int i = 0; i < maxiter; i++) { op(data, r, x); // r = A x vops->xpay(M, -1., r, b); // r = b - r = b - A x double rsnew = vops->norm(M, r); debug_printf(DP_DEBUG3, "#%d: %f\n", i, rsnew / rsnot); if (rsnew < epsilon) break; adj(data, p, r); vops->axpy(N, x, alpha, p); } vops->del(r); vops->del(p); } /** * Conjugate Gradient Descent to solve Ax = b for symmetric A * * @param maxiter maximum number of iterations * @param regularization parameter * @param epsilon stop criterion * @param N size of input, x * @param data structure, e.g. sense_data * @param vops vector ops definition * @param linop linear operator, i.e. 
A * @param x initial estimate * @param b observations */ float conjgrad(unsigned int maxiter, float l2lambda, float epsilon, long N, void* data, const struct vec_iter_s* vops, void (*linop)(void* data, float* dst, const float* src), float* x, const float* b, const float* x_truth, void* obj_eval_data, float (*obj_eval)(const void*, const float*)) { float* r = vops->allocate(N); float* p = vops->allocate(N); float* Ap = vops->allocate(N); float* x_err = NULL; if (NULL != x_truth) x_err = vops->allocate(N); // The first calculation of the residual might not // be necessary in some cases... linop(data, r, x); // r = A x vops->axpy(N, r, l2lambda, x); vops->xpay(N, -1., r, b); // r = b - r = b - A x vops->copy(N, p, r); // p = r float rsnot = (float)pow(vops->norm(N, r), 2.); float rsold = rsnot; float rsnew = rsnot; float eps_squared = pow(epsilon, 2.); if (0. == rsold) { debug_printf(DP_DEBUG3, "CG: early out\n"); return 0.; } for (unsigned int i = 0; i < maxiter; i++) { if (NULL != x_truth) { vops->sub(N, x_err, x, x_truth); debug_printf(DP_DEBUG3, "relMSE = %f\n", vops->norm(N, x_err) / vops->norm(N, x_truth)); } if ((NULL != obj_eval) && (NULL != obj_eval_data)) { float objval = obj_eval(obj_eval_data, x); debug_printf(DP_DEBUG3, "#CG%d OBJVAL= %f\n", i, objval); } debug_printf(DP_DEBUG3, "#%d: %f\n", i, (double)sqrtf(rsnew)); linop(data, Ap, p); // Ap = A p vops->axpy(N, Ap, l2lambda, p); float pAp = (float)vops->dot(N, p, Ap); if (0. == pAp) break; float alpha = rsold / pAp; vops->axpy(N, x, +alpha, p); vops->axpy(N, r, -alpha, Ap); rsnew = (float)pow(vops->norm(N, r), 2.); float beta = rsnew / rsold; rsold = rsnew; if (rsnew <= eps_squared) { //debug_printf(DP_DEBUG3, "%d ", i); break; } vops->xpay(N, beta, p, r); // p = beta * p + r } vops->del(Ap); vops->del(p); vops->del(r); if (NULL != x_truth) vops->del(x_err); return sqrtf(rsnew); }
int main(int argc, char **argv){ if(argc != 9){ printf("wrong # args\n"); printf("inputfile outDir(./lol/) invert (0 or 1) k*1000 windowRadius gaussPyramidThresh(>= survive) finalMixThresh(>= survive) writeDebugImages(0 or 1)\n"); } int width, height, channels; unsigned char *data; char *dir = argv[2]; int inv = atoi(argv[3]); float k = float(atoi(argv[4]))/1000.f; int window = atoi(argv[5]); int gaussPyramidThresh = atoi(argv[6]); int finalMixThresh = atoi(argv[7]); bool debugImages = atoi(argv[8]); bool res = loadImage(argv[1], &width, &height, &channels, &data); if(!res){ printf("error reading image\n"); return 1; } printf("start\n"); // to grayscale unsigned char *dataGray = new unsigned char[width*height]; toGrayscale(width,height,data,dataGray); // build SAT unsigned int *sat = new unsigned int[width*height]; float * satSquared = new float[width*height]; SAT(dataGray,sat,width,height); SATSquared(dataGray,satSquared,width,height); // do thresh thresh(dataGray, sat, satSquared, width, height, k, window); if(inv){invert(dataGray,width,height);} char filename[200]; sprintf(filename,"%sresRaw.bmp",dir); if(debugImages){saveImage(filename, width, height, 1, dataGray);} unsigned char ** pyramid=NULL; makePyramid(dataGray, &pyramid, pyramidLevels, gaussPyramidThresh, width,height); for(int L=0; L<pyramidLevels; L++){ char filename[200]; sprintf(filename,"%sres_raw_%d.bmp",dir,L); if(debugImages){saveImage(filename,width/pow(2,L),height/pow(2,L),1,pyramid[L]);} } // label int vecSizeY=32; int vecSizeX=128; unsigned char *dist=new unsigned char[width*height]; unsigned short *labels = new unsigned short[width*height]; unsigned short area[maxLabels]; unsigned char *reason = new unsigned char[width*height]; unsigned char *tile = new unsigned char[width*height]; for(int l = pyramidLevels-1; l>=0; l--){ int lw = width/pow(2,l); int lh = height/pow(2,l); // write out debug data char filename[200]; sprintf(filename,"%sres_%dp2.bmp",dir,l); if(debugImages){saveImage(filename, 
lw,lh, 1, pyramid[l]);} } for(int L = pyramidLevels-1; L>=0; L--){ int lw = width/pow(2,L); int lh = height/pow(2,L); // clear out labels so that we can do progressive cleanup passes for(int i=0; i<lw*lh; i++){reason[i]=0;labels[i]=0;} unsigned short firstId = 1; int tileId = 0; int minArea = 6; if(L<2){minArea = 30;} int nTilesX = ceil((float)lw/(float)vecSizeX); int nTilesY = ceil((float)lh/(float)vecSizeY); int lastFixup = 0; for(int by=0; by<nTilesY; by++){ int endY = (by+1)*vecSizeY; if(endY>lh){endY=lh;} bool fixupRanThisRow = false; for(int bx=0; bx<nTilesX; bx++){ int endX = (bx+1)*vecSizeX; if(endX>lw){endX=lw;} label(pyramid[L],labels,reason,area,lw,lh,minArea,vecSizeX*bx,endX,vecSizeY*by,endY,maxLabels); unsigned short lastId=0; if(!condenseLabels(labels,area,firstId,&lastId,lw,lh,vecSizeX*bx,endX,vecSizeY*by,endY,maxLabels)){ printf("Error: ran out of labels!\n"); goto writeout; // an exception occured } firstId=lastId+1; //printf("ML %d\n",(int)firstId); // for debugging for(int x=vecSizeX*bx; x<endX; x++){ for(int y=vecSizeY*by; y<endY; y++){ tile[x+y*lw]=tileId; } } tileId++; if(firstId > (maxLabels*4)/5 && !fixupRanThisRow){ labelProp(labels,area,lw,lh,0,lw,0,endY,maxLabels); filter(pyramid,L,reason,labels,minArea,lw,lh,width,0,lw,lastFixup,endY,maxLabels); computeArea(labels,area,lw,lh,0,lw,0,endY,maxLabels); condenseLabels(labels,area,1,&firstId,lw,lh,0,lw,0,endY,maxLabels); firstId++; printf("fixup TL %d\n",firstId); lastFixup = (by+1)*vecSizeY; fixupRanThisRow=true; } } } // fix labels across region boundries labelProp(labels,area,lw,lh,0,lw,0,lh,maxLabels); computeArea(labels,area,lw,lh,0,lw,0,lh,maxLabels); condenseLabels(labels,area,1,&firstId,lw,lh,0,lw,0,lh,maxLabels); //printf("TL %d\n",firstId); distanceTransform(pyramid[L],dist,0,lw,lh); filter(pyramid,L,reason,labels,minArea,lw,lh,width,0,lw,0,lh,maxLabels); // now what's left "must" be text, so delete it from other pyrmid levels and save it writeout: // write out debug data char 
filename[200]; if(debugImages){ sprintf(filename,"%sL_%d.bmp",dir,L); saveImage(filename, lw,lh, labels); sprintf(filename,"%sD_%d.bmp",dir,L); saveImage(filename, lw,lh, 1, dist); sprintf(filename,"%sres_%d.bmp",dir,L); saveImage(filename, lw,lh, 1, pyramid[L]); sprintf(filename,"%sR_%d.bmp",dir,L); saveImage(filename, lw,lh, 1, reason); sprintf(filename,"%sT_%d.bmp",dir,L); saveImage(filename, lw,lh, 1, tile); } if(L==pyramidLevels-1){ for(int l = 2; l>=0; l--){ int lw = width/pow(2,l); int lh = height/pow(2,l); // write out debug data char filename[200]; sprintf(filename,"%sres_%dp.bmp",dir,l); if(debugImages){saveImage(filename, lw,lh, 1, pyramid[l]);} } } } for(int y=0; y<height; y++){ for(int x=0; x<width; x++){ int count=0; for(int l=0; l<pyramidLevels; l++){ int lx = x/pow(2,l); int ly = y/pow(2,l); int lw = width/pow(2,l); if(pyramid[l][lx+ly*lw]){count++;} } if(count<finalMixThresh){ dataGray[x+y*width]=0; } } } sprintf(filename,"%sres.bmp",dir); saveImage(filename, width, height, 1, dataGray); return 0; }