// Tear down a state_t: release the optional texture image, the three vx
// worlds, and the (expected-empty) layer hash, then free the struct itself.
static void state_destroy(state_t * state)
{
    if (state->img != NULL)
        image_u8_destroy(state->img);

    vx_world_destroy(state->world);
    vx_world_destroy(state->world2);
    vx_world_destroy(state->world3);

    // Every layer must already have been removed by the display-finished
    // callbacks; a non-empty hash here means a display was leaked.
    assert(zhash_size(state->layers) == 0);
    zhash_destroy(state->layers);

    free(state);
}
std::vector<TagMatch> detectTags(const cv::Mat& image) { int residual = image.cols % IMAGE_U8_DEFAULT_ALIGNMENT; cv::Mat img_aligned; if (residual != 0) { cv::copyMakeBorder(image, img_aligned, 0, 0, (IMAGE_U8_DEFAULT_ALIGNMENT - residual) / 2, (IMAGE_U8_DEFAULT_ALIGNMENT - residual) / 2, cv::BORDER_CONSTANT, 0); } else { img_aligned = image; } cv::cvtColor(img_aligned, img_aligned, CV_RGB2GRAY); image_u8_t *image_u8 = fromCvMat(img_aligned); std::vector<TagMatch> tags = detectTags(image_u8); if (show_window) { cv::cvtColor(img_aligned, img_aligned, CV_GRAY2RGB); for (int i = 0; i < tags.size(); i++) { cv::line(img_aligned, tags[i].p0, tags[i].p1, cv::Scalar(255,0,0), 2, CV_AA); cv::line(img_aligned, tags[i].p1, tags[i].p2, cv::Scalar(0,255,0), 2, CV_AA); cv::line(img_aligned, tags[i].p2, tags[i].p3, cv::Scalar(0,0,255), 2, CV_AA); cv::line(img_aligned, tags[i].p3, tags[i].p0, cv::Scalar(0,0,255), 2, CV_AA); Eigen::Vector3d x_axis(2,0,1); Eigen::Vector3d y_axis(0,2,1); Eigen::Vector3d origin(0,0,1); Eigen::Vector3d px = tags[i].H * x_axis; Eigen::Vector3d py = tags[i].H * y_axis; Eigen::Vector3d o = tags[i].H * origin; px/= px[2]; py/= py[2]; o/= o[2]; cv::line(img_aligned, cv::Point2d(o[0], o[1]), cv::Point2d(px[0], px[1]), cv::Scalar(255,0,255), 1, CV_AA); cv::line(img_aligned, cv::Point2d(o[0], o[1]), cv::Point2d(py[0], py[1]), cv::Scalar(255,255,0), 1, CV_AA); } cv::imshow("detections", img_aligned); cv::waitKey(1); } image_u8_destroy(image_u8); return tags; }
// AprilTag command-line demo: parse options, build a detector for the chosen
// tag family, run it over every input pnm (optionally several times), and
// print per-detection stats plus a Hamming-distance histogram.
int main(int argc, char *argv[])
{
    getopt_t *getopt = getopt_create();

    getopt_add_bool(getopt, 'h', "help", 0, "Show this help");
    getopt_add_bool(getopt, 'd', "debug", 0, "Enable debugging output (slow)");
    getopt_add_bool(getopt, 'q', "quiet", 0, "Reduce output");
    getopt_add_string(getopt, 'f', "family", "tag36h11", "Tag family to use");
    getopt_add_int(getopt, '\0', "border", "1", "Set tag family border size");
    getopt_add_int(getopt, 'i', "iters", "1", "Repeat processing on input set this many times");
    getopt_add_int(getopt, 't', "threads", "4", "Use this many CPU threads");
    getopt_add_double(getopt, 'x', "decimate", "1.0", "Decimate input image by this factor");
    getopt_add_double(getopt, 'b', "blur", "0.0", "Apply low-pass blur to input");
    getopt_add_bool(getopt, '1', "refine-decode", 0, "Spend more time trying to decode tags");
    getopt_add_bool(getopt, '2', "refine-pose", 0, "Spend more time trying to precisely localize tags");

    if (!getopt_parse(getopt, argc, argv, 1) || getopt_get_bool(getopt, "help")) {
        printf("Usage: %s [options] <input files>\n", argv[0]);
        getopt_do_usage(getopt);
        exit(0);
    }

    // Remaining (non-option) arguments are the input image paths.
    const zarray_t *inputs = getopt_get_extra_args(getopt);

    // Instantiate the requested tag family; each family is a separate
    // constructor, so map the name string by hand.
    apriltag_family_t *tf = NULL;
    const char *famname = getopt_get_string(getopt, "family");
    if (!strcmp(famname, "tag36h11"))
        tf = tag36h11_create();
    else if (!strcmp(famname, "tag36h10"))
        tf = tag36h10_create();
    else if (!strcmp(famname, "tag36artoolkit"))
        tf = tag36artoolkit_create();
    else if (!strcmp(famname, "tag25h9"))
        tf = tag25h9_create();
    else if (!strcmp(famname, "tag25h7"))
        tf = tag25h7_create();
    else {
        printf("Unrecognized tag family name. Use e.g. \"tag36h11\".\n");
        exit(-1);
    }

    tf->black_border = getopt_get_int(getopt, "border");

    // Configure the detector from the command-line options.
    apriltag_detector_t *td = apriltag_detector_create();
    apriltag_detector_add_family(td, tf);
    td->quad_decimate = getopt_get_double(getopt, "decimate");
    td->quad_sigma = getopt_get_double(getopt, "blur");
    td->nthreads = getopt_get_int(getopt, "threads");
    td->debug = getopt_get_bool(getopt, "debug");
    td->refine_decode = getopt_get_bool(getopt, "refine-decode");
    td->refine_pose = getopt_get_bool(getopt, "refine-pose");

    int quiet = getopt_get_bool(getopt, "quiet");
    int maxiters = getopt_get_int(getopt, "iters");

    const int hamm_hist_max = 10;

    for (int iter = 0; iter < maxiters; iter++) {
        if (maxiters > 1)
            printf("iter %d / %d\n", iter + 1, maxiters);

        for (int input = 0; input < zarray_size(inputs); input++) {
            // Per-image histogram of detection Hamming distances.
            // NOTE(review): det->hamming is used unchecked as an index;
            // presumably it is always < 10 for these families — confirm.
            int hamm_hist[hamm_hist_max];
            memset(hamm_hist, 0, sizeof(hamm_hist));

            char *path;
            zarray_get(inputs, input, &path);
            if (!quiet)
                printf("loading %s\n", path);

            image_u8_t *im = image_u8_create_from_pnm(path);
            if (im == NULL) {
                printf("couldn't find %s\n", path);
                continue;
            }

            zarray_t *detections = apriltag_detector_detect(td, im);

            for (int i = 0; i < zarray_size(detections); i++) {
                apriltag_detection_t *det;
                zarray_get(detections, i, &det);

                if (!quiet)
                    printf("detection %3d: id (%2dx%2d)-%-4d, hamming %d, goodness %8.3f, margin %8.3f\n",
                           i, det->family->d*det->family->d, det->family->h,
                           det->id, det->hamming, det->goodness, det->decision_margin);

                hamm_hist[det->hamming]++;

                // Detections are individually owned; free each one.
                apriltag_detection_destroy(det);
            }

            zarray_destroy(detections);

            if (!quiet) {
                timeprofile_display(td->tp);
                printf("nedges: %d, nsegments: %d, nquads: %d\n", td->nedges, td->nsegments, td->nquads);
            }

            if (!quiet)
                printf("Hamming histogram: ");

            for (int i = 0; i < hamm_hist_max; i++)
                printf("%5d", hamm_hist[i]);

            // In quiet mode only the total processing time (ms) is printed.
            if (quiet) {
                printf("%12.3f", timeprofile_total_utime(td->tp) / 1.0E3);
            }

            printf("\n");

            image_u8_destroy(im);
        }
    }

    // don't deallocate contents of inputs; those are the argv
    apriltag_detector_destroy(td);
    tag36h11_destroy(tf);
    return 0;
}
int main(){ bool showGradient = true; bool found = false; VideoCapture cap(0); // open the default camera Size size(854,480); // size of desired frame origionally 1280x720, 1024x576, 854x480 if(!cap.isOpened()) // check if camera opened return -1; Mat frame; Mat src; /* From apriltag_demo.c */ int maxiters = 5; const int hamm_hist_max = 10; int quiet = 0; apriltag_family_t *tf = tag36h11_create(); // Apriltag family 36h11, can change tf->black_border = 1; // Set tag family border size apriltag_detector_t *td = apriltag_detector_create(); // Apriltag detector apriltag_detector_add_family(td, tf); // Add apriltag family td->quad_decimate = 1.0; // Decimate input image by factor td->quad_sigma = 0.0; // No blur (I think) td->nthreads = 4; // 4 treads provided td->debug = 0; // No debuging output td->refine_decode = 0; // Don't refine decode td->refine_pose = 0; // Don't refine pose // Output variables char imgSize[20]; char renderTime[20]; char detectString[50]; char convertTime[50]; char displayString[120]; char outputString[120]; char locationString[120]; double time_taken = 0.0; long double totalFPS = 0.0; double count = 0.0; /* End of apriltag_demo.c */ while(1){ clock_t t; t = clock(); cap >> src; // Get a new frame from camera if(found){ resize(src,frame,size); } // Resize to smaller image if tag found else{ frame = src; } // Keep standard image if no tag if(showGradient){ cvtColor(src, frame, CV_BGR2GRAY); cvtColor(frame, frame, CV_GRAY2RGB); src = gradientEdges(frame); // Show gradient for fun }else{ cvtColor(src, src, CV_BGR2GRAY); } pnm_t *pnm = mat2pnm(&frame); // Convert Mat fram to pnm image_u8_t *im = pnm_to_image_u8(pnm); // Convert pnm to gray image_u8 if (im == NULL) { // Error - no image created from pnm std::cout << "Error, not a proper pnm" << std::endl; return -1; } /*** Start from origional Apriltags from apriltag_demo.c ***/ int hamm_hist[hamm_hist_max]; memset(hamm_hist, 0, sizeof(hamm_hist)); zarray_t *detections = apriltag_detector_detect(td, 
im); for (int i = 0; i < zarray_size(detections); i++) { apriltag_detection_t *det; zarray_get(detections, i, &det); sprintf(locationString, "Tag Center: (%f,%f)", det->c[0], det->c[1]); sprintf(detectString, "detection %2d: id (%2dx%2d)-%-4d, hamming %d, goodness %5.3f, margin %5.3f\n", i+1, det->family->d*det->family->d, det->family->h, det->id, det->hamming, det->goodness, det->decision_margin); hamm_hist[det->hamming]++; // draws a vertical rectangle around tag, not ideal, but easy to implement // det->p[corner][positon], counter clockwise Point pt1 = Point(det->p[0][0], det->p[0][1]); Point pt2 = Point(det->p[2][0], det->p[2][1]); cv::rectangle(src, pt1, pt2, cvScalar(102,255,0)); apriltag_detection_destroy(det); } if(zarray_size(detections) < 1){ found = false; sprintf(detectString, "No tag detected"); sprintf(locationString, "No tag detected"); }else{ found = false; } zarray_destroy(detections); image_u8_destroy(im); t = clock() - t; double time_taken = ((double)t)/(CLOCKS_PER_SEC/1000); //printf("ms to render: %5.3f\n", time_taken); if (!quiet) { //timeprofile_display(td->tp); totalFPS += (1000.0/time_taken); count += 1.0; if(count > 30000.0){ totalFPS = 0.0; count = 0.0; } sprintf(displayString, "fps: %2.2Lf, nquads: %d",totalFPS/count, td->nquads); //std::cout << displayString; } //for (int i = 0; i < hamm_hist_max; i++) //printf("%5d", hamm_hist[i]); sprintf(renderTime, "Render: %5.3fms", time_taken); sprintf(imgSize, "%dx%d", frame.cols, frame.rows); sprintf(outputString, "%s %s %s", renderTime, convertTime, imgSize); printf("%s %s\r", detectString, outputString); if (quiet) { printf("%12.3f", timeprofile_total_utime(td->tp) / 1.0E3); } printf("\n"); /*** End of origional Apriltags from apriltag_demo.c ***/ // displays fps, edges, segments, quads putText(src, displayString, cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA); // displays render time, convert time, and image size putText(src, outputString, cvPoint(30,50), 
FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA); // Displays any detections (if any) putText(src, detectString, cvPoint(30,70), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA); // Displays tag location (if any) putText(src, locationString, cvPoint(30,90), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA); imshow("Display Apriltags", src); if(waitKey(30) >= 0) break; } /* deallocate apriltag constructs */ apriltag_detector_destroy(td); tag36h11_destroy(tf); return 0; }
// Demo for the older "april_tag" API: parse a handful of single-letter flags,
// then run 36h11 detection over each input pnm, printing per-detection stats
// and detector counters.
int main(int argc, char *argv[])
{
    april_tag_family_t *tf = tag36h11_create();
    april_tag_detector_t *td = april_tag_detector_create(tf);
    td->small_tag_refinement = 0;

    int maxiters = 1;
    // Non-flag arguments accumulate here as input image paths.
    zarray_t *inputs = zarray_create(sizeof(char*));
    int waitsec = 0;

    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-d"))
            td->debug = 1;
        else if (!strcmp(argv[i], "-t"))
            td->nthreads = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-f"))
            // -f takes an optional numeric argument; without one, decimate by 2.
            td->seg_decimate = (i+1 < argc && isdigit(argv[i+1][0])) ? atoi(argv[++i]) : 2;
        else if (!strcmp(argv[i], "-i"))
            maxiters = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-r"))
            td->small_tag_refinement = 1;
        else if (!strcmp(argv[i], "-w"))
            waitsec = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-b"))
            td->seg_sigma = atof(argv[++i]);
        /* else if (!strcmp(argv[i], "--family")) { char *fam = argv[++i]; if (!strcmp(fam, "36h11")) td->tag_family = tag36h11_create(); else if (!strcmp(fam, "36h10")) td->tag_family = tag36h10_create(); } */
        else
            zarray_add(inputs, &argv[i]);
    }

    for (int iter = 0; iter < maxiters; iter++) {
        if (maxiters > 1)
            printf("iter %d / %d\n", iter + 1, maxiters);

        for (int input = 0; input < zarray_size(inputs); input++) {
            char *path;
            zarray_get(inputs, input, &path);
            printf("loading %s\n", path);

            image_u8_t *im = image_u8_create_from_pnm(path);
            if (im == NULL) {
                printf("couldn't find %s\n", path);
                continue;
            }

            zarray_t *detections = april_tag_detector_detect(td, im);

            for (int i = 0; i < zarray_size(detections); i++) {
                april_tag_detection_t *det;
                zarray_get(detections, i, &det);

                printf("detection %3d: id %4d, hamming %d, goodness %f\n", i, det->id, det->hamming, det->goodness);
                april_tag_detection_destroy(det);
            }

            zarray_destroy(detections);

            timeprofile_display(td->tp);
            printf("nedges: %d, nsegments: %d, nquads: %d\n", td->nedges, td->nsegments, td->nquads);

            image_u8_destroy(im);

            // Pause between images (except before the very first one) so the
            // debug output can be inspected.
            if (zarray_size(inputs) > 1 || iter > 0)
                sleep(waitsec);
        }
    }

    april_tag_detector_destroy(td);
    tag36h11_destroy(tf);
    return 0;
}
// Quad-detection front end: threshold the image into an edge map, group edge
// pixels into clusters via union-find, then fit quads to each cluster on the
// worker pool. Returns a zarray of struct quad owned by the caller.
zarray_t *apriltag_quad_thresh(apriltag_detector_t *td, image_u8_t *im)
{
    ////////////////////////////////////////////////////////
    // step 1. threshold the image, creating the edge image.

    int w = im->width, h = im->height, s = im->stride;

    image_u8_t *threshim = threshold(td, im);
    // Both step-1 images are indexed with stride s below, so they must share
    // the input's stride.
    assert(threshim->stride == s);

    image_u8_t *edgeim = image_u8_create(w, h);

    if (1) {
        image_u8_t *sumim = image_u8_create(w, h);

        // apply a horizontal sum kernel of width 3
        for (int y = 0; y < h; y++) {
            for (int x = 1; x+1 < w; x++) {
                sumim->buf[y*s + x] =
                    threshim->buf[y*s + x - 1] +
                    threshim->buf[y*s + x + 0] +
                    threshim->buf[y*s + x + 1];
            }
        }
        timeprofile_stamp(td->tp, "sumim");

        // deglitch: flip isolated pixels whose 3x3 neighborhood disagrees
        // with them (8 of 9 white -> make white; 1 of 9 white -> make black),
        // keeping the horizontal sums consistent as we go.
        if (td->qtp.deglitch) {
            for (int y = 1; y+1 < h; y++) {
                for (int x = 1; x+1 < w; x++) {
                    // edge: black pixel next to white pixel
                    if (threshim->buf[y*s + x] == 0 &&
                        sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] == 8) {
                        threshim->buf[y*s + x] = 1;
                        sumim->buf[y*s + x - 1]++;
                        sumim->buf[y*s + x + 0]++;
                        sumim->buf[y*s + x + 1]++;
                    }

                    if (threshim->buf[y*s + x] == 1 &&
                        sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] == 1) {
                        threshim->buf[y*s + x] = 0;
                        sumim->buf[y*s + x - 1]--;
                        sumim->buf[y*s + x + 0]--;
                        sumim->buf[y*s + x + 1]--;
                    }
                }
            }

            timeprofile_stamp(td->tp, "deglitch");
        }

        // apply a vertical sum kernel of width 3; check if any
        // over-threshold pixels are adjacent to an under-threshold
        // pixel.
        //
        // There are two types of edges: white pixels neighboring a
        // black pixel, and black pixels neighboring a white pixel. We
        // label these separately. (Values 0xc0 and 0x3f are picked
        // such that they add to 255 (see below) and so that they can be
        // viewed as pixel intensities for visualization purposes.)
        //
        // symmetry of detection. We don't want to use JUST "black
        // near white" (or JUST "white near black"), because that
        // biases the detection towards one side of the edge. This
        // measurably reduces detection performance.
        //
        // On large tags, we could treat "neighbor" pixels the same
        // way. But on very small tags, there may be other edges very
        // near the tag edge. Since each of these edges is effectively
        // two pixels thick (the white pixel near the black pixel, and
        // the black pixel near the white pixel), it becomes likely
        // that these two nearby edges will actually touch.
        //
        // A partial solution to this problem is to define edges to be
        // adjacent white-near-black and black-near-white pixels.
        //
        for (int y = 1; y+1 < h; y++) {
            for (int x = 1; x+1 < w; x++) {
                if (threshim->buf[y*s + x] == 0) {
                    // edge: black pixel next to white pixel
                    if (sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] > 0)
                        edgeim->buf[y*s + x] = 0xc0;
                } else {
                    // edge: white pixel next to black pixel when both
                    // edge types are on, we get less bias towards one
                    // side of the edge.
                    if (sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] < 9)
                        edgeim->buf[y*s + x] = 0x3f;
                }
            }
        }

        if (td->debug) {
            // Scale the 0/1 threshold image up to 0/255 so it is viewable.
            for (int y = 0; y < h; y++) {
                for (int x = 0; x < w; x++) {
                    threshim->buf[y*s + x] *= 255;
                }
            }

            image_u8_write_pnm(threshim, "debug_threshold.pnm");
            image_u8_write_pnm(edgeim, "debug_edge.pnm");
            // image_u8_destroy(edgeim2);
        }

        image_u8_destroy(threshim);
        image_u8_destroy(sumim);
    }

    timeprofile_stamp(td->tp, "edges");

    ////////////////////////////////////////////////////////
    // step 2. find connected components.

    unionfind_t *uf = unionfind_create(w * h);

    for (int y = 1; y < h - 1; y++) {
        for (int x = 1; x < w -1; x++) {
            uint8_t v = edgeim->buf[y*s + x];
            if (v==0)
                continue;

            // (dx,dy) pairs for 8 connectivity:
            // (REFERENCE) (1, 0)
            // (-1, 1) (0, 1) (1, 1)
            //
            // i.e., the minimum value of dx should be:
            // y=0: 1
            // y=1: -1
            for (int dy = 0; dy <= 1; dy++) {
                for (int dx = 1-2*dy; dx <= 1; dx++) {
                    // Join same-labeled neighbors into one component.
                    if (edgeim->buf[(y+dy)*s + (x+dx)] == v) {
                        unionfind_connect(uf, y*w + x, (y+dy)*w + x + dx);
                    }
                }
            }
        }
    }

    timeprofile_stamp(td->tp, "unionfind");

    // Map from a pair of component representatives (packed into a uint64) to
    // the cluster of boundary points between those two components.
    zhash_t *clustermap = zhash_create(sizeof(uint64_t), sizeof(zarray_t*),
                                       zhash_uint64_hash, zhash_uint64_equals);

    for (int y = 1; y < h-1; y++) {
        for (int x = 1; x < w-1; x++) {
            uint8_t v0 = edgeim->buf[y*s + x];
            if (v0 == 0)
                continue;

            uint64_t rep0 = unionfind_get_representative(uf, y*w + x);

            // 8 connectivity. (4 neighbors to check).
            // for (int dy = 0; dy <= 1; dy++) {
            // for (int dx = 1-2*dy; dx <= 1; dx++) {

            // 4 connectivity. (2 neighbors to check)
            for (int n = 1; n <= 2; n++) {
                int dy = n & 1;
                int dx = (n & 2) >> 1;

                uint8_t v1 = edgeim->buf[(y+dy)*s + x + dx];
                // Only opposite edge labels (0xc0 + 0x3f == 255) form a
                // black/white boundary worth clustering.
                if (v0 + v1 != 255)
                    continue;
                uint64_t rep1 = unionfind_get_representative(uf, (y+dy)*w + x+dx);

                // Canonical (order-independent) key for the component pair.
                uint64_t clusterid;
                if (rep0 < rep1)
                    clusterid = (rep1 << 32) + rep0;
                else
                    clusterid = (rep0 << 32) + rep1;

                zarray_t *cluster = NULL;
                if (!zhash_get(clustermap, &clusterid, &cluster)) {
                    cluster = zarray_create(sizeof(struct pt));
                    zhash_put(clustermap, &clusterid, &cluster, NULL, NULL);
                }

                // NB: We will add some points multiple times to a
                // given cluster. I don't know an efficient way to
                // avoid that here; we remove them later on when we
                // sort points by pt_compare_theta.
                if (1) {
                    struct pt p = { .x = x, .y = y};
                    zarray_add(cluster, &p);
                }
                if (1) {
                    struct pt p = { .x = x+dx, .y = y+dy};
                    zarray_add(cluster, &p);
                }
            }
        }
    }

    // make segmentation image.
    if (td->debug) {
        image_u8_t *d = image_u8_create(w, h);
        assert(d->stride == s);

        // One lazily-assigned random gray level per component representative.
        uint8_t *colors = (uint8_t*) calloc(w*h, 1);

        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                uint32_t v = unionfind_get_representative(uf, y*w+x);
                uint32_t sz = unionfind_get_set_size(uf, y*w+x);
                if (sz < td->qtp.min_cluster_pixels)
                    continue;

                uint8_t color = colors[v];

                if (color == 0) {
                    const int bias = 20;
                    color = bias + (random() % (255-bias));
                    colors[v] = color;
                }

                float mix = 0.7;
                mix = 1.0;   // mix overridden: output is the pure label color
                d->buf[y*d->stride + x] = mix*color + (1-mix)*im->buf[y*im->stride + x];
            }
        }

        free(colors);

        image_u8_write_pnm(d, "debug_segmentation.pnm");
        image_u8_destroy(d);
    }

    timeprofile_stamp(td->tp, "make clusters");

    ////////////////////////////////////////////////////////
    // step 3. process each connected component.

    zarray_t *clusters = zhash_values(clustermap);
    zhash_destroy(clustermap);

    zarray_t *quads = zarray_create(sizeof(struct quad));

    // Split the clusters into chunks and fit quads in parallel on the
    // detector's worker pool. `tasks` is a VLA sized for the worst case.
    int sz = zarray_size(clusters);
    int chunksize = 1 + sz / (APRILTAG_TASKS_PER_THREAD_TARGET * td->nthreads);
    struct quad_task tasks[sz / chunksize + 1];

    int ntasks = 0;
    for (int i = 0; i < sz; i += chunksize) {
        tasks[ntasks].td = td;
        tasks[ntasks].cidx0 = i;
        tasks[ntasks].cidx1 = imin(sz, i + chunksize);
        tasks[ntasks].h = h;
        tasks[ntasks].w = w;
        tasks[ntasks].quads = quads;
        tasks[ntasks].clusters = clusters;
        tasks[ntasks].im = im;

        workerpool_add_task(td->wp, do_quad_task, &tasks[ntasks]);
        ntasks++;
    }

    workerpool_run(td->wp);

    timeprofile_stamp(td->tp, "fit quads to clusters");

    if (td->debug) {
        FILE *f = fopen("debug_lines.ps", "w");
        fprintf(f, "%%!PS\n\n");

        image_u8_t *im2 = image_u8_copy(im);
        image_u8_darken(im2);
        image_u8_darken(im2);
        // NOTE(review): im2 appears to never be destroyed on this debug
        // path (only its height is used for layout) — looks like a leak;
        // confirm upstream.

        // assume letter, which is 612x792 points.
        double scale = fmin(612.0/im->width, 792.0/im2->height);
        fprintf(f, "%.15f %.15f scale\n", scale, scale);
        fprintf(f, "0 %d translate\n", im2->height);
        fprintf(f, "1 -1 scale\n");

        postscript_image(f, im);

        for (int i = 0; i < zarray_size(quads); i++) {
            struct quad *q;
            zarray_get_volatile(quads, i, &q);

            float rgb[3];
            int bias = 100;

            // Random bright-ish color per quad (inner `i` shadows outer).
            for (int i = 0; i < 3; i++)
                rgb[i] = bias + (random() % (255-bias));

            fprintf(f, "%f %f %f setrgbcolor\n", rgb[0]/255.0f, rgb[1]/255.0f, rgb[2]/255.0f);
            fprintf(f, "%.15f %.15f moveto %.15f %.15f lineto %.15f %.15f lineto %.15f %.15f lineto %.15f %.15f lineto stroke\n",
                    q->p[0][0], q->p[0][1], q->p[1][0], q->p[1][1],
                    q->p[2][0], q->p[2][1], q->p[3][0], q->p[3][1],
                    q->p[0][0], q->p[0][1]);
        }

        fclose(f);
    }

    // printf(" %d %d %d %d\n", indices[0], indices[1], indices[2], indices[3]);

    /*
      if (td->debug) {
      for (int i = 0; i < 4; i++) {
      int i0 = indices[i];
      int i1 = indices[(i+1)&3];

      if (i1 < i0)
      i1 += zarray_size(cluster);

      for (int j = i0; j <= i1; j++) {
      struct pt *p;
      zarray_get_volatile(cluster, j % zarray_size(cluster), &p);

      edgeim->buf[p->y*edgeim->stride + p->x] = 30+64*i;
      }
      }
      }
    */

    unionfind_destroy(uf);

    for (int i = 0; i < zarray_size(clusters); i++) {
        zarray_t *cluster;
        zarray_get(clusters, i, &cluster);
        zarray_destroy(cluster);
    }
    zarray_destroy(clusters);

    image_u8_destroy(edgeim);

    return quads;
}
double* getTag(char* path) { apriltag_family_t *tf = NULL; tf = tag36h11_create(); tf->black_border = 1; apriltag_detector_t *td = apriltag_detector_create(); apriltag_detector_add_family(td, tf); td->quad_decimate = 1.0; td->quad_sigma = 0.0; td->nthreads = 4; td->debug = 0; td->refine_decode = 0; td->refine_pose = 0; int quiet = 0; int maxiters = 1; const int hamm_hist_max = 10; int hamm_hist[hamm_hist_max]; memset(hamm_hist, 0, sizeof(hamm_hist)); image_u8_t *im = image_u8_create_from_pnm(path); if (im == NULL) { printf("couldn't find %s\n", path); return NULL; } zarray_t *detections = apriltag_detector_detect(td, im); nrows = zarray_size(detections); ncols = 9; if (nrows == 0) return NULL; double* output_matrix = new double[nrows*ncols]; for (int i = 0; i < zarray_size(detections); i++) { apriltag_detection_t *det; zarray_get(detections, i, &det); output_matrix[ncols*i+0] = det->id; for(int j=0; j<4; j++) { output_matrix[ncols*i+ 2*j +1] = det->p[j][0]; output_matrix[ncols*i+ 2*j +2] = det->p[j][1]; } hamm_hist[det->hamming]++; apriltag_detection_destroy(det); } zarray_destroy(detections); image_u8_destroy(im); // don't deallocate contents of inputs; those are the argv apriltag_detector_destroy(td); tag36h11_destroy(tf); return output_matrix; }
// Vx stress-test entry point: spin up NRENDER render threads plus a camera
// thread against a shared state, serve remote vx displays, and (unless
// --no-gtk) host a local GTK canvas until its window closes.
int main(int argc, char ** argv)
{
    getopt_t *gopt = getopt_create();
    getopt_add_bool (gopt, 'h', "help", 0, "Show help");
    getopt_add_bool (gopt, '\0', "no-gtk", 0, "Don't show gtk window, only advertise remote connection");
    getopt_add_int (gopt, 'l', "limitKBs", "-1", "Remote display bandwidth limit. < 0: unlimited.");
    getopt_add_string (gopt, '\0', "pnm", "", "Path for pnm file to render as texture (.e.g BlockM.pnm)");
    getopt_add_bool (gopt, '\0', "stay-open", 0, "Stay open after gtk exits to continue handling remote connections");

    // parse and print help
    if (!getopt_parse(gopt, argc, argv, 1) || getopt_get_bool(gopt,"help")) {
        printf ("Usage: %s [options]\n\n", argv[0]);
        getopt_do_usage (gopt);
        exit (1);
    }

    signal(SIGPIPE, SIG_IGN); // potential fix for Valgrind "Killed" on
                              // remote viewer exit

    state_t * state = state_create();

    // Load a pnm from file, and repack the data so that it's understandable by vx
    if (strcmp(getopt_get_string(gopt,"pnm"),"")) {
        image_u8_t * img2 = image_u8_create_from_pnm(getopt_get_string(gopt, "pnm"));
        state->img = image_util_convert_rgb_to_rgba (img2);
        image_u8_destroy (img2);
    }

    vx_global_init(); // Call this to initialize the vx-wide lock. Required to
                      // start the GL thread or to use the program library

    vx_application_t app = {.impl=state, .display_started=display_started, .display_finished=display_finished};

    // Configure and start the remote-display listener.
    vx_remote_display_source_attr_t remote_attr;
    vx_remote_display_source_attr_init(&remote_attr);
    remote_attr.max_bandwidth_KBs = getopt_get_int(gopt, "limitKBs");
    remote_attr.advertise_name = "Vx Stress Test";

    vx_remote_display_source_t * cxn = vx_remote_display_source_create_attr(&app, &remote_attr);

    // One render thread per world; each gets its own heap-allocated tinfo.
    // NOTE(review): tinfo ownership after pthread_create isn't visible here —
    // presumably render_loop frees it; confirm.
    for (int i = 0; i < NRENDER; i++) {
        tinfo_t * tinfo = calloc(1,sizeof(tinfo_t));
        tinfo->state = state;
        tinfo->id = i;
        pthread_create(&state->render_threads[i], NULL, render_loop, tinfo);
    }

    pthread_create(&state->camera_thread, NULL, camera_loop, state);

    if (!getopt_get_bool(gopt,"no-gtk")) {
        gdk_threads_init ();
        gdk_threads_enter ();

        gtk_init (&argc, &argv);

        vx_gtk_display_source_t * appwrap = vx_gtk_display_source_create(&app);
        GtkWidget * window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
        GtkWidget * canvas = vx_gtk_display_source_get_widget(appwrap);
        gtk_window_set_default_size (GTK_WINDOW (window), 400, 400);
        gtk_container_add(GTK_CONTAINER(window), canvas);
        gtk_widget_show (window);
        gtk_widget_show (canvas); // XXX Show all causes errors!

        g_signal_connect_swapped(G_OBJECT(window), "destroy", G_CALLBACK(gtk_main_quit), NULL);

        gtk_main (); // Blocks as long as GTK window is open
        gdk_threads_leave ();

        vx_gtk_display_source_destroy(appwrap);

        // quit when gtk closes? Or wait for remote displays/Ctrl-C
        if (!getopt_get_bool(gopt, "stay-open"))
            state->running = 0;
    }

    // Wait for the render threads to observe running == 0 and exit.
    // NOTE(review): camera_thread is never joined — verify that's intended.
    for (int i = 0; i < NRENDER; i++)
        pthread_join(state->render_threads[i], NULL);

    vx_remote_display_source_destroy(cxn);
    state_destroy(state);
    vx_global_destroy();
    getopt_destroy(gopt);
}
// Vx demo entry point: start two render threads against the shared state,
// serve remote vx displays, and (unless --no-gtk) host a local GTK canvas
// until its window closes.
int main(int argc, char ** argv)
{
    getopt_t *gopt = getopt_create();
    getopt_add_bool (gopt, 'h', "help", 0, "Show help");
    getopt_add_bool (gopt, '\0', "no-gtk", 0, "Don't show gtk window, only advertise remote connection");
    getopt_add_string (gopt, '\0', "pnm", "", "Path for pnm file to render as texture (.e.g BlockM.pnm)");
    getopt_add_bool (gopt, '\0', "stay-open", 0, "Stay open after gtk exits to continue handling remote connections");

    // parse and print help
    if (!getopt_parse(gopt, argc, argv, 1) || getopt_get_bool(gopt,"help")) {
        printf ("Usage: %s [options]\n\n", argv[0]);
        getopt_do_usage (gopt);
        exit (1);
    }

    state_t * state = state_create();

    // Load a pnm from file, and repack the data so that it's understandable by vx
    if (strcmp(getopt_get_string(gopt,"pnm"),"")) {
        image_u8_t * img2 = image_u8_create_from_pnm(getopt_get_string(gopt, "pnm"));
        state->img = image_util_convert_rgb_to_rgba (img2);
        image_u8_destroy (img2);
    }

    vx_global_init(); // Call this to initialize the vx-wide lock. Required to
                      // start the GL thread or to use the program library

    pthread_create(&state->render_thread1, NULL, render_thread1, state);
    pthread_create(&state->render_thread2, NULL, render_thread2, state);

    vx_remote_display_source_t * cxn = vx_remote_display_source_create(&state->app);

    if (!getopt_get_bool(gopt,"no-gtk")) {
        gdk_threads_init ();
        gdk_threads_enter ();

        gtk_init (&argc, &argv);

        vx_gtk_display_source_t * appwrap = vx_gtk_display_source_create(&state->app);
        GtkWidget * window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
        GtkWidget * canvas = vx_gtk_display_source_get_widget(appwrap);
        gtk_window_set_default_size (GTK_WINDOW (window), 400, 400);
        gtk_container_add(GTK_CONTAINER(window), canvas);
        gtk_widget_show (window);
        gtk_widget_show (canvas); // XXX Show all causes errors!

        g_signal_connect_swapped(G_OBJECT(window), "destroy", G_CALLBACK(gtk_main_quit), NULL);

        gtk_main (); // Blocks as long as GTK window is open
        gdk_threads_leave ();

        vx_gtk_display_source_destroy(appwrap);

        // quit when gtk closes? Or wait for remote displays/Ctrl-C
        if (!getopt_get_bool(gopt, "stay-open"))
            state->running = 0;
    }

    // Wait for both render threads to observe running == 0 and exit.
    pthread_join(state->render_thread1, NULL);
    pthread_join(state->render_thread2, NULL);

    vx_remote_display_source_destroy(cxn);
    state_destroy(state);
    vx_global_destroy();
    getopt_destroy(gopt);
}
// ROS camera callback: cache intrinsics from the first CameraInfo message,
// detect AprilTags in the incoming frame, solve the camera pose from the
// known tag world positions (BUILD_MIT path), and republish the annotated
// image (and pose, when tags are seen).
//
// Throws std::runtime_error when the camera reports no calibration.
void cam_callback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::CameraInfoConstPtr &cinfo) {
    // Get camera info
    static bool init_cam = false;
    static cv::Mat K = cv::Mat::zeros(cv::Size(3, 3), CV_64F);
    static cv::Mat D = cv::Mat::zeros(cv::Size(1, 5), CV_64F);

    // Stop if camera not calibrated (fx == 0 means an empty K matrix)
    if (cinfo->K[0] == 0.0) throw std::runtime_error("Camera not calibrated.");

    // TODO: convert to function later
    // Assign camera info only once
    if (!init_cam) {
        // Copy the row-major 3x3 K and the 5-element distortion vector.
        for (int i = 0; i < 3; ++i) {
            double *pk = K.ptr<double>(i);
            for (int j = 0; j < 3; ++j) {
                pk[j] = cinfo->K[3 * i + j];
            }
        }
        double *pd = D.ptr<double>();
        for (int k = 0; k < 5; k++) {
            pd[k] = cinfo->D[k];
        }
        init_cam = true;
    }

    // use cv_bridge and convert to grayscale image
    cv_bridge::CvImagePtr cv_ptr;
    // use toCvCopy because we will modify the image
    cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::MONO8);

    // Color copy used for the annotated output image.
    cv::Mat image_rgb;
    cv::cvtColor(cv_ptr->image, image_rgb, CV_GRAY2RGB);

#if defined(BUILD_UMICH)
    // Use apriltag_umich
    // Currently not using this version
    static april_tag_family_t *tf = tag36h11_create();
    static april_tag_detector_t *td = april_tag_detector_create(tf);

    // Wraps the cv_bridge buffer without copying.
    image_u8_t *im = image_u8_create_from_gray(
        cv_ptr->image.cols, cv_ptr->image.rows, cv_ptr->image.data);
    zarray_t *detections = april_tag_detector_detect(td, im);
    ROS_INFO("Tags detected: %d", zarray_size(detections));

    for (size_t i = 0; i < zarray_size(detections); i++) {
        april_tag_detection_t *det;
        zarray_get(detections, i, &det);
        // Corners are extracted but unused in this disabled build path.
        for (int j = 0; j < 4; j++) {
            const Point2 p = Point2(det->p[j][0], det->p[j][1]);
        }
        april_tag_detection_destroy(det);
    }

    zarray_destroy(detections);
    image_u8_destroy(im);
#elif defined(BUILD_MIT)
    // Use apriltag_mit
    static AprilTags::TagDetector tag_detector(AprilTags::tagCodes36h11);
    std::vector<AprilTags::TagDetection> detections =
        tag_detector.extractTags(cv_ptr->image);

    // Check detection size, only do work if there's tag detected
    if (detections.size()) {
        std::vector<Point2> pi; // Points in image
        std::vector<Point3> pw; // Points in world

        for (auto it = detections.begin(); it != detections.end(); it++) {
            const int id = it->id;
            const Point2 c2 = Point2(it->cxy.first, it->cxy.second);

            // Build the 2D-3D correspondences for solvePnP.
            // NOTE(review): tagsWorld[id] is read even when id is checked for
            // membership only afterwards (for the label color); an unknown id
            // would default-insert here — confirm all detected ids are mapped.
            for (int j = 0; j < 4; j++) {
                const Point2 p2 = Point2(it->p[j].first, it->p[j].second);
                pi.push_back(p2);
                Point3 p3(tagsWorld[id].p[j].x, tagsWorld[id].p[j].y, 0.0);
                pw.push_back(p3);
                // Display tag corners
                cv::circle(image_rgb, p2, 6, colors[j], 2);
            }

            // Display tag id (cyan for unknown tags, light blue for known)
            std::ostringstream ss;
            ss << id;
            auto color = cv::Scalar(0, 255, 255);
            if (tagsWorld.find(id) != tagsWorld.end()) {
                color = cv::Scalar(255, 255, 0);
            }
            cv::putText(image_rgb, ss.str(), Point2(c2.x - 5, c2.y + 5),
                        cv::FONT_HERSHEY_PLAIN, 2, color, 2);
        }

        // Get pose: solvePnP yields world->camera (r, cTw); invert to get the
        // camera position in the world frame. r/cTw are static so the previous
        // solution is NOT used as the initial guess (useExtrinsicGuess=false).
        static cv::Mat r = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
        static cv::Mat cTw = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
        cv::Mat wTc(cv::Size(3, 3), CV_64F);
        cv::Mat cRw(cv::Size(3, 3), CV_64F), wRc(cv::Size(3, 3), CV_64F);
        cv::solvePnP(pw, pi, K, D, r, cTw, false);
        cv::Rodrigues(r, cRw);
        wRc = cRw.inv();
        wTc = -wRc * cTw;
        // ROS_INFO("%f, %f, %f", r.at<double>(0,0), r.at<double>(1,0),
        // r.at<double>(2,0));
        cv::Mat q = rodriguesToQuat(r);

        // Publish camera pose in the world frame.
        geometry_msgs::PoseStamped pose_cam;
        pose_cam.header.stamp = image->header.stamp;
        pose_cam.header.frame_id = "0";

        double *pt = wTc.ptr<double>();
        pose_cam.pose.position.x = pt[0];
        pose_cam.pose.position.y = pt[1];
        pose_cam.pose.position.z = pt[2];

        double *pq = q.ptr<double>();
        pose_cam.pose.orientation.w = pq[0];
        pose_cam.pose.orientation.x = pq[1];
        pose_cam.pose.orientation.y = pq[2];
        pose_cam.pose.orientation.z = pq[3];

        pose_pub.publish(pose_cam);
    }
#endif

    // Publish image
    cv_bridge::CvImage cv_image(image->header, sensor_msgs::image_encodings::BGR8, image_rgb);
    image_pub.publish(cv_image.toImageMsg());
    // cv::imshow("image", image_rgb);
    // cv::waitKey(1);
}