void vx_world_destroy(vx_world_t * world)
{
    zhash_vmap_values(world->buffer_map, vx_world_buffer_destroy); // keys are stored in buffer struct
    zhash_destroy(world->buffer_map);

    assert(zarray_size(world->listeners) == 0 && "Destroy layers referencing worlds before worlds"); // we can't release these resources properly
    zarray_destroy(world->listeners);

    pthread_mutex_destroy(&world->buffer_mutex);
    pthread_mutex_destroy(&world->listener_mutex);

    // Tell the processing thread to quit
    world->process_running = 0;
    pthread_mutex_lock(&world->queue_mutex);
    pthread_cond_signal(&world->queue_cond);
    pthread_mutex_unlock(&world->queue_mutex);
    pthread_join(world->process_thread, NULL);

    pthread_mutex_destroy(&world->queue_mutex);
    pthread_cond_destroy(&world->queue_cond);

    // These are pointers to data stored elsewhere, just delete the
    // data structure
    zarray_destroy(world->listener_queue);
    zarray_destroy(world->buffer_queue);

    free(world);
}
static void vx_world_buffer_destroy(vx_buffer_t * buffer)
{
    vx_world_t * vw = buffer->world;
    pthread_mutex_lock(&vw->buffer_mutex);

    free(buffer->name);

    zarray_vmap(buffer->back_objs, vx_object_dec_destroy);
    zarray_destroy(buffer->back_objs);

    zarray_vmap(buffer->pending_objs, vx_object_dec_destroy);
    zarray_destroy(buffer->pending_objs);

    zarray_vmap(buffer->front_objs, vx_object_dec_destroy);
    zarray_destroy(buffer->front_objs);

    zhash_vmap_values(buffer->front_resc, vx_resc_dec_destroy);
    zhash_destroy(buffer->front_resc);

    vx_code_output_stream_destroy(buffer->front_codes);

    pthread_mutex_destroy(&buffer->mutex);

    free(buffer);
    pthread_mutex_unlock(&vw->buffer_mutex);
}
void url_parser_destroy (url_parser_t *urlp)
{
    free(urlp->protocol);
    free(urlp->host);
    free(urlp->path);

    zarray_vmap(urlp->keys, free);
    zarray_vmap(urlp->vals, free);

    zarray_destroy(urlp->keys);
    zarray_destroy(urlp->vals);

    free(urlp);
}
void workerpool_destroy(workerpool_t *wp)
{
    if (wp == NULL)
        return;

    // force all worker threads to exit.
    if (wp->nthreads > 1) {
        for (int i = 0; i < wp->nthreads; i++)
            workerpool_add_task(wp, NULL, NULL);

        pthread_mutex_lock(&wp->mutex);
        pthread_cond_broadcast(&wp->startcond);
        pthread_mutex_unlock(&wp->mutex);

        for (int i = 0; i < wp->nthreads; i++)
            pthread_join(wp->threads[i], NULL);

        pthread_mutex_destroy(&wp->mutex);
        pthread_cond_destroy(&wp->startcond);
        pthread_cond_destroy(&wp->endcond);
        free(wp->threads);
    }

    zarray_destroy(wp->tasks);
    free(wp);
}
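A minimal usage sketch for the worker pool torn down above. workerpool_add_task() and workerpool_run() are used the same way in apriltag_quad_thresh() further below; workerpool_create(nthreads) does not appear in this section and is assumed here.

// Hypothetical example: workerpool_create() is an assumed constructor, everything
// else mirrors calls that appear elsewhere in this section.
static void square_task(void *p)
{
    int *v = (int*) p;
    *v = (*v) * (*v);
}

void workerpool_example(void)
{
    workerpool_t *wp = workerpool_create(4); // assumed constructor: 4 worker threads

    int args[8];
    for (int i = 0; i < 8; i++) {
        args[i] = i;
        workerpool_add_task(wp, square_task, &args[i]);
    }
    workerpool_run(wp);       // blocks until every queued task has run

    workerpool_destroy(wp);   // queues NULL tasks internally to wake and join the workers
}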
void vx_util_unproject(double * winxyz, double * model_matrix, double * projection_matrix, int * viewport, double * vec3_out)
{
    zarray_t * fp = zarray_create(sizeof(matd_t*));

    matd_t * mm = matd_create_data(4, 4, model_matrix);
    zarray_add(fp, &mm);
    matd_t * pm = matd_create_data(4, 4, projection_matrix);
    zarray_add(fp, &pm);

    matd_t * invpm = matd_op("(MM)^-1", pm, mm);
    zarray_add(fp, &invpm);

    double v[4] = { 2*(winxyz[0]-viewport[0]) / viewport[2] - 1,
                    2*(winxyz[1]-viewport[1]) / viewport[3] - 1,
                    2*winxyz[2] - 1,
                    1 };
    matd_t * vm = matd_create_data(4, 1, v);
    zarray_add(fp, &vm);

    matd_t * objxyzh = matd_op("MM", invpm, vm);
    zarray_add(fp, &objxyzh);

    vec3_out[0] = objxyzh->data[0] / objxyzh->data[3];
    vec3_out[1] = objxyzh->data[1] / objxyzh->data[3];
    vec3_out[2] = objxyzh->data[2] / objxyzh->data[3];

    // cleanup
    zarray_vmap(fp, matd_destroy);
    zarray_destroy(fp);
}
CameraHandler()
{
    zarray_t* urls = image_source_enumerate();
    bool gotCamera = false;

    for (int i = 0; i < zarray_size(urls); ++i) {
        char* url;
        zarray_get(urls, i, &url);
        _isrc = image_source_open(url);
        if (_isrc != NULL) {
            printf("connected to camera %s\n", url);
            gotCamera = true;
            free(url);
            break;
        }
    }
    zarray_destroy(urls);

    if (!gotCamera) {
        printf("couldn't find a camera\n");
        exit(1);
    }

    if (pthread_mutex_init(&_dataMutex, NULL)) {
        printf("dataMutex not initialized\n");
        exit(1);
    }

    _im = nullptr;
    _running = false;
    _staticImage = false;
    _isrc->start(_isrc);
}
void default_cam_mgr_rotate(default_cam_mgr_t * state, double *q, uint32_t animate_ms)
{
    zarray_t * fp = zarray_create(sizeof(matd_t*));

    matd_t * eye = matd_create_data(3,1, state->eye1);
    zarray_add(fp, &eye);
    matd_t * lookat = matd_create_data(3,1, state->lookat1);
    zarray_add(fp, &lookat);
    matd_t * up = matd_create_data(3,1, state->up1);
    zarray_add(fp, &up);

    matd_t * toEye = matd_subtract(eye, lookat);
    zarray_add(fp, &toEye);

    matd_t * nextToEye = matd_create(3,1);
    zarray_add(fp, &nextToEye);
    vx_util_quat_rotate(q, toEye->data, nextToEye->data);

    matd_t * nextEye = matd_add(lookat, nextToEye);
    zarray_add(fp, &nextEye);

    matd_t * nextUp = matd_copy(up);
    zarray_add(fp, &nextUp);
    vx_util_quat_rotate(q, up->data, nextUp->data);

    // copy back results
    memcpy(state->eye1, nextEye->data, sizeof(double)*3);
    memcpy(state->up1,  nextUp->data,  sizeof(double)*3);
    state->mtime1 = vx_util_mtime() + animate_ms;

    // Disable any prior fit command
    default_destroy_fit(state);

    // cleanup
    zarray_vmap(fp, matd_destroy);
    zarray_destroy(fp);

    default_cam_mgr_follow_disable(state);
}
void vx_util_project(double * xyz, double * M44, double * P44, int * viewport, double * win_out3)
{
    zarray_t * fp = zarray_create(sizeof(matd_t*));

    matd_t * M = matd_create_data(4,4, M44);
    zarray_add(fp, &M);
    matd_t * P = matd_create_data(4,4, P44);
    zarray_add(fp, &P);

    matd_t * xyzp = matd_create(4,1);
    zarray_add(fp, &xyzp);
    memcpy(xyzp->data, xyz, 3*sizeof(double));
    xyzp->data[3] = 1.0;

    matd_t * p = matd_op("MMM", P, M, xyzp);
    zarray_add(fp, &p);

    p->data[0] = p->data[0] / p->data[3];
    p->data[1] = p->data[1] / p->data[3];
    p->data[2] = p->data[2] / p->data[3];

    double res[] = { viewport[0] + viewport[2]*(p->data[0]+1)/2.0,
                     viewport[1] + viewport[3]*(p->data[1]+1)/2.0,
                     (p->data[2] + 1)/2.0 }; // window depth comes from the projected z, not the viewport

    memcpy(win_out3, res, 3*sizeof(double));

    // cleanup
    zarray_vmap(fp, matd_destroy);
    zarray_destroy(fp);
}
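A minimal round-trip sketch of vx_util_project()/vx_util_unproject(): with identity model and projection matrices the window depth is (z+1)/2, so unprojecting the projected point should recover the input up to floating-point error. The viewport values and function name below are illustrative only.

void vx_util_project_roundtrip_example(void)
{
    double eye4[16] = { 1, 0, 0, 0,
                        0, 1, 0, 0,
                        0, 0, 1, 0,
                        0, 0, 0, 1 };
    int viewport[4] = { 0, 0, 640, 480 };

    double xyz[3] = { 0.25, -0.5, 0.1 };
    double win[3], back[3];

    vx_util_project(xyz, eye4, eye4, viewport, win);      // world -> window coordinates
    vx_util_unproject(win, eye4, eye4, viewport, back);   // window -> world coordinates

    // 'back' should now equal 'xyz' up to floating-point error
}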
static void state_destroy(state_t * state)
{
    if (verbose) printf("State destroying\n");

    state->rendering = 0;
    pthread_join(state->render_thread, NULL);
    if (verbose) printf("render thread joined\n");

    pthread_cond_signal(&state->movie_cond);
    pthread_join(state->movie_thread, NULL);
    if (verbose) printf("movie thread joined\n");

    zhash_vmap_values(state->layer_info_map, layer_info_destroy);
    zhash_destroy(state->layer_info_map); // XXX values

    for (int i = 0; i < 2; i++) {
        free(state->pixdatas[i]);
        if (state->pixbufs[i] != NULL) {
            g_object_unref(state->pixbufs[i]);
        }
    }

    task_thread_schedule_blocking(gl_thread, gl_cleanup_task, state);

    pthread_mutex_destroy(&state->mutex);
    pthread_mutex_destroy(&state->movie_mutex);
    pthread_cond_destroy(&state->movie_cond);

    zarray_destroy(state->listeners);

    if (state->last_render_info != NULL)
        render_info_destroy(state->last_render_info);

    vx_resc_manager_destroy(state->mgr);
    vx_gtk_buffer_manager_destroy(state->buffer_manager);

    free(state);
}
void getopt_destroy (getopt_t *gopt)
{
    // free the extra arguments and container
    zarray_vmap (gopt->extraargs, free);
    zarray_destroy (gopt->extraargs);

    // deep free of the getopt_option structs. Also frees key/values, so
    // after this loop, hash tables will no longer work
    zarray_vmap (gopt->options, getopt_option_destroy);
    zarray_destroy (gopt->options);

    // free tables
    zhash_destroy (gopt->lopts);
    zhash_destroy (gopt->sopts);

    free (gopt);
}
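A minimal sketch of the getopt lifecycle that ends in the getopt_destroy() above. Every call used here also appears in the apriltag demo main() later in this section; only the option set and the function name are illustrative.

int getopt_example(int argc, char *argv[])
{
    getopt_t *gopt = getopt_create();
    getopt_add_bool(gopt, 'h', "help", 0, "Show this help");
    getopt_add_int(gopt, 't', "threads", "4", "Use this many CPU threads");

    if (!getopt_parse(gopt, argc, argv, 1) || getopt_get_bool(gopt, "help")) {
        getopt_do_usage(gopt);
        getopt_destroy(gopt);
        return 0;
    }

    int nthreads = getopt_get_int(gopt, "threads");
    // ... hand nthreads and getopt_get_extra_args(gopt) to the application ...

    getopt_destroy(gopt); // frees extra args, option structs, and both lookup tables
    return nthreads;
}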
void vx_util_lookat(double * _eye, double * _lookat, double * _up, double * _out44)
{
    zarray_t * fp = zarray_create(sizeof(matd_t*));

    matd_t * eye = matd_create_data(3,1, _eye);
    zarray_add(fp, &eye);
    matd_t * lookat = matd_create_data(3,1, _lookat);
    zarray_add(fp, &lookat);
    matd_t * up = matd_create_data(3,1, _up);
    zarray_add(fp, &up);

    up = matd_vec_normalize(up);
    zarray_add(fp, &up); // note different pointer than before!

    matd_t * tmp1 = matd_subtract(lookat, eye);
    zarray_add(fp, &tmp1);
    matd_t * f = matd_vec_normalize(tmp1);
    zarray_add(fp, &f);

    matd_t * s = matd_crossproduct(f, up);
    zarray_add(fp, &s);
    matd_t * u = matd_crossproduct(s, f);
    zarray_add(fp, &u);

    matd_t * M = matd_create(4,4); // set the rows of M with s, u, -f
    zarray_add(fp, &M);
    memcpy(M->data,     s->data, 3*sizeof(double));
    memcpy(M->data + 4, u->data, 3*sizeof(double));
    memcpy(M->data + 8, f->data, 3*sizeof(double));
    for (int i = 0; i < 3; i++)
        M->data[2*4 + i] *= -1;
    M->data[3*4 + 3] = 1.0;

    matd_t * T = matd_create(4,4);
    T->data[0*4 + 3] = -eye->data[0];
    T->data[1*4 + 3] = -eye->data[1];
    T->data[2*4 + 3] = -eye->data[2];
    T->data[0*4 + 0] = 1;
    T->data[1*4 + 1] = 1;
    T->data[2*4 + 2] = 1;
    T->data[3*4 + 3] = 1;
    zarray_add(fp, &T);

    matd_t * MT = matd_op("MM", M, T);
    zarray_add(fp, &MT);

    memcpy(_out44, MT->data, 16*sizeof(double));

    // cleanup
    zarray_vmap(fp, matd_destroy);
    zarray_destroy(fp);
}
static void render_info_destroy(render_info_t * rinfo)
{
    zarray_destroy(rinfo->layers); // just pointers

    zhash_vmap_values(rinfo->camera_positions, vx_camera_pos_destroy);
    zhash_destroy(rinfo->camera_positions);

    zhash_vmap_values(rinfo->layer_positions, free);
    zhash_destroy(rinfo->layer_positions);

    free(rinfo);
}
void vx_util_angle_axis_to_quat(double theta, double * axis3, double * qout)
{
    zarray_t * fp = zarray_create(sizeof(matd_t*));

    matd_t * axis = matd_create_data(3,1, axis3);
    zarray_add(fp, &axis);
    matd_t * axis_norm = matd_vec_normalize(axis);
    zarray_add(fp, &axis_norm);

    qout[0] = cos(theta/2);

    double s = sin(theta/2);
    qout[1] = axis_norm->data[0] * s;
    qout[2] = axis_norm->data[1] * s;
    qout[3] = axis_norm->data[2] * s;

    // cleanup
    zarray_vmap(fp, matd_destroy);
    zarray_destroy(fp);
}
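A minimal sketch of the {w, x, y, z} quaternion produced above, applied with vx_util_quat_rotate() the same way default_cam_mgr_rotate() uses it. The function name is illustrative and M_PI assumes <math.h>.

void vx_util_quat_example(void)
{
    double z_axis[3] = { 0, 0, 1 };
    double q[4];
    vx_util_angle_axis_to_quat(M_PI / 2, z_axis, q); // 90 deg about +z => q ~ {0.707, 0, 0, 0.707}

    double x_axis[3] = { 1, 0, 0 };
    double rotated[3];
    vx_util_quat_rotate(q, x_axis, rotated);         // {1,0,0} should map to roughly {0,1,0}
}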
static void update_view(vx_gtk_buffer_manager_t * gtk)
{
    // The order of these two mutex locks should prevent deadlock
    // even if a user sends op codes while the user holds the GDK mutex
    gdk_threads_enter();
    pthread_mutex_lock(&gtk->mutex);

    // Clear XXX Double buffered?
    GList * children = gtk_container_get_children(GTK_CONTAINER(gtk->window));
    for (GList * iter = children; iter != NULL; iter = g_list_next(iter))
        gtk_widget_destroy(GTK_WIDGET(iter->data));
    g_list_free(children);

    // Rebuild from scratch
    GtkWidget * box = gtk_vbox_new(0, 10);
    GtkWidget * widget = NULL;

    zarray_t * layers = zhash_values(gtk->layers); // contents: layer_info_t*
    zarray_sort(layers, layer_info_compare);

    for (int lidx = 0; lidx < zarray_size(layers); lidx++) {
        layer_info_t * linfo = NULL;
        zarray_get(layers, lidx, &linfo);

        // Draw the layer name:
        widget = gtk_label_new("");
        char * text = sprintf_alloc("<b>Layer %d</b>", linfo->layer_id);
        gtk_label_set_markup(GTK_LABEL(widget), text);
        free(text);
        //gtk_container_add(GTK_CONTAINER(box), widget);
        gtk_box_pack_start(GTK_BOX(box), widget, FALSE, FALSE, 0);

        // Make a checkbox for each buffer
        zarray_t * buffers = zhash_values(linfo->buffers); // contents: buffer_info_t*
        zarray_sort(buffers, buffer_info_compare);

        for (int i = 0; i < zarray_size(buffers); i++) {
            buffer_info_t * buffer = NULL;
            zarray_get(buffers, i, &buffer);
            assert(buffer != NULL);

            widget = gtk_check_button_new_with_label(buffer->name);
            gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(widget), buffer->enabled);
            g_signal_connect(G_OBJECT(widget), "toggled",
                             G_CALLBACK(buffer_checkbox_changed), buffer);
            //gtk_container_add(GTK_CONTAINER(box), widget);
            gtk_box_pack_start(GTK_BOX(box), widget, FALSE, FALSE, 0);
        }
        zarray_destroy(buffers);
    }

    gtk_container_add(GTK_CONTAINER(gtk->window), box);
    gtk_widget_show_all(box);

    zarray_destroy(layers);

    pthread_mutex_unlock(&gtk->mutex);
    gdk_threads_leave();
}
vx_camera_pos_t * default_cam_mgr_get_cam_pos(default_cam_mgr_t * state, int * viewport, uint64_t mtime)
{
    vx_camera_pos_t * p = calloc(1, sizeof(vx_camera_pos_t));
    memcpy(p->viewport, viewport, 4*sizeof(int));

    p->perspective_fovy_degrees = state->perspective_fovy_degrees;
    p->zclip_near = state->zclip_near;
    p->zclip_far = state->zclip_far;

    // process a fit command if necessary:
    if (state->fit != NULL) {
        fit_t * f = state->fit;

        // consume the fit command
        state->fit = NULL; // XXX minor race condition, could lose a fit cmd

        // XXX We can probably do better than this using the viewport...
        state->lookat1[0] = (f->xy0[0] + f->xy1[0]) / 2;
        state->lookat1[1] = (f->xy0[1] + f->xy1[1]) / 2;
        state->lookat1[2] = 0;

        // dimensions of fit
        double Fw = f->xy1[0] - f->xy0[0];
        double Fh = f->xy1[1] - f->xy0[1];

        // aspect ratios
        double Far = Fw / Fh;
        double Var = p->viewport[2] * 1.0 / p->viewport[3];

        double tAngle = tan(p->perspective_fovy_degrees/2*M_PI/180.0);
        double height = fabs(0.5 * (Var > Far ? Fh : Fw / Var) / tAngle);

        state->eye1[0] = state->lookat1[0];
        state->eye1[1] = state->lookat1[1];
        state->eye1[2] = height;

        state->up1[0] = 0;
        state->up1[1] = 1;
        state->up1[2] = 0;

        state->mtime1 = f->mtime;

        free(f);
    }

    if (mtime > state->mtime1) {
        memcpy(p->eye,    state->eye1,    3*sizeof(double));
        memcpy(p->up,     state->up1,     3*sizeof(double));
        memcpy(p->lookat, state->lookat1, 3*sizeof(double));
        p->perspectiveness = state->perspectiveness1;
    } else if (mtime <= state->mtime0) {
        memcpy(p->eye,    state->eye0,    3*sizeof(double));
        memcpy(p->up,     state->up0,     3*sizeof(double));
        memcpy(p->lookat, state->lookat0, 3*sizeof(double));
        p->perspectiveness = state->perspectiveness0;
    } else {
        double alpha1 = ((double) mtime - state->mtime0) / (state->mtime1 - state->mtime0);
        double alpha0 = 1.0 - alpha1;

        scaled_combination(state->eye0,    alpha0, state->eye1,    alpha1, p->eye,    3);
        scaled_combination(state->up0,     alpha0, state->up1,     alpha1, p->up,     3);
        scaled_combination(state->lookat0, alpha0, state->lookat1, alpha1, p->lookat, 3);
        p->perspectiveness = state->perspectiveness0*alpha0 + state->perspectiveness1*alpha1;

        // Tweak so eye-to-lookat is the right distance
        {
            zarray_t * fp = zarray_create(sizeof(matd_t*));

            matd_t * eye = matd_create_data(3,1, p->eye);
            zarray_add(fp, &eye);
            matd_t * lookat = matd_create_data(3,1, p->lookat);
            zarray_add(fp, &lookat);
            matd_t * up = matd_create_data(3,1, p->up);
            zarray_add(fp, &up);

            matd_t * eye0 = matd_create_data(3,1, state->eye0);
            zarray_add(fp, &eye0);
            matd_t * lookat0 = matd_create_data(3,1, state->lookat0);
            zarray_add(fp, &lookat0);
            matd_t * up0 = matd_create_data(3,1, state->up0);
            zarray_add(fp, &up0);

            matd_t * eye1 = matd_create_data(3,1, state->eye1);
            zarray_add(fp, &eye1);
            matd_t * lookat1 = matd_create_data(3,1, state->lookat1);
            zarray_add(fp, &lookat1);
            matd_t * up1 = matd_create_data(3,1, state->up1);
            zarray_add(fp, &up1);

            double dist0 = matd_vec_dist(eye0, lookat0);
            double dist1 = matd_vec_dist(eye1, lookat1);

            matd_t * dist = matd_create_scalar(dist0*alpha0 + dist1*alpha1);
            zarray_add(fp, &dist);

            matd_t * eye2p = matd_subtract(eye, lookat);
            zarray_add(fp, &eye2p);
            eye2p = matd_vec_normalize(eye2p);
            zarray_add(fp, &eye2p);

            eye = matd_op("M + (M*M)", lookat, eye2p, dist);
            zarray_add(fp, &eye);

            // Only modified eye
            memcpy(p->eye, eye->data, 3*sizeof(double));

            zarray_vmap(fp, matd_destroy);
            zarray_destroy(fp);
        }
    }

    // Need to do more fixup depending on interface mode!
    {
        if (state->interface_mode <= 2.0) {
            // stack eye on lookat:
            p->eye[0] = p->lookat[0];
            p->eye[1] = p->lookat[1];
            p->lookat[2] = 0;

            // skip fabs() for ENU/NED compat
            //p->eye[2] = fabs(p->eye[2]);

            {
                matd_t * up = matd_create_data(3,1, p->up);
                up->data[2] = 0; // up should never point in Z
                matd_t * up_norm = matd_vec_normalize(up);
                memcpy(p->up, up_norm->data, sizeof(double)*3);
                matd_destroy(up);
                matd_destroy(up_norm);
            }
        } else if (state->interface_mode == 2.5) {
            zarray_t * fp = zarray_create(sizeof(matd_t*));

            matd_t * eye = matd_create_data(3,1, p->eye);
            zarray_add(fp, &eye);
            matd_t * lookat = matd_create_data(3,1, p->lookat);
            zarray_add(fp, &lookat);
            matd_t * up = matd_create_data(3,1, p->up);
            zarray_add(fp, &up);

            lookat->data[2] = 0.0; // Level horizon

            matd_t * dir = matd_subtract(lookat, eye);
            zarray_add(fp, &dir);
            matd_t * dir_norm = matd_vec_normalize(dir);
            zarray_add(fp, &dir_norm);

            matd_t * left = matd_crossproduct(up, dir_norm);
            zarray_add(fp, &left);
            left->data[2] = 0.0;
            left = matd_vec_normalize(left);
            zarray_add(fp, &left);

            // Don't allow upside down
            //up->data[2] = fmax(0.0, up->data[2]); // XXX NED?

            // Find an 'up' direction perpendicular to left
            matd_t * dot_scalar = matd_create_scalar(matd_vec_dot_product(up, left));
            zarray_add(fp, &dot_scalar);
            up = matd_op("M - (M*M)", up, left, dot_scalar);
            zarray_add(fp, &up);
            up = matd_vec_normalize(up);
            zarray_add(fp, &up);

            // Now find eye position by computing new lookat dir
            matd_t * eye_dir = matd_crossproduct(up, left);
            zarray_add(fp, &eye_dir);

            matd_t * eye_dist_scalar = matd_create_scalar(matd_vec_dist(eye, lookat));
            zarray_add(fp, &eye_dist_scalar);

            eye = matd_op("M + (M*M)", lookat, eye_dir, eye_dist_scalar);
            zarray_add(fp, &eye);

            // export results back to p:
            memcpy(p->eye,    eye->data,    sizeof(double)*3);
            memcpy(p->lookat, lookat->data, sizeof(double)*3);
            memcpy(p->up,     up->data,     sizeof(double)*3);

            zarray_vmap(fp, matd_destroy);
            zarray_destroy(fp);
        }
    }

    // Fix up for bad zoom
    if (1) {
        matd_t * eye = matd_create_data(3,1, p->eye);
        matd_t * lookat = matd_create_data(3,1, p->lookat);
        matd_t * up = matd_create_data(3,1, p->up);

        matd_t * lookeye = matd_subtract(lookat, eye);
        matd_t * lookdir = matd_vec_normalize(lookeye);

        double dist = matd_vec_dist(eye, lookat);
        dist = fmin(state->zclip_far / 3.0, dist);
        dist = fmax(state->zclip_near * 3.0, dist);

        matd_scale_inplace(lookdir, dist);

        matd_t * eye_fixed = matd_subtract(lookat, lookdir);
        memcpy(p->eye, eye_fixed->data, sizeof(double)*3);

        matd_destroy(eye);
        matd_destroy(lookat);
        matd_destroy(up);
        matd_destroy(lookeye);
        matd_destroy(lookdir);
        matd_destroy(eye_fixed);
    }

    // copy the result back into 'state'
    {
        memcpy(state->eye0,    p->eye,    3*sizeof(double));
        memcpy(state->up0,     p->up,     3*sizeof(double));
        memcpy(state->lookat0, p->lookat, 3*sizeof(double));
        state->perspectiveness0 = p->perspectiveness;
        state->mtime0 = mtime;
    }

    return p;
}
static void wav_group_destroy(wav_group_t * wgrp)
{
    zarray_destroy(wgrp->group_idx);
    free(wgrp);
}
void hsv_find_balls_blob_detector(image_u32_t* im, frame_t frame, metrics_t met, zarray_t* blobs_out)
{
    assert(frame.xy0.x < frame.xy1.x && frame.xy0.y < frame.xy1.y);
    assert(frame.xy0.x >= 0 && frame.xy0.y >= 0 && frame.xy1.x < im->width && frame.xy1.y < im->height);
    assert(frame.ex0.x < frame.ex1.x && frame.ex0.y < frame.ex1.y);
    assert(frame.ex0.x >= 0 && frame.ex0.y >= 0 && frame.ex1.x < im->width && frame.ex1.y < im->height);

    // Int to node
    zhash_t* node_map = zhash_create(sizeof(uint32_t), sizeof(node_t*),
                                     zhash_uint32_hash, zhash_uint32_equals);

    for (int i = frame.xy0.y; i < frame.xy1.y; i++) {
        for (int j = frame.xy0.x; j < frame.xy1.x; j++) {
            if ((i < frame.ex0.y || i > frame.ex1.y) || (j < frame.ex0.x || j > frame.ex1.x)) {
                uint32_t idx_im = i * im->stride + j; // index relative to image

                // Pixel color data
                uint32_t abgr = im->buf[idx_im];
                hsv_t hsv = {0, 0, 0};
                rgb_to_hsv(abgr, &hsv);
                hsv_t error = {fabs(hsv.hue - met.hsv.hue),
                               fabs(hsv.sat - met.hsv.sat),
                               fabs(hsv.val - met.hsv.val)};

                // 'Acceptable'
                if ((error.hue < met.error.hue) && (error.sat < met.error.sat) && (error.val < met.error.val)) {
                    // Create new node, set itself up as a parent
                    node_t* n = calloc(1, sizeof(node_t));
                    n->id = idx_im;
                    n->parent_id = idx_im;
                    n->parent_node = n;
                    n->num_children = 0;

                    node_t* tmp_node;
                    uint32_t tmp_idx;

                    // Add node to node map
                    if (zhash_put(node_map, &idx_im, &n, &tmp_idx, &tmp_node) == 1) {
                        assert(0);
                    }

                    // Check if part of another blob, or starting a new blob:
                    // if part of another, point to the parent; if a new blob, point to self.

                    // Check neighbours
                    if (!met.lines) { // only check this if we don't want lines for tape detection
                        if (j > frame.xy0.x) {
                            tmp_idx = idx_im - 1; // is the left neighbour a similar color?
                            if (zhash_get(node_map, &tmp_idx, &tmp_node) == 1) {
                                node_t* neighbour = tmp_node;
                                connect(n, neighbour);
                            }
                        }
                    }
                    if (i > frame.xy0.y) {
                        tmp_idx = idx_im - im->stride; // is the bottom neighbour a similar color?
                        if (tmp_idx > 0 && zhash_get(node_map, &tmp_idx, &tmp_node) == 1) {
                            node_t* neighbour = tmp_node;
                            connect(neighbour, n);
                        }
                    }
                }
            }
        }
    }

    // Count the number of children for each parent: go through node_map;
    // if a node is not a parent, add 1 to its parent->num_children and delete it from the hash;
    // if it is a parent, do nothing.
    zarray_t* vals = zhash_values(node_map);
    for (int i = 0; i < zarray_size(vals); i++) {
        node_t* node;
        zarray_get(vals, i, &node);
        resolve_r(node);
        if (node->parent_id != node->id) {
            node->parent_node->num_children++;
            // key should exist; if it doesn't, find out why
            assert(zhash_remove(node_map, &node->id, NULL, NULL));
        }
    }
    zarray_destroy(vals); // zhash_values() allocates a new array each call

    // search the parent-only hash and add to blobs_out conditionally
    vals = zhash_values(node_map);
    for (int i = 0; i < zarray_size(vals); i++) {
        node_t* node;
        zarray_get(vals, i, &node);
        if (node->num_children > met.min_size) {
            loc_t pos;
            pos.x = node->parent_id % im->stride;
            pos.y = node->parent_id / im->stride;
            zarray_add(blobs_out, &pos);
            // printf("parent %d\n", node->id);
        }
    }
    zarray_destroy(vals);

    zhash_vmap_values(node_map, free);
    zhash_destroy(node_map);
}
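The blob detector above leans on connect() and resolve_r(), which are not shown in this section. A plausible union-find-style sketch is given below; the real helpers may differ in detail, but the usage above implies resolve_r() rewrites a node's parent pointers up to the root and connect() merges two nodes' trees.

// Hypothetical sketch only -- not the detector's actual helpers.
static node_t* resolve_r(node_t* n)
{
    if (n->parent_node != n) {
        n->parent_node = resolve_r(n->parent_node); // path compression up to the root
        n->parent_id = n->parent_node->id;
    }
    return n->parent_node;
}

static void connect(node_t* a, node_t* b)
{
    node_t* ra = resolve_r(a);
    node_t* rb = resolve_r(b);
    if (ra != rb) {
        ra->parent_node = rb; // merge: a's root now points at b's root
        ra->parent_id = rb->id;
    }
}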
// this loop tries to run at X fps, and issue render commands
static void * render_loop(void * foo)
{
    state_t * state = foo;

    if (verbose) printf("Starting render thread!\n");

    uint64_t render_count = 0;
    uint64_t last_mtime = vx_mtime();
    double avgDT = 1.0f/state->target_frame_rate;
    uint64_t avg_loop_us = 3000; // initial render time guess

    while (state->rendering) {
        int64_t sleeptime = (1000000 / state->target_frame_rate) - (int64_t) avg_loop_us;
        if (sleeptime > 0)
            usleep(sleeptime); // XXX fix to include render time

        // Diagnostic tracking
        uint64_t mtime_start = vx_mtime(); // XXX
        avgDT = avgDT*.9 + .1 * (mtime_start - last_mtime)/1000;
        last_mtime = mtime_start;
        render_count++;

        if (verbose) {
            if (render_count % 100 == 0)
                printf("Average render DT = %.3f FPS = %.3f avgloopus %"PRIu64" sleeptime = %"PRIi64"\n",
                       avgDT, 1.0/avgDT, avg_loop_us, sleeptime);
        }

        // prep the render data
        render_buffer_t rbuf;
        rbuf.state = state;
        rbuf.width = gtku_image_pane_get_width(state->imagePane);
        rbuf.height = gtku_image_pane_get_height(state->imagePane);

        if (rbuf.width == 0 && rbuf.height == 0)
            continue; // if the viewport is 0,0

        // smartly reuse, or reallocate the output pixel buffer when resizing occurs
        GdkPixbuf * pixbuf = state->pixbufs[state->cur_pb_idx];
        if (pixbuf == NULL ||
            gdk_pixbuf_get_width(pixbuf) != rbuf.width ||
            gdk_pixbuf_get_height(pixbuf) != rbuf.height) {

            if (pixbuf != NULL) {
                g_object_unref(pixbuf);
                free(state->pixdatas[state->cur_pb_idx]);
            }

            state->pixdatas[state->cur_pb_idx] = malloc(rbuf.width*rbuf.height*3); // can't stack allocate, can be too big (retina)
            pixbuf = gdk_pixbuf_new_from_data(state->pixdatas[state->cur_pb_idx],
                                              GDK_COLORSPACE_RGB, FALSE, 8,
                                              rbuf.width, rbuf.height, rbuf.width*3,
                                              NULL, NULL); // no destructor fn for pix data, handle manually
            state->pixbufs[state->cur_pb_idx] = pixbuf;
        }

        // second half of init:
        rbuf.out_buf = gdk_pixbuf_get_pixels(pixbuf);
        rbuf.format = GL_RGB;
        rbuf.rendered = 0;

        // 1 compute all the viewports
        render_info_t * rinfo = render_info_create();
        rinfo->viewport[0] = rinfo->viewport[1] = 0;
        rinfo->viewport[2] = rbuf.width;
        rinfo->viewport[3] = rbuf.height;

        {
            zhash_iterator_t itr;
            uint32_t layer_id = 0;
            layer_info_t * linfo = NULL;
            zhash_iterator_init(state->layer_info_map, &itr);
            while (zhash_iterator_next(&itr, &layer_id, &linfo)) {
                zarray_add(rinfo->layers, &linfo);
            }
            zarray_sort(rinfo->layers, zvx_layer_info_compare);
        }

        zarray_t * fp = zarray_create(sizeof(matd_t*));
        matd_t * mm = matd_create(4,4);
        zarray_add(fp, &mm);
        matd_t * pm = matd_create(4,4);
        zarray_add(fp, &pm);

        pthread_mutex_lock(&state->mutex);
        for (int i = 0; i < zarray_size(rinfo->layers); i++) {
            layer_info_t * linfo = NULL;
            zarray_get(rinfo->layers, i, &linfo);

            int * viewport = vx_viewport_mgr_get_pos(linfo->vp_mgr, rinfo->viewport, mtime_start);
            vx_camera_pos_t * pos = default_cam_mgr_get_cam_pos(linfo->cam_mgr, viewport, mtime_start);

            // store viewport, pos
            zhash_put(rinfo->layer_positions, &linfo->layer_id, &viewport, NULL, NULL);
            zhash_put(rinfo->camera_positions, &linfo->layer_id, &pos, NULL, NULL);

            // feed the actual camera/projection matrix to the gl side
            vx_camera_pos_model_matrix(pos, mm->data);
            vx_camera_pos_projection_matrix(pos, pm->data);

            matd_t * pmmm = matd_multiply(pm, mm);
            zarray_add(fp, &pmmm);

            float pm16[16];
            vx_util_copy_floats(pmmm->data, pm16, 16);

            float eye3[16];
            vx_util_copy_floats(pos->eye, eye3, 3);

            vx_gl_renderer_set_layer_render_details(state->glrend, linfo->layer_id, viewport, pm16, eye3);
        }

        // 2 Render the data
        task_thread_schedule_blocking(gl_thread, render_task, &rbuf);

        render_info_t * old = state->last_render_info;
        state->last_render_info = rinfo;

        pthread_mutex_unlock(&state->mutex);

        // 3 if a render occurred, then swap gtk buffers
        if (rbuf.rendered) {
            // point to the correct buffer for the next render:
            state->cur_pb_idx = (state->cur_pb_idx + 1) % 2;

            // flip y coordinate in place:
            vx_util_flipy(rbuf.width*3, rbuf.height, rbuf.out_buf);

            // swap the image's backing buffer
            g_object_ref(pixbuf); // XXX Since gtku always unrefs with each of these calls, increment accordingly
            gtku_image_pane_set_buffer(state->imagePane, pixbuf);
        }

        // 3.1 If a movie is in progress, also need to serialize the frame
        pthread_mutex_lock(&state->movie_mutex);
        if (state->movie_file != NULL) {
            int last_idx = (state->cur_pb_idx + 1) % 2;
            GdkPixbuf * pb = state->pixbufs[last_idx];

            movie_frame_t * movie_img = calloc(1, sizeof(movie_frame_t));
            movie_img->mtime = mtime_start;
            movie_img->width = gdk_pixbuf_get_width(pb);
            movie_img->height = gdk_pixbuf_get_height(pb);
            movie_img->stride = 3*movie_img->width;
            movie_img->buf = malloc(movie_img->stride*movie_img->height);
            memcpy(movie_img->buf, state->pixdatas[last_idx], movie_img->stride*movie_img->height);

            // Alloc in this thread, dealloc in movie thread
            zarray_add(state->movie_pending, &movie_img);
            pthread_cond_signal(&state->movie_cond);
        }
        pthread_mutex_unlock(&state->movie_mutex);

        // cleanup
        if (old)
            render_info_destroy(old);
        zarray_vmap(fp, matd_destroy);
        zarray_destroy(fp);

        uint64_t mtime_end = vx_mtime();
        avg_loop_us = (uint64_t)(.5*avg_loop_us + .5 * 1000 * (mtime_end - mtime_start));
    }

    if (verbose) printf("Render thread exiting\n");

    pthread_exit(NULL);
}
zarray_t *apriltag_quad_thresh(apriltag_detector_t *td, image_u8_t *im)
{
    ////////////////////////////////////////////////////////
    // step 1. threshold the image, creating the edge image.

    int w = im->width, h = im->height, s = im->stride;

    image_u8_t *threshim = threshold(td, im);
    assert(threshim->stride == s);

    image_u8_t *edgeim = image_u8_create(w, h);

    if (1) {
        image_u8_t *sumim = image_u8_create(w, h);

        // apply a horizontal sum kernel of width 3
        for (int y = 0; y < h; y++) {
            for (int x = 1; x+1 < w; x++) {
                sumim->buf[y*s + x] =
                    threshim->buf[y*s + x - 1] +
                    threshim->buf[y*s + x + 0] +
                    threshim->buf[y*s + x + 1];
            }
        }
        timeprofile_stamp(td->tp, "sumim");

        // deglitch
        if (td->qtp.deglitch) {
            for (int y = 1; y+1 < h; y++) {
                for (int x = 1; x+1 < w; x++) {
                    // edge: black pixel next to white pixel
                    if (threshim->buf[y*s + x] == 0 &&
                        sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] == 8) {
                        threshim->buf[y*s + x] = 1;
                        sumim->buf[y*s + x - 1]++;
                        sumim->buf[y*s + x + 0]++;
                        sumim->buf[y*s + x + 1]++;
                    }

                    if (threshim->buf[y*s + x] == 1 &&
                        sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] == 1) {
                        threshim->buf[y*s + x] = 0;
                        sumim->buf[y*s + x - 1]--;
                        sumim->buf[y*s + x + 0]--;
                        sumim->buf[y*s + x + 1]--;
                    }
                }
            }

            timeprofile_stamp(td->tp, "deglitch");
        }

        // apply a vertical sum kernel of width 3; check if any
        // over-threshold pixels are adjacent to an under-threshold
        // pixel.
        //
        // There are two types of edges: white pixels neighboring a
        // black pixel, and black pixels neighboring a white pixel. We
        // label these separately. (Values 0xc0 and 0x3f are picked
        // such that they add to 255 (see below) and so that they can be
        // viewed as pixel intensities for visualization purposes.)
        //
        // Both edge types are labeled in order to preserve the
        // symmetry of detection. We don't want to use JUST "black
        // near white" (or JUST "white near black"), because that
        // biases the detection towards one side of the edge. This
        // measurably reduces detection performance.
        //
        // On large tags, we could treat "neighbor" pixels the same
        // way. But on very small tags, there may be other edges very
        // near the tag edge. Since each of these edges is effectively
        // two pixels thick (the white pixel near the black pixel, and
        // the black pixel near the white pixel), it becomes likely
        // that these two nearby edges will actually touch.
        //
        // A partial solution to this problem is to define edges to be
        // adjacent white-near-black and black-near-white pixels.
        //
        for (int y = 1; y+1 < h; y++) {
            for (int x = 1; x+1 < w; x++) {
                if (threshim->buf[y*s + x] == 0) {
                    // edge: black pixel next to white pixel
                    if (sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] > 0)
                        edgeim->buf[y*s + x] = 0xc0;
                } else {
                    // edge: white pixel next to black pixel. when both
                    // edge types are on, we get less bias towards one
                    // side of the edge.
                    if (sumim->buf[y*s + x - s] + sumim->buf[y*s + x] + sumim->buf[y*s + x + s] < 9)
                        edgeim->buf[y*s + x] = 0x3f;
                }
            }
        }

        if (td->debug) {
            for (int y = 0; y < h; y++) {
                for (int x = 0; x < w; x++) {
                    threshim->buf[y*s + x] *= 255;
                }
            }

            image_u8_write_pnm(threshim, "debug_threshold.pnm");
            image_u8_write_pnm(edgeim, "debug_edge.pnm");
            // image_u8_destroy(edgeim2);
        }

        image_u8_destroy(threshim);
        image_u8_destroy(sumim);
    }

    timeprofile_stamp(td->tp, "edges");

    ////////////////////////////////////////////////////////
    // step 2. find connected components.

    unionfind_t *uf = unionfind_create(w * h);

    for (int y = 1; y < h - 1; y++) {
        for (int x = 1; x < w - 1; x++) {
            uint8_t v = edgeim->buf[y*s + x];
            if (v == 0)
                continue;

            // (dx,dy) pairs for 8 connectivity:
            //          (REFERENCE) (1, 0)
            // (-1, 1)    (0, 1)    (1, 1)
            //
            // i.e., the minimum value of dx should be:
            //   y=0:  1
            //   y=1: -1
            for (int dy = 0; dy <= 1; dy++) {
                for (int dx = 1-2*dy; dx <= 1; dx++) {
                    if (edgeim->buf[(y+dy)*s + (x+dx)] == v) {
                        unionfind_connect(uf, y*w + x, (y+dy)*w + x + dx);
                    }
                }
            }
        }
    }

    timeprofile_stamp(td->tp, "unionfind");

    zhash_t *clustermap = zhash_create(sizeof(uint64_t), sizeof(zarray_t*),
                                       zhash_uint64_hash, zhash_uint64_equals);

    for (int y = 1; y < h-1; y++) {
        for (int x = 1; x < w-1; x++) {
            uint8_t v0 = edgeim->buf[y*s + x];
            if (v0 == 0)
                continue;

            uint64_t rep0 = unionfind_get_representative(uf, y*w + x);

            // 8 connectivity. (4 neighbors to check).
            // for (int dy = 0; dy <= 1; dy++) {
            //    for (int dx = 1-2*dy; dx <= 1; dx++) {

            // 4 connectivity. (2 neighbors to check)
            for (int n = 1; n <= 2; n++) {
                int dy = n & 1;
                int dx = (n & 2) >> 1;

                uint8_t v1 = edgeim->buf[(y+dy)*s + x + dx];
                if (v0 + v1 != 255)
                    continue;

                uint64_t rep1 = unionfind_get_representative(uf, (y+dy)*w + x+dx);

                uint64_t clusterid;
                if (rep0 < rep1)
                    clusterid = (rep1 << 32) + rep0;
                else
                    clusterid = (rep0 << 32) + rep1;

                zarray_t *cluster = NULL;
                if (!zhash_get(clustermap, &clusterid, &cluster)) {
                    cluster = zarray_create(sizeof(struct pt));
                    zhash_put(clustermap, &clusterid, &cluster, NULL, NULL);
                }

                // NB: We will add some points multiple times to a
                // given cluster. I don't know an efficient way to
                // avoid that here; we remove them later on when we
                // sort points by pt_compare_theta.
                if (1) {
                    struct pt p = { .x = x, .y = y };
                    zarray_add(cluster, &p);
                }
                if (1) {
                    struct pt p = { .x = x+dx, .y = y+dy };
                    zarray_add(cluster, &p);
                }
            }
        }
    }

    // make segmentation image.
    if (td->debug) {
        image_u8_t *d = image_u8_create(w, h);
        assert(d->stride == s);

        uint8_t *colors = (uint8_t*) calloc(w*h, 1);

        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                uint32_t v = unionfind_get_representative(uf, y*w+x);
                uint32_t sz = unionfind_get_set_size(uf, y*w+x);
                if (sz < td->qtp.min_cluster_pixels)
                    continue;

                uint8_t color = colors[v];

                if (color == 0) {
                    const int bias = 20;
                    color = bias + (random() % (255-bias));
                    colors[v] = color;
                }

                float mix = 0.7;
                mix = 1.0;
                d->buf[y*d->stride + x] = mix*color + (1-mix)*im->buf[y*im->stride + x];
            }
        }

        free(colors);

        image_u8_write_pnm(d, "debug_segmentation.pnm");
        image_u8_destroy(d);
    }

    timeprofile_stamp(td->tp, "make clusters");

    ////////////////////////////////////////////////////////
    // step 3. process each connected component.

    zarray_t *clusters = zhash_values(clustermap);
    zhash_destroy(clustermap);

    zarray_t *quads = zarray_create(sizeof(struct quad));

    int sz = zarray_size(clusters);
    int chunksize = 1 + sz / (APRILTAG_TASKS_PER_THREAD_TARGET * td->nthreads);
    struct quad_task tasks[sz / chunksize + 1];

    int ntasks = 0;
    for (int i = 0; i < sz; i += chunksize) {
        tasks[ntasks].td = td;
        tasks[ntasks].cidx0 = i;
        tasks[ntasks].cidx1 = imin(sz, i + chunksize);
        tasks[ntasks].h = h;
        tasks[ntasks].w = w;
        tasks[ntasks].quads = quads;
        tasks[ntasks].clusters = clusters;
        tasks[ntasks].im = im;

        workerpool_add_task(td->wp, do_quad_task, &tasks[ntasks]);
        ntasks++;
    }

    workerpool_run(td->wp);

    timeprofile_stamp(td->tp, "fit quads to clusters");

    if (td->debug) {
        FILE *f = fopen("debug_lines.ps", "w");
        fprintf(f, "%%!PS\n\n");

        image_u8_t *im2 = image_u8_copy(im);
        image_u8_darken(im2);
        image_u8_darken(im2);

        // assume letter, which is 612x792 points.
        double scale = fmin(612.0/im->width, 792.0/im2->height);
        fprintf(f, "%.15f %.15f scale\n", scale, scale);
        fprintf(f, "0 %d translate\n", im2->height);
        fprintf(f, "1 -1 scale\n");

        postscript_image(f, im);

        for (int i = 0; i < zarray_size(quads); i++) {
            struct quad *q;
            zarray_get_volatile(quads, i, &q);

            float rgb[3];
            int bias = 100;

            for (int i = 0; i < 3; i++)
                rgb[i] = bias + (random() % (255-bias));

            fprintf(f, "%f %f %f setrgbcolor\n", rgb[0]/255.0f, rgb[1]/255.0f, rgb[2]/255.0f);
            fprintf(f, "%.15f %.15f moveto %.15f %.15f lineto %.15f %.15f lineto %.15f %.15f lineto %.15f %.15f lineto stroke\n",
                    q->p[0][0], q->p[0][1],
                    q->p[1][0], q->p[1][1],
                    q->p[2][0], q->p[2][1],
                    q->p[3][0], q->p[3][1],
                    q->p[0][0], q->p[0][1]);
        }

        fclose(f);
    }

    // printf("  %d %d %d %d\n", indices[0], indices[1], indices[2], indices[3]);

    /*
      if (td->debug) {
          for (int i = 0; i < 4; i++) {
              int i0 = indices[i];
              int i1 = indices[(i+1)&3];

              if (i1 < i0)
                  i1 += zarray_size(cluster);

              for (int j = i0; j <= i1; j++) {
                  struct pt *p;
                  zarray_get_volatile(cluster, j % zarray_size(cluster), &p);

                  edgeim->buf[p->y*edgeim->stride + p->x] = 30+64*i;
              }
          }
      }
    */

    unionfind_destroy(uf);

    for (int i = 0; i < zarray_size(clusters); i++) {
        zarray_t *cluster;
        zarray_get(clusters, i, &cluster);
        zarray_destroy(cluster);
    }
    zarray_destroy(clusters);

    image_u8_destroy(edgeim);

    return quads;
}
loc_t* fit_lines(image_u32_t* im, node_t* n, vx_buffer_t* buf, metrics_t met, loc_t* out)
{
    // usleep(2000000);
    srand(time(NULL));

    // isolate valid entries
    zarray_t* loc_arr = zarray_create(sizeof(loc_t*));
    for (int i = 0; i < im->height; i++) {
        if (n->sides[i].leftmost.x == im->width) continue; // not part of the blob

        loc_t* loc = malloc(sizeof(loc_t));
        loc->x = n->sides[i].leftmost.x;
        loc->y = n->sides[i].leftmost.y;
        loc->valid = 0;
        zarray_add(loc_arr, &loc);
    }
    for (int i = 0; i < im->height; i++) {
        if (n->sides[i].rightmost.x == -1) continue;

        loc_t* loc = malloc(sizeof(loc_t));
        loc->x = n->sides[i].rightmost.x;
        loc->y = n->sides[i].rightmost.y;
        loc->valid = 0;
        zarray_add(loc_arr, &loc);
    }

    // printf("\n\nall\n");
    // for(int i = 0; i < zarray_size(loc_arr); i++) {
    //     loc_t* p1;
    //     zarray_get(loc_arr, i, &p1);
    //     printf("(%d, %d)\n", p1->x, p1->y);
    // }
    // printf("\n\n");

    int iterations = 0;
    int best_score = 0;
    int lines_found = 0;
    loc_t line_match[8];
    int max_iterations = 500;

    while (lines_found < 2 && zarray_size(loc_arr) > met.num_outliers) { // still a lot of points left
        if (iterations > max_iterations) break;

        // reset image and array
        // vx_object_t *vim = vxo_image_from_u32(im, 0, 0);
        // vx_buffer_add_back(buf, vxo_pix_coords(VX_ORIGIN_BOTTOM_LEFT, vim));
        add_arr_of_locs_to_buffer(loc_arr, buf, 1.0, vx_black, met);

        int num_outliers = met.num_outliers / met.consensus_accuracy;
        while (best_score < ((zarray_size(loc_arr) - num_outliers) / (4 - lines_found))) {
            reset_locs(loc_arr);

            // pick random sample (2 points)
            loc_t* p1;
            loc_t* p2;
            zarray_get(loc_arr, (rand() % zarray_size(loc_arr)), &p1);
            p2 = p1;
            while (p1 == p2)   // don't get the same point
                zarray_get(loc_arr, (rand() % zarray_size(loc_arr)), &p2);

            // find consensus score of line from other points;
            // if inside consensus range add 1
            int tmp_score = 0;
            for (int j = 0; j < zarray_size(loc_arr); j++) {
                loc_t* tmp;
                zarray_get(loc_arr, j, &tmp);
                if (fabs(dist_from_point_to_line(p1, p2, tmp)) < (double)met.consensus_accuracy) {
                    tmp->valid = 1;
                    // printf("dist: (%d, %d, %d) %lf\n", tmp->x, tmp->y, tmp->valid,
                    //        fabs(dist_from_point_to_line(p1, p2, tmp)));
                    tmp_score++;
                }
            }

            // keep best line so far
            if (tmp_score > best_score) {
                if (lines_found != 0) {
                    // if 2nd line intersects, throw it out and find another
                    loc_t intersect = get_line_intersection(line_match[0], line_match[1], *p1, *p2);
                    if (in_range(im, intersect.x, intersect.y))
                        continue;
                }

                best_score = tmp_score;
                // printf("  score:%d, %d, %d %lf\n", best_score, ((zarray_size(loc_arr)-1)/(4-lines_found)),
                //        zarray_size(loc_arr), 10/met.std_dev_from_square);
                line_match[lines_found*2] = *p1;
                line_match[lines_found*2+1] = *p2;
            }

            iterations++;
            if (iterations > max_iterations) break;
        }

        // loc_t ext_lines[2];
        // extend_lines_to_edge_of_image(im, line_match[lines_found*2], line_match[lines_found*2+1], ext_lines);
        // add_line_to_buffer(im, buf, 2.0, ext_lines[0], ext_lines[1], vx_yellow);

        // delete all points associated with the found line
        zarray_t* endpoints_arr = zarray_create(sizeof(loc_t*));
        for (int i = 0; i < zarray_size(loc_arr); i++) {
            loc_t* tmp;
            zarray_get(loc_arr, i, &tmp);
            // printf("removed: (%d, %d, %d) \n", tmp->x, tmp->y, tmp->valid);
            if (tmp->valid) {
                // add_circle_to_buffer(buf, 1.0, *tmp, vx_red);
                zarray_add(endpoints_arr, &tmp);
                zarray_remove_index(loc_arr, i, 0);
                i--;
            }
        }

        // find endpoints of line
        loc_t ext_lines[2];
        extend_lines_to_edge_of_image(im, line_match[lines_found*2], line_match[lines_found*2+1], ext_lines);
        line_t endpoints = find_line_endpoints(endpoints_arr, &ext_lines[0], &ext_lines[1]);

        add_circle_to_buffer(buf, 2.0, endpoints.start, vx_red);
        add_circle_to_buffer(buf, 2.0, endpoints.end, vx_red);

        line_match[lines_found*2] = endpoints.start;
        line_match[lines_found*2+1] = endpoints.end;

        lines_found++;
        best_score = 0;

        // vx_buffer_swap(buf);
        // usleep(500000);
    }

    loc_t* ret = calloc(lines_found*2, sizeof(loc_t));
    for (int i = 0; i < lines_found; i++) {
        ret[i*2] = line_match[i*2];
        ret[i*2+1] = line_match[i*2+1];

        loc_t ext_lines[2];
        extend_lines_to_edge_of_image(im, line_match[i*2], line_match[i*2+1], ext_lines);
        add_line_to_buffer(im, buf, 2.0, ext_lines[0], ext_lines[1], vx_blue);
    }

    zarray_vmap(loc_arr, free);
    zarray_destroy(loc_arr);

    return ret;

    // int corners_found = 0;
    // loc_t corners[4];
    // find_corners_from_lines(im, line_match, 4, &corners_found, corners);
    // for(int i = 0; i < corners_found; i++) {
    //     printf("(%d, %d)\n", corners[i].x, corners[i].y);
    //     add_circle_to_buffer(buf, 3.0, corners[i], vx_blue);
    // }
    // printf("(%d, %d) (%d, %d) %lf\n", line_match[0].x, line_match[0].y,
    //        line_match[1].x, line_match[1].y,
    //        (double)best_score/N);
}
double* getTag(char* path)
{
    apriltag_family_t *tf = NULL;
    tf = tag36h11_create();
    tf->black_border = 1;

    apriltag_detector_t *td = apriltag_detector_create();
    apriltag_detector_add_family(td, tf);
    td->quad_decimate = 1.0;
    td->quad_sigma = 0.0;
    td->nthreads = 4;
    td->debug = 0;
    td->refine_decode = 0;
    td->refine_pose = 0;

    int quiet = 0;
    int maxiters = 1;

    const int hamm_hist_max = 10;
    int hamm_hist[hamm_hist_max];
    memset(hamm_hist, 0, sizeof(hamm_hist));

    image_u8_t *im = image_u8_create_from_pnm(path);
    if (im == NULL) {
        printf("couldn't find %s\n", path);
        return NULL;
    }

    zarray_t *detections = apriltag_detector_detect(td, im);

    nrows = zarray_size(detections);
    ncols = 9;
    if (nrows == 0)
        return NULL;

    double* output_matrix = new double[nrows*ncols];

    for (int i = 0; i < zarray_size(detections); i++) {
        apriltag_detection_t *det;
        zarray_get(detections, i, &det);

        output_matrix[ncols*i + 0] = det->id;
        for (int j = 0; j < 4; j++) {
            output_matrix[ncols*i + 2*j + 1] = det->p[j][0];
            output_matrix[ncols*i + 2*j + 2] = det->p[j][1];
        }

        hamm_hist[det->hamming]++;
        apriltag_detection_destroy(det);
    }

    zarray_destroy(detections);
    image_u8_destroy(im);

    // don't deallocate contents of inputs; those are the argv
    apriltag_detector_destroy(td);
    tag36h11_destroy(tf);

    return output_matrix;
}
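A minimal sketch of consuming getTag()'s output: each row of the returned matrix holds the tag id followed by the four corner (x, y) pairs, matching the fill loop above. nrows/ncols are the globals getTag() sets, and the function name here is illustrative.

void print_tags_example(char* path)
{
    double* m = getTag(path);
    if (m == NULL)
        return;

    for (int i = 0; i < nrows; i++) {
        printf("tag %d:", (int) m[ncols*i + 0]);
        for (int j = 0; j < 4; j++)
            printf(" (%.1f, %.1f)", m[ncols*i + 2*j + 1], m[ncols*i + 2*j + 2]);
        printf("\n");
    }

    delete[] m; // getTag() allocates the matrix with new[]
}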
int main()
{
    bool showGradient = true;
    bool found = false;

    VideoCapture cap(0); // open the default camera
    Size size(854, 480); // size of desired frame; originally 1280x720, 1024x576, 854x480
    if (!cap.isOpened()) // check if camera opened
        return -1;

    Mat frame;
    Mat src;

    /* From apriltag_demo.c */
    int maxiters = 5;
    const int hamm_hist_max = 10;
    int quiet = 0;

    apriltag_family_t *tf = tag36h11_create();             // Apriltag family 36h11, can change
    tf->black_border = 1;                                   // Set tag family border size

    apriltag_detector_t *td = apriltag_detector_create();  // Apriltag detector
    apriltag_detector_add_family(td, tf);                   // Add apriltag family
    td->quad_decimate = 1.0;                                // Decimate input image by factor
    td->quad_sigma = 0.0;                                   // No blur (I think)
    td->nthreads = 4;                                       // 4 threads provided
    td->debug = 0;                                          // No debugging output
    td->refine_decode = 0;                                  // Don't refine decode
    td->refine_pose = 0;                                    // Don't refine pose

    // Output variables
    char imgSize[20];
    char renderTime[20];
    char detectString[50];
    char convertTime[50] = "";                              // never written below; initialize so sprintf doesn't read garbage
    char displayString[120];
    char outputString[120];
    char locationString[120];
    double time_taken = 0.0;
    long double totalFPS = 0.0;
    double count = 0.0;
    /* End of apriltag_demo.c */

    while (1) {
        clock_t t;
        t = clock();

        cap >> src; // Get a new frame from camera

        if (found) { resize(src, frame, size); } // Resize to smaller image if tag found
        else       { frame = src; }              // Keep standard image if no tag

        if (showGradient) {
            cvtColor(src, frame, CV_BGR2GRAY);
            cvtColor(frame, frame, CV_GRAY2RGB);
            src = gradientEdges(frame);          // Show gradient for fun
        } else {
            cvtColor(src, src, CV_BGR2GRAY);
        }

        pnm_t *pnm = mat2pnm(&frame);            // Convert Mat frame to pnm
        image_u8_t *im = pnm_to_image_u8(pnm);   // Convert pnm to gray image_u8
        if (im == NULL) {                        // Error - no image created from pnm
            std::cout << "Error, not a proper pnm" << std::endl;
            return -1;
        }

        /*** Start from original Apriltags from apriltag_demo.c ***/
        int hamm_hist[hamm_hist_max];
        memset(hamm_hist, 0, sizeof(hamm_hist));

        zarray_t *detections = apriltag_detector_detect(td, im);
        for (int i = 0; i < zarray_size(detections); i++) {
            apriltag_detection_t *det;
            zarray_get(detections, i, &det);

            sprintf(locationString, "Tag Center: (%f,%f)", det->c[0], det->c[1]);
            sprintf(detectString, "detection %2d: id (%2dx%2d)-%-4d, hamming %d, goodness %5.3f, margin %5.3f\n",
                    i+1, det->family->d*det->family->d, det->family->h, det->id,
                    det->hamming, det->goodness, det->decision_margin);

            hamm_hist[det->hamming]++;

            // draws a vertical rectangle around tag, not ideal, but easy to implement
            // det->p[corner][position], counter clockwise
            Point pt1 = Point(det->p[0][0], det->p[0][1]);
            Point pt2 = Point(det->p[2][0], det->p[2][1]);
            cv::rectangle(src, pt1, pt2, cvScalar(102, 255, 0));

            apriltag_detection_destroy(det);
        }

        if (zarray_size(detections) < 1) {
            found = false;
            sprintf(detectString, "No tag detected");
            sprintf(locationString, "No tag detected");
        } else {
            found = false;
        }

        zarray_destroy(detections);
        image_u8_destroy(im);

        t = clock() - t;
        double time_taken = ((double)t)/(CLOCKS_PER_SEC/1000);
        //printf("ms to render: %5.3f\n", time_taken);

        if (!quiet) {
            //timeprofile_display(td->tp);
            totalFPS += (1000.0/time_taken);
            count += 1.0;
            if (count > 30000.0) {
                totalFPS = 0.0;
                count = 0.0;
            }
            sprintf(displayString, "fps: %2.2Lf, nquads: %d", totalFPS/count, td->nquads);
            //std::cout << displayString;
        }

        //for (int i = 0; i < hamm_hist_max; i++)
        //    printf("%5d", hamm_hist[i]);

        sprintf(renderTime, "Render: %5.3fms", time_taken);
        sprintf(imgSize, "%dx%d", frame.cols, frame.rows);
        sprintf(outputString, "%s %s %s", renderTime, convertTime, imgSize);
        printf("%s %s\r", detectString, outputString);

        if (quiet) {
            printf("%12.3f", timeprofile_total_utime(td->tp) / 1.0E3);
        }

        printf("\n");
        /*** End of original Apriltags from apriltag_demo.c ***/

        // displays fps, edges, segments, quads
        putText(src, displayString, cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA);
        // displays render time, convert time, and image size
        putText(src, outputString, cvPoint(30,50), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA);
        // Displays any detections (if any)
        putText(src, detectString, cvPoint(30,70), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA);
        // Displays tag location (if any)
        putText(src, locationString, cvPoint(30,90), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(150,150,250), 1, CV_AA);

        imshow("Display Apriltags", src);
        if (waitKey(30) >= 0)
            break;
    }

    /* deallocate apriltag constructs */
    apriltag_detector_destroy(td);
    tag36h11_destroy(tf);

    return 0;
}
vx_object_t * vxo_objmtl(const char * obj_filename)
{
    int texture_flag = 0;

    FILE * fp_obj = fopen(obj_filename, "r");
    if (fp_obj == NULL)
        return NULL;

#define LNSZ 1024
    char line_buffer[LNSZ];

    // Store 3D vertices by value
    zarray_t * vertices = zarray_create(sizeof(float)*3);
    zarray_t * textures = zarray_create(sizeof(float)*3);
    zarray_t * normals = zarray_create(sizeof(float)*3);

    wav_group_t * cur_group = NULL;
    zarray_t * group_list = zarray_create(sizeof(wav_group_t*));
    zhash_t * mtl_map = NULL; // created on reading mtllib entry
    //zhash_t * obj_map = zhash_create(sizeof(char*), sizeof(vx_object_t*), zhash_str_hash, zhash_str_equals);

    // Read in the entire file, save vertices, and indices for later processing.
    while (1) {
        int eof = fgets(line_buffer, LNSZ, fp_obj) == NULL;
        char * line = str_trim(line_buffer);

        // If possible, batch process the last group
        if (str_starts_with(line, "g ") || eof) {
            if (cur_group != NULL) {
                assert(cur_group->group_idx != NULL);
                zarray_add(group_list, &cur_group);
                cur_group = NULL;
            }
        }

        if (eof)
            break;

        if (str_starts_with(line, "#") || strlen(line) == 0 || !strcmp(line, "\r"))
            continue;

        if (str_starts_with(line, "g ")) {
            assert(mtl_map != NULL);

            char obj_name[LNSZ];
            sscanf(line, "g %s", obj_name);

            cur_group = calloc(1, sizeof(wav_group_t));
            cur_group->group_idx = zarray_create(sizeof(tri_idx_t));
        } else if (str_starts_with(line, "v ")) {
            float vertex[3];
            sscanf(line, "v %f %f %f", &vertex[0], &vertex[1], &vertex[2]);
            zarray_add(vertices, &vertex);
        } else if (str_starts_with(line, "vn ")) {
            float normal[3];
            sscanf(line, "vn %f %f %f", &normal[0], &normal[1], &normal[2]);
            zarray_add(normals, &normal);
        } else if (str_starts_with(line, "vt ")) {
            texture_flag = 1;
            float texture[3];
            sscanf(line, "vt %f %f %f", &texture[0], &texture[1], &texture[2]);
            zarray_add(textures, &texture);
        } else if (str_starts_with(line, "f ")) {
            tri_idx_t idxs;
            if (texture_flag) {
                sscanf(line, "f %d/%d/%d %d/%d/%d %d/%d/%d",
                       &idxs.vIdxs[0], &idxs.tIdxs[0], &idxs.nIdxs[0],
                       &idxs.vIdxs[1], &idxs.tIdxs[1], &idxs.nIdxs[1],
                       &idxs.vIdxs[2], &idxs.tIdxs[2], &idxs.nIdxs[2]);
            } else {
                sscanf(line, "f %d//%d %d//%d %d//%d",
                       &idxs.vIdxs[0], &idxs.nIdxs[0],
                       &idxs.vIdxs[1], &idxs.nIdxs[1],
                       &idxs.vIdxs[2], &idxs.nIdxs[2]);
            }
            zarray_add(cur_group->group_idx, &idxs);
        } else if (str_starts_with(line, "usemtl ")) {
            char * mname = calloc(1, sizeof(char)*1024);
            sscanf(line, "usemtl %s", mname);
            zhash_get(mtl_map, &mname, &cur_group->material);
            free(mname);
        } else if (str_starts_with(line, "s ")) {
            // No idea what to do with smoothing instructions
        } else if (str_starts_with(line, "mtllib ")) {
            char * cur_path = strdup(obj_filename);
            const char * dir_name = dirname(cur_path);

            char mtl_basename[LNSZ];
            sscanf(line, "mtllib %s", mtl_basename);

            char mtl_filename[LNSZ];
            sprintf(mtl_filename, "%s/%s", dir_name, mtl_basename);

            mtl_map = load_materials(mtl_filename);
            if (mtl_map == NULL) {
                zarray_destroy(vertices);
                zarray_destroy(normals);
                return NULL; // XXX cleanup!
            }
            free(cur_path);
        } else {
            printf("Did not parse: %s\n", line);
            for (int i = 0; i < strlen(line); i++) {
                printf("0x%x ", (int)line[i]);
            }
            printf("\n");
        }
    }

    if (1) // useful to enable when compensating for model scale
        print_bounds(vertices);

    // Process the model sections in two passes -- first add all the
    // objects which are not transparent. Then render transparent
    // objects after.
    vx_object_t * vchain = vxo_chain_create();

    zarray_t * sorted_groups = zarray_create(sizeof(wav_group_t*));
    for (int i = 0, sz = zarray_size(group_list); i < sz; i++) {
        wav_group_t * group = NULL;
        zarray_get(group_list, i, &group);

        // add to front if solid
        if (group->material.d == 1.0f) {
            zarray_insert(sorted_groups, 0, &group);
        } else { // add to back if transparent
            zarray_add(sorted_groups, &group);
        }
    }

    int total_triangles = 0;
    for (int i = 0, sz = zarray_size(sorted_groups); i < sz; i++) {
        wav_group_t * group = NULL;
        zarray_get(sorted_groups, i, &group);

        int ntri = zarray_size(group->group_idx);

        vx_resc_t * vert_resc = vx_resc_createf(ntri*9);
        vx_resc_t * norm_resc = vx_resc_createf(ntri*9);

        for (int j = 0; j < ntri; j++) {
            tri_idx_t idxs;
            zarray_get(group->group_idx, j, &idxs);

            for (int k = 0; k < 3; k++) {
                zarray_get(vertices, idxs.vIdxs[k]-1, &((float*)vert_resc->res)[9*j + k*3]);
                zarray_get(normals,  idxs.nIdxs[k]-1, &((float*)norm_resc->res)[9*j + k*3]);
            }
        }

        vx_style_t * sty = vxo_mesh_style_fancy(group->material.Ka, group->material.Kd, group->material.Ks,
                                                group->material.d, group->material.Ns, group->material.illum);

        vxo_chain_add(vchain, vxo_mesh(vert_resc, ntri*3, norm_resc, GL_TRIANGLES, sty));
        total_triangles += ntri;
    }

    // Cleanup:
    // 1. Materials, names are by reference, but materials are by value
    zhash_vmap_keys(mtl_map, free);
    zhash_destroy(mtl_map);

    // 2. Geometry
    zarray_destroy(vertices); // stored by value
    zarray_destroy(normals);  // stored by value

    // 2b wav_group_t are stored by reference
    zarray_vmap(group_list, wav_group_destroy);
    zarray_destroy(group_list);
    zarray_destroy(sorted_groups); // duplicate list, so don't need to free

    fclose(fp_obj);

    return vchain;
}
int main(int argc, char *argv[])
{
    april_tag_family_t *tf = tag36h11_create();
    april_tag_detector_t *td = april_tag_detector_create(tf);
    td->small_tag_refinement = 0;

    int maxiters = 1;

    zarray_t *inputs = zarray_create(sizeof(char*));
    int waitsec = 0;

    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-d"))
            td->debug = 1;
        else if (!strcmp(argv[i], "-t"))
            td->nthreads = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-f"))
            td->seg_decimate = (i+1 < argc && isdigit(argv[i+1][0])) ? atoi(argv[++i]) : 2;
        else if (!strcmp(argv[i], "-i"))
            maxiters = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-r"))
            td->small_tag_refinement = 1;
        else if (!strcmp(argv[i], "-w"))
            waitsec = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-b"))
            td->seg_sigma = atof(argv[++i]);
        /*
        else if (!strcmp(argv[i], "--family")) {
            char *fam = argv[++i];
            if (!strcmp(fam, "36h11"))
                td->tag_family = tag36h11_create();
            else if (!strcmp(fam, "36h10"))
                td->tag_family = tag36h10_create();
        }
        */
        else
            zarray_add(inputs, &argv[i]);
    }

    for (int iter = 0; iter < maxiters; iter++) {
        if (maxiters > 1)
            printf("iter %d / %d\n", iter + 1, maxiters);

        for (int input = 0; input < zarray_size(inputs); input++) {
            char *path;
            zarray_get(inputs, input, &path);
            printf("loading %s\n", path);

            image_u8_t *im = image_u8_create_from_pnm(path);
            if (im == NULL) {
                printf("couldn't find %s\n", path);
                continue;
            }

            zarray_t *detections = april_tag_detector_detect(td, im);

            for (int i = 0; i < zarray_size(detections); i++) {
                april_tag_detection_t *det;
                zarray_get(detections, i, &det);

                printf("detection %3d: id %4d, hamming %d, goodness %f\n",
                       i, det->id, det->hamming, det->goodness);

                april_tag_detection_destroy(det);
            }

            zarray_destroy(detections);

            timeprofile_display(td->tp);
            printf("nedges: %d, nsegments: %d, nquads: %d\n", td->nedges, td->nsegments, td->nquads);

            image_u8_destroy(im);

            if (zarray_size(inputs) > 1 || iter > 0)
                sleep(waitsec);
        }
    }

    april_tag_detector_destroy(td);
    tag36h11_destroy(tf);

    return 0;
}
void cam_callback(const sensor_msgs::ImageConstPtr &image,
                  const sensor_msgs::CameraInfoConstPtr &cinfo) {
  // Get camera info
  static bool init_cam = false;
  static cv::Mat K = cv::Mat::zeros(cv::Size(3, 3), CV_64F);
  static cv::Mat D = cv::Mat::zeros(cv::Size(1, 5), CV_64F);

  // Stop if camera not calibrated
  if (cinfo->K[0] == 0.0) throw std::runtime_error("Camera not calibrated.");

  // TODO: convert to function later
  // Assign camera info only once
  if (!init_cam) {
    for (int i = 0; i < 3; ++i) {
      double *pk = K.ptr<double>(i);
      for (int j = 0; j < 3; ++j) {
        pk[j] = cinfo->K[3 * i + j];
      }
    }
    double *pd = D.ptr<double>();
    for (int k = 0; k < 5; k++) {
      pd[k] = cinfo->D[k];
    }
    init_cam = true;
  }

  // use cv_bridge and convert to grayscale image
  cv_bridge::CvImagePtr cv_ptr;
  // use toCvCopy because we will modify the image
  cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::MONO8);

  cv::Mat image_rgb;
  cv::cvtColor(cv_ptr->image, image_rgb, CV_GRAY2RGB);

#if defined(BUILD_UMICH)
  // Use apriltag_umich
  // Currently not using this version
  static april_tag_family_t *tf = tag36h11_create();
  static april_tag_detector_t *td = april_tag_detector_create(tf);

  image_u8_t *im = image_u8_create_from_gray(
      cv_ptr->image.cols, cv_ptr->image.rows, cv_ptr->image.data);
  zarray_t *detections = april_tag_detector_detect(td, im);
  ROS_INFO("Tags detected: %d", zarray_size(detections));

  for (size_t i = 0; i < zarray_size(detections); i++) {
    april_tag_detection_t *det;
    zarray_get(detections, i, &det);
    for (int j = 0; j < 4; j++) {
      const Point2 p = Point2(det->p[j][0], det->p[j][1]);
    }
    april_tag_detection_destroy(det);
  }

  zarray_destroy(detections);
  image_u8_destroy(im);
#elif defined(BUILD_MIT)
  // Use apriltag_mit
  static AprilTags::TagDetector tag_detector(AprilTags::tagCodes36h11);
  std::vector<AprilTags::TagDetection> detections =
      tag_detector.extractTags(cv_ptr->image);

  // Check detection size, only do work if there's a tag detected
  if (detections.size()) {
    std::vector<Point2> pi;  // Points in image
    std::vector<Point3> pw;  // Points in world

    for (auto it = detections.begin(); it != detections.end(); it++) {
      const int id = it->id;
      const Point2 c2 = Point2(it->cxy.first, it->cxy.second);

      for (int j = 0; j < 4; j++) {
        const Point2 p2 = Point2(it->p[j].first, it->p[j].second);
        pi.push_back(p2);
        Point3 p3(tagsWorld[id].p[j].x, tagsWorld[id].p[j].y, 0.0);
        pw.push_back(p3);

        // Display tag corners
        cv::circle(image_rgb, p2, 6, colors[j], 2);
      }

      // Display tag id
      std::ostringstream ss;
      ss << id;
      auto color = cv::Scalar(0, 255, 255);
      if (tagsWorld.find(id) != tagsWorld.end()) {
        color = cv::Scalar(255, 255, 0);
      }
      cv::putText(image_rgb, ss.str(), Point2(c2.x - 5, c2.y + 5),
                  cv::FONT_HERSHEY_PLAIN, 2, color, 2);
    }

    // Get pose
    static cv::Mat r = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    static cv::Mat cTw = cv::Mat::zeros(cv::Size(1, 3), CV_64F);
    cv::Mat wTc(cv::Size(3, 3), CV_64F);
    cv::Mat cRw(cv::Size(3, 3), CV_64F), wRc(cv::Size(3, 3), CV_64F);

    cv::solvePnP(pw, pi, K, D, r, cTw, false);
    cv::Rodrigues(r, cRw);
    wRc = cRw.inv();
    wTc = -wRc * cTw;
    // ROS_INFO("%f, %f, %f", r.at<double>(0,0), r.at<double>(1,0),
    //          r.at<double>(2,0));
    cv::Mat q = rodriguesToQuat(r);

    // Publish
    geometry_msgs::PoseStamped pose_cam;
    pose_cam.header.stamp = image->header.stamp;
    pose_cam.header.frame_id = "0";

    double *pt = wTc.ptr<double>();
    pose_cam.pose.position.x = pt[0];
    pose_cam.pose.position.y = pt[1];
    pose_cam.pose.position.z = pt[2];

    double *pq = q.ptr<double>();
    pose_cam.pose.orientation.w = pq[0];
    pose_cam.pose.orientation.x = pq[1];
    pose_cam.pose.orientation.y = pq[2];
    pose_cam.pose.orientation.z = pq[3];

    pose_pub.publish(pose_cam);
  }
#endif

  // Publish image
  cv_bridge::CvImage cv_image(image->header,
                              sensor_msgs::image_encodings::BGR8, image_rgb);
  image_pub.publish(cv_image.toImageMsg());
  // cv::imshow("image", image_rgb);
  // cv::waitKey(1);
}
int main(int argc, char *argv[]) { getopt_t *getopt = getopt_create(); getopt_add_bool(getopt, 'h', "help", 0, "Show this help"); getopt_add_bool(getopt, 'd', "debug", 0, "Enable debugging output (slow)"); getopt_add_bool(getopt, 'q', "quiet", 0, "Reduce output"); getopt_add_string(getopt, 'f', "family", "tag36h11", "Tag family to use"); getopt_add_int(getopt, '\0', "border", "1", "Set tag family border size"); getopt_add_int(getopt, 'i', "iters", "1", "Repeat processing on input set this many times"); getopt_add_int(getopt, 't', "threads", "4", "Use this many CPU threads"); getopt_add_double(getopt, 'x', "decimate", "1.0", "Decimate input image by this factor"); getopt_add_double(getopt, 'b', "blur", "0.0", "Apply low-pass blur to input"); getopt_add_bool(getopt, '1', "refine-decode", 0, "Spend more time trying to decode tags"); getopt_add_bool(getopt, '2', "refine-pose", 0, "Spend more time trying to precisely localize tags"); if (!getopt_parse(getopt, argc, argv, 1) || getopt_get_bool(getopt, "help")) { printf("Usage: %s [options] <input files>\n", argv[0]); getopt_do_usage(getopt); exit(0); } const zarray_t *inputs = getopt_get_extra_args(getopt); apriltag_family_t *tf = NULL; const char *famname = getopt_get_string(getopt, "family"); if (!strcmp(famname, "tag36h11")) tf = tag36h11_create(); else if (!strcmp(famname, "tag36h10")) tf = tag36h10_create(); else if (!strcmp(famname, "tag36artoolkit")) tf = tag36artoolkit_create(); else if (!strcmp(famname, "tag25h9")) tf = tag25h9_create(); else if (!strcmp(famname, "tag25h7")) tf = tag25h7_create(); else { printf("Unrecognized tag family name. Use e.g. \"tag36h11\".\n"); exit(-1); } tf->black_border = getopt_get_int(getopt, "border"); apriltag_detector_t *td = apriltag_detector_create(); apriltag_detector_add_family(td, tf); td->quad_decimate = getopt_get_double(getopt, "decimate"); td->quad_sigma = getopt_get_double(getopt, "blur"); td->nthreads = getopt_get_int(getopt, "threads"); td->debug = getopt_get_bool(getopt, "debug"); td->refine_decode = getopt_get_bool(getopt, "refine-decode"); td->refine_pose = getopt_get_bool(getopt, "refine-pose"); int quiet = getopt_get_bool(getopt, "quiet"); int maxiters = getopt_get_int(getopt, "iters"); const int hamm_hist_max = 10; for (int iter = 0; iter < maxiters; iter++) { if (maxiters > 1) printf("iter %d / %d\n", iter + 1, maxiters); for (int input = 0; input < zarray_size(inputs); input++) { int hamm_hist[hamm_hist_max]; memset(hamm_hist, 0, sizeof(hamm_hist)); char *path; zarray_get(inputs, input, &path); if (!quiet) printf("loading %s\n", path); image_u8_t *im = image_u8_create_from_pnm(path); if (im == NULL) { printf("couldn't find %s\n", path); continue; } zarray_t *detections = apriltag_detector_detect(td, im); for (int i = 0; i < zarray_size(detections); i++) { apriltag_detection_t *det; zarray_get(detections, i, &det); if (!quiet) printf("detection %3d: id (%2dx%2d)-%-4d, hamming %d, goodness %8.3f, margin %8.3f\n", i, det->family->d*det->family->d, det->family->h, det->id, det->hamming, det->goodness, det->decision_margin); hamm_hist[det->hamming]++; apriltag_detection_destroy(det); } zarray_destroy(detections); if (!quiet) { timeprofile_display(td->tp); printf("nedges: %d, nsegments: %d, nquads: %d\n", td->nedges, td->nsegments, td->nquads); } if (!quiet) printf("Hamming histogram: "); for (int i = 0; i < hamm_hist_max; i++) printf("%5d", hamm_hist[i]); if (quiet) { printf("%12.3f", timeprofile_total_utime(td->tp) / 1.0E3); } printf("\n"); image_u8_destroy(im); } } // don't deallocate 
contents of inputs; those are the argv apriltag_detector_destroy(td); // destroy the family that was actually created above rather than assuming tag36h11 if (!strcmp(famname, "tag36h11")) tag36h11_destroy(tf); else if (!strcmp(famname, "tag36h10")) tag36h10_destroy(tf); else if (!strcmp(famname, "tag36artoolkit")) tag36artoolkit_destroy(tf); else if (!strcmp(famname, "tag25h9")) tag25h9_destroy(tf); else if (!strcmp(famname, "tag25h7")) tag25h7_destroy(tf); return 0; }
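// A minimal, hedged usage sketch of the same detection pipeline without the option
// parsing: one PNM image, the default tag36h11 family, default detector settings.
// It only uses calls that appear in main() above; the header names are assumptions
// about the library layout, and error handling is trimmed to the essentials.
#include <stdio.h>
#include "apriltag.h"
#include "tag36h11.h"

static int detect_one_image(const char *path)
{
    image_u8_t *im = image_u8_create_from_pnm(path);
    if (im == NULL)
        return -1;

    apriltag_family_t *tf = tag36h11_create();
    apriltag_detector_t *td = apriltag_detector_create();
    apriltag_detector_add_family(td, tf);

    zarray_t *detections = apriltag_detector_detect(td, im);
    for (int i = 0; i < zarray_size(detections); i++) {
        apriltag_detection_t *det;
        zarray_get(detections, i, &det);
        printf("id %d, hamming %d\n", det->id, det->hamming);
        apriltag_detection_destroy(det);
    }
    zarray_destroy(detections);

    image_u8_destroy(im);
    apriltag_detector_destroy(td);
    tag36h11_destroy(tf);
    return 0;
}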
// Pass in codes describing which resources are no longer in use. Decrement user counts, // and issue an OP_DEALLOC_RESOURCES opcode for every resource whose count has reached zero, // so that it is deleted from the display void vx_resc_manager_buffer_resources(vx_resc_manager_t * mgr, const uint8_t * data, int datalen) { if (0) print_manager(mgr); vx_code_input_stream_t * cins = vx_code_input_stream_create(data, datalen); int code = cins->read_uint32(cins); assert(code == OP_BUFFER_RESOURCES); int worldID = cins->read_uint32(cins); char * name = strdup(cins->read_str(cins)); //freed when cur_resources is eventually removed from the buffer map int count = cins->read_uint32(cins); zhash_t * cur_resources = zhash_create(sizeof(uint64_t), sizeof(vx_resc_t*), zhash_uint64_hash, zhash_uint64_equals); vx_resc_t * vr = NULL; for (int i = 0; i < count; i++) { uint64_t id = cins->read_uint64(cins); zhash_put(cur_resources, &id, &vr, NULL, NULL); } assert(cins->pos == cins->len); // we've emptied the stream vx_code_input_stream_destroy(cins); // 1 Update our records zhash_t * worldBuffers = NULL; zhash_get(mgr->allLiveSets, &worldID, &worldBuffers); if (worldBuffers == NULL) { worldBuffers = zhash_create(sizeof(char*), sizeof(zhash_t*), zhash_str_hash, zhash_str_equals); zhash_put(mgr->allLiveSets, &worldID, &worldBuffers, NULL, NULL); } zhash_t * old_resources = NULL; char * old_name = NULL; zhash_put(worldBuffers, &name, &cur_resources, &old_name, &old_resources); free(old_name); // 2 Figure out which resources have become unused: if(old_resources != NULL) { removeAll(old_resources, cur_resources); zarray_t * dealloc = zarray_create(sizeof(uint64_t)); // now 'old_resources' contains only the resources that are no longer referenced // iterate through each one, and see if there is a buffer somewhere that references it zhash_iterator_t prev_itr; zhash_iterator_init(old_resources, &prev_itr); uint64_t id = -1; vx_resc_t * vr = NULL; while(zhash_iterator_next(&prev_itr, &id, &vr)) { // Check all worlds zhash_iterator_t world_itr;// gives us all worlds zhash_iterator_init(mgr->allLiveSets, &world_itr); uint32_t wIDl = -1; zhash_t * buffer_map = NULL; while(zhash_iterator_next(&world_itr, &wIDl, &buffer_map)) { zhash_iterator_t buffer_itr; // gives us all buffers zhash_iterator_init(buffer_map, &buffer_itr); char * bName = NULL; zhash_t * resc_map = NULL; while(zhash_iterator_next(&buffer_itr, &bName, &resc_map)) { if (zhash_contains(resc_map, &id)) { goto continue_outer_loop; } } } // If none of the worlds have this resource, we need to flag removal zarray_add(dealloc, &id); continue_outer_loop: ; } // 3 Issue dealloc commands if (zarray_size(dealloc) > 0) { vx_code_output_stream_t * couts = vx_code_output_stream_create(512); couts->write_uint32(couts, OP_DEALLOC_RESOURCES); couts->write_uint32(couts, zarray_size(dealloc)); for (int i = 0; i < zarray_size(dealloc); i++) { uint64_t id = 0; zarray_get(dealloc, i, &id); couts->write_uint64(couts, id); } mgr->disp->send_codes(mgr->disp, couts->data, couts->pos); vx_code_output_stream_destroy(couts); // Also remove the resources we deallocated from remoteResc for (int i = 0; i < zarray_size(dealloc); i++) { uint64_t id = 0; zarray_get(dealloc, i, &id); assert(zhash_contains(mgr->remoteResc, &id)); zhash_remove(mgr->remoteResc, &id, NULL, NULL); } } zarray_destroy(dealloc); zhash_destroy(old_resources); } if (0) { print_manager(mgr); printf("\n\n"); } }
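// removeAll() is called above but not shown in this listing. Below is a minimal
// sketch of what it is assumed to do, based on the comment that follows its call
// site: strip from 'dst' every key that also appears in 'src', leaving 'dst' with
// only the entries unique to it. The name and signature are taken from the call
// above; the body is an assumption, not the original implementation.
static void removeAll(zhash_t * dst, zhash_t * src)
{
    zhash_iterator_t itr;
    zhash_iterator_init(src, &itr);

    uint64_t id = 0;
    vx_resc_t * vr = NULL;
    while (zhash_iterator_next(&itr, &id, &vr)) {
        // iterate the survivor set and drop matching keys from dst,
        // so we never mutate the hash we are iterating over
        zhash_remove(dst, &id, NULL, NULL);
    }
}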
// returns 1 if no error int getopt_parse (getopt_t *gopt, int argc, char *argv[], int showErrors) { int okay = 1; zarray_t *toks = zarray_create (sizeof(char*)); // take the input stream and chop it up into tokens for (int i = 1; i < argc; i++) { char *arg = strdup (argv[i]); if (arg[0] != '-') { // if this isn't an option, put the whole thing in the args list. zarray_add (toks, &arg); } else { // this is an option. It could be a flag (like -v), or an option // with arguments (--file=foobar.txt). char *eq = strstr (arg, "="); // no equal sign? Push the whole thing. if (eq == NULL) { zarray_add (toks, &arg); } else { // there was an equal sign. Push the part // before and after the equal sign char *val = strdup (&eq[1]); eq[0] = 0; zarray_add (toks, &arg); // if the part after the equal sign is // enclosed by quotation marks, strip them. if (val[0]=='\"') { int last = strlen (val) - 1; if (val[last]=='\"') val[last] = 0; char *valclean = strdup (&val[1]); zarray_add (toks, &valclean); free (val); } else zarray_add (toks, &val); } } } // now loop over the elements and evaluate the arguments unsigned int i = 0; char *tok = NULL; while (i < zarray_size (toks)) { // rather than free statement throughout this while loop if (tok != NULL) free (tok); zarray_get (toks, i, &tok); if (0==strncmp (tok,"--", 2)) { char *optname = &tok[2]; getopt_option_t *goo = NULL; zhash_get (gopt->lopts, &optname, &goo); if (goo == NULL) { okay = 0; if (showErrors) printf ("Unknown option --%s\n", optname); i++; continue; } goo->was_specified = 1; if (goo->type == GOO_BOOL_TYPE) { if ((i+1) < zarray_size (toks)) { char *val = NULL; zarray_get (toks, i+1, &val); if (0==strcmp (val,"true")) { i+=2; getopt_modify_string (&goo->svalue, val); continue; } if (0==strcmp (val,"false")) { i+=2; getopt_modify_string (&goo->svalue, val); continue; } } getopt_modify_string (&goo->svalue, strdup("true")); i++; continue; } if (goo->type == GOO_STRING_TYPE) { // TODO: check whether next argument is an option, denoting missing argument if ((i+1) < zarray_size (toks)) { char *val = NULL; zarray_get (toks, i+1, &val); i+=2; getopt_modify_string (&goo->svalue, val); continue; } okay = 0; if (showErrors) printf ("Option %s requires a string argument.\n",optname); } } if (0==strncmp (tok,"-",1) && strncmp (tok,"--",2)) { int len = strlen (tok); int pos; for (pos = 1; pos < len; pos++) { char sopt[2]; sopt[0] = tok[pos]; sopt[1] = 0; char *sopt_ptr = (char*) &sopt; getopt_option_t *goo = NULL; zhash_get (gopt->sopts, &sopt_ptr, &goo); if (goo==NULL) { // is the argument a numerical literal that happens to be negative? if (pos==1 && isdigit (tok[pos])) { zarray_add (gopt->extraargs, &tok); tok = NULL; break; } else { okay = 0; if (showErrors) printf ("Unknown option -%c\n", tok[pos]); i++; continue; } } goo->was_specified = 1; if (goo->type == GOO_BOOL_TYPE) { getopt_modify_string (&goo->svalue, strdup("true")); continue; } if (goo->type == GOO_STRING_TYPE) { if ((i+1) < zarray_size (toks)) { char *val = NULL; zarray_get (toks, i+1, &val); // TODO: allow negative numerical values for short-name options ? if (val[0]=='-') { okay = 0; if (showErrors) printf ("Ran out of arguments for option block %s\n", tok); } i++; getopt_modify_string (&goo->svalue, val); continue; } okay = 0; if (showErrors) printf ("Option -%c requires a string argument.\n", tok[pos]); } } i++; continue; } // it's not an option-- it's an argument. zarray_add (gopt->extraargs, &tok); tok = NULL; i++; } if (tok != NULL) free (tok); zarray_destroy (toks); return okay; }
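// A hedged usage sketch for getopt_parse(): it shows how the tokenizer above splits
// "--opt=value" pairs, strips surrounding quotes from the value, and routes
// non-option arguments into the extra-args list. The option names here are made up
// for illustration; the getopt_* calls are the ones used elsewhere in this file.
void getopt_parse_example(void)
{
    getopt_t *gopt = getopt_create();
    getopt_add_bool(gopt, 'v', "verbose", 0, "Chatty output");
    getopt_add_string(gopt, 'f', "file", "", "Input file");

    char *argv_demo[] = { "prog", "--file=\"foo bar.pnm\"", "-v", "extra.pnm" };
    int ok = getopt_parse(gopt, 4, argv_demo, 1);

    // ok == 1; "file" is now "foo bar.pnm" (quotes stripped), "verbose" is true,
    // and "extra.pnm" ends up in the extra-args list.
    if (ok) {
        printf("file=%s verbose=%d nextra=%d\n",
               getopt_get_string(gopt, "file"),
               getopt_get_bool(gopt, "verbose"),
               zarray_size(getopt_get_extra_args(gopt)));
    }
}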
// returns the 35 points associated with the test chart in [x1,y1,x2,y2] format; // if there are more than 35 points, returns NULL matd_t* build_homography(image_u32_t* im, vx_buffer_t* buf, metrics_t met) { frame_t frame = {{0,0}, {im->width-1, im->height-1}, {0,0}, {1,1}}; int good_size = 0; zarray_t* blobs = zarray_create(sizeof(node_t)); hsv_find_balls_blob_detector(im, frame, met, blobs, buf); // remove unqualified blobs if(met.qualify) { for(int i = 0; i < zarray_size(blobs); i++) { node_t n; zarray_get(blobs, i, &n); if(!blob_qualifies(im, &n, met, buf)) { zarray_remove_index(blobs, i, 0); i--; /* step back so the blob shifted into slot i is not skipped */ } } } if(zarray_size(blobs) == NUM_TARGETS || zarray_size(blobs) == NUM_CHART_BLOBS) good_size = 1; zarray_sort(blobs, compare); int pix_array[zarray_size(blobs)*2]; // iterate through int idx = 0; double size = 2.0; for(int i = 0; i < zarray_size(blobs); i++) { node_t n; zarray_get(blobs, i, &n); loc_t center = { .x = n.ave_loc.x/n.num_children, .y = n.ave_loc.y/n.num_children}; loc_t parent = { .x = n.id % im->stride, .y = n.id / im->stride}; if(buf != NULL) { add_circle_to_buffer(buf, size, center, vx_maroon); // add_circle_to_buffer(buf, size, parent, vx_olive); // add_sides_to_buffer(im, buf, 1.0, &n, vx_orange, met); loc_t* lp = fit_lines(im, &n, buf, met, NULL); if(lp != NULL) { // printf("(%d, %d) (%d, %d) (%d, %d) (%d, %d) \n", // lp[0].x, lp[0].y, lp[1].x, lp[1].y, lp[2].x, lp[2].y, lp[3].x, lp[3].y); loc_t intersect = get_line_intersection(lp[0], lp[1], lp[2], lp[3]); if(in_range(im, intersect.x, intersect.y)) { loc_t ext_lines[2]; extend_lines_to_edge_of_image(im, intersect, center, ext_lines); add_line_to_buffer(im, buf, 2.0, ext_lines[0], ext_lines[1], vx_blue); } for(int i = 0; i < 4; i++) { pix_array[i*2] = lp[i].x; pix_array[i*2+1] = lp[i].y; add_circle_to_buffer(buf, 3.0, lp[i], vx_orange); } } free(n.sides); // loc_t corners[4] = {{n.box.right, n.box.top}, // {n.box.right, n.box.bottom}, // {n.box.left, n.box.bottom}, // {n.box.left, n.box.top}}; // print extremes of box // if(1) { // add_circle_to_buffer(buf, size, corners[0], vx_green); // add_circle_to_buffer(buf, size, corners[1], vx_yellow); // add_circle_to_buffer(buf, size, corners[2], vx_red); // add_circle_to_buffer(buf, size, corners[3], vx_blue); // for(int j = 0; j < 4; j++) { // // add_circle_to_buffer(buf, size, corners[j], vx_maroon); // } // } } } matd_t* H; H = dist_homography(pix_array, NUM_TARGETS); // if(0) {//zarray_size(blobs) == NUM_CHART_BLOBS){ // H = dist_homography(pix_array, NUM_CHART_BLOBS); // } // else if(zarray_size(blobs) == NUM_TARGETS){ // H = dist_homography(pix_array, NUM_TARGETS); // if(met.add_lines) connect_lines(blobs, buf); // } // else { // if(met.dothis) // printf("num figures: %d\n", zarray_size(blobs)); // return(NULL); // } // make projected points // project_measurements_through_homography(H, buf, blobs, zarray_size(blobs)); zarray_destroy(blobs); return(H); } /* { R00, R01, R02, TX, R10, R11, R12, TY, R20, R21, R22, TZ, 0, 0, 0, 1 }); */ double get_rotation(const char* axis, matd_t* H) { double cosine, sine, theta; if(!strncmp(axis,"x", 1)) { /* strncmp returns 0 on a match */ cosine = MATD_EL(H, 1, 1); sine = MATD_EL(H, 2, 1); } else if(!strncmp(axis,"y", 1)) { cosine = MATD_EL(H, 0, 0); sine = MATD_EL(H, 0, 2); } else if(!strncmp(axis,"z", 1)) { cosine = MATD_EL(H, 0, 0); sine = MATD_EL(H, 1, 0); } else assert(0); theta = atan2(sine, cosine); return(theta); } // if buf is NULL, will not fill with points of the homography void take_measurements(image_u32_t* im, vx_buffer_t* buf, metrics_t met) { // form homography matd_t*
H = build_homography(im, buf, met); if(H == NULL) return; // get model view from homography matd_t* Model = homography_to_pose(H, 654, 655, 334, 224); // printf("\n"); // matd_print(H, matrix_format); // printf("\n\n"); // printf("model:\n"); // matd_print(Model, "%15f"); // printf("\n\n"); // matd_print(matd_op("M^-1",Model), matrix_format); // printf("\n"); // extrapolate metrics from model view double TX = MATD_EL(Model, 0, 3); double TY = MATD_EL(Model, 1, 3); double TZ = MATD_EL(Model, 2, 3); // double rot_x = get_rotation("x", H); // double rot_y = get_rotation("y", H); // double rot_z = get_rotation("z", H); double cosine = MATD_EL(Model, 0, 0); double rot_z = acos(cosine) * 180/1.5 - 180; cosine = MATD_EL(Model, 2, 2); double rot_x = asin(cosine) * 90/1.3 + 90; cosine = MATD_EL(Model, 1, 1); double rot_y = asin(cosine); char str[200]; sprintf(str, "<<#00ffff,serif-30>> DIST:%lf Offset:(%lf, %lf)\n rot: (%lf, %lf, %lf)\n", TZ, TX, TY, rot_x, rot_y, rot_z); vx_object_t *text = vxo_text_create(VXO_TEXT_ANCHOR_BOTTOM_LEFT, str); vx_buffer_add_back(buf, vxo_pix_coords(VX_ORIGIN_BOTTOM_LEFT, text)); matd_destroy(Model); matd_destroy(H); /* both are freshly allocated above; free them so repeated calls don't leak */ // printf("dist: %lf cos:%lf angle: %lf\n", TZ, cosine, theta); }
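// The rot_x/rot_y/rot_z values above are recovered with empirical scale factors.
// For reference only, a standard ZYX Euler-angle extraction from the rotation block
// of the model matrix (layout shown in the comment before get_rotation) would look
// like the sketch below. This is an assumed alternative, not the code used above,
// and it presumes the rotation is composed as R = Rz(yaw)*Ry(pitch)*Rx(roll).
#include <math.h>

static void model_to_euler(matd_t * M, double * roll, double * pitch, double * yaw)
{
    *pitch = asin(-MATD_EL(M, 2, 0));                    // R20 = -sin(pitch)
    *roll  = atan2(MATD_EL(M, 2, 1), MATD_EL(M, 2, 2));  // R21 = sin(roll)cos(pitch), R22 = cos(roll)cos(pitch)
    *yaw   = atan2(MATD_EL(M, 1, 0), MATD_EL(M, 0, 0));  // R10 = sin(yaw)cos(pitch), R00 = cos(yaw)cos(pitch)
}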