RenderableComponent::RenderableComponent(IEntity& owner, std::string texture)
    : BaseComponent(owner), mOwnsTexture(1), mTextureType(0)
{
    mName = e::kComponentRenderable;

    D_PRINT("Generating texture...");
    mTexture = new Texture(texture);
    D_PRINT("Done.");

    mVBO = mOwner.GetOwner()->GetRenderer()->GenerateDefaultVBO();
    mIBOSize = 6;

    mCurrentEvent.SetName(e::kEventRenderCommand);
    mCurrentEvent.SetTarget(e::kEventTargetRender);

    mIsActive = false;
    mRenderState.mask = 0;
    mRenderState.hud = 1;

    /* Default frame covers the whole texture. */
    frame_t frame;
    frame.coords.minS = 0.f;
    frame.coords.minT = 0.f;
    frame.coords.maxS = 1.f;
    frame.coords.maxT = 1.f;

    /* Texture matrix: translate to (minS, minT), then scale by the frame extent. */
    mCurrentTMat = glm::mat3(1.0, 0.0, 0.0,
                             0.0, 1.0, 0.0,
                             frame.coords.minS, frame.coords.minT, 1.0) *
                   glm::mat3(frame.coords.maxS - frame.coords.minS, 0.0, 0.0,
                             0.0, frame.coords.maxT - frame.coords.minT, 0.0,
                             0.0, 0.0, 1.0);
}
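/*
 * To illustrate the texture-matrix construction above: the product
 * translate(minS, minT) * scale(maxS - minS, maxT - minT) maps unit texture
 * coordinates into the frame's sub-rectangle.  The sketch below is a
 * standalone example (GLM only); the frame bounds are made-up values, not
 * taken from the engine.
 */
#include <cstdio>
#include <glm/glm.hpp>

int main()
{
    const float minS = 0.25f, minT = 0.50f, maxS = 0.75f, maxT = 1.00f;

    /* Same composition as in the constructor (column-major glm::mat3). */
    glm::mat3 tmat = glm::mat3(1.0f, 0.0f, 0.0f,
                               0.0f, 1.0f, 0.0f,
                               minS, minT, 1.0f) *
                     glm::mat3(maxS - minS, 0.0f, 0.0f,
                               0.0f, maxT - minT, 0.0f,
                               0.0f, 0.0f, 1.0f);

    /* Apply it to the unit square's far corner and to its center. */
    glm::vec3 corner = tmat * glm::vec3(1.0f, 1.0f, 1.0f); /* -> (0.75, 1.00) */
    glm::vec3 center = tmat * glm::vec3(0.5f, 0.5f, 1.0f); /* -> (0.50, 0.75) */
    std::printf("corner: (%.2f, %.2f)\ncenter: (%.2f, %.2f)\n",
                corner.x, corner.y, center.x, center.y);
    return 0;
}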
inline void mvdev_ud_zcopy_finish(MPIR_SHANDLE * s, int ah_index)
{
    mvdev_connection_t *c = (mvdev_connection_t *) (s->connection);
    mv_sbuf *v = get_sbuf_ud(c, sizeof(mvdev_packet_ud_zcopy_finish));
    mvdev_packet_ud_zcopy_finish *packet =
        (mvdev_packet_ud_zcopy_finish *) v->header_ptr;

    D_PRINT("sending a zcopy finish message for %p\n", s);

    /* fill in the packet */
    PACKET_SET_HEADER(packet, c, MVDEV_PACKET_UD_ZCOPY_FINISH);
    packet->rreq = REQ_TO_ID(s->receive_id);

    /* mark MPI send complete when we get an ACK for this message */
    v->shandle = s;

    /* needs to be on the same outgoing qp as the data */
    mvdev_post_channel_send_qp_lid(c, v, sizeof(mvdev_packet_ud_zcopy_finish),
            &(mvdev.rndv_qp[s->hca_index]), ah_index);

    /* in case the finish message gets dropped we need to send again --
     * even if we haven't pulled it from the CQ */
    v->retry_always = 1;

    D_PRINT("finished sending finish message\n");
}
static int ik_btr_close_destroy(bool destroy)
{
    int rc;

    if (daos_handle_is_inval(ik_toh)) {
        D_ERROR("Invalid tree open handle\n");
        return -1;
    }

    if (destroy) {
        D_PRINT("Destroy btree\n");
        rc = dbtree_destroy(ik_toh);
    } else {
        D_PRINT("Close btree\n");
        rc = dbtree_close(ik_toh);
    }

    ik_toh = DAOS_HDL_INVAL;
    if (rc != 0) {
        D_ERROR("Tree %s failed: %d\n", destroy ? "destroy" : "close", rc);
        return -1;
    }
    return rc;
}
/* Since we don't know the final size of the packet until it is
 * ready to be sent we need to have additional processing
 * at dequeue time */
void prepare_coalesced_pkt(viadev_connection_t * c, vbuf *v)
{
#ifdef ADAPTIVE_RDMA_FAST_PATH
    if(BUSY_FLAG == v->padding) {
        int len = v->len;
        VBUF_FLAG_TYPE flag;

        v->desc.u.sr.wr_id = (aint_t) v;
        v->desc.sg_entry.length = v->len + VBUF_FAST_RDMA_EXTRA_BYTES;

        /* update the flags */
        if ((int) *(VBUF_FLAG_TYPE *) (v->buffer + len) == len) {
            flag = (VBUF_FLAG_TYPE) (len + FAST_RDMA_ALT_TAG);
        } else {
            flag = (VBUF_FLAG_TYPE) len;
        }
        /* head */
        *(v->head_flag) = flag;
        /* tail */
        *(VBUF_FLAG_TYPE *) (v->buffer + len) = flag;

        D_PRINT("Sending coalesced over rfp\n");
    } else
#endif
    {
        D_PRINT("Sending coalesced over send/recv\n");
        vbuf_init_send(v, v->len);
        v->desc.sg_entry.length = v->len;
    }

#ifndef _IBM_EHCA_
    if(v->desc.sg_entry.length < c->max_inline) {
        v->desc.u.sr.send_flags = IBV_SEND_SIGNALED | IBV_SEND_INLINE;
    } else
#endif
    {
        v->desc.u.sr.send_flags = IBV_SEND_SIGNALED;
    }

#ifdef MEMORY_RELIABLE
    {
        viadev_packet_header *p = (viadev_packet_header *) v->buffer;
        p->crc32 = update_crc(1,
                (void *) ((char *) VBUF_BUFFER_START(v) +
                    sizeof(viadev_packet_header)),
                v->desc.sg_entry.length - sizeof(viadev_packet_header));
        p->dma_len = v->desc.sg_entry.length - sizeof(viadev_packet_header);
    }
#endif

    D_PRINT("coalesce send len: %d\n", v->desc.sg_entry.length);
    /* should be all set to be sent */
}
void mvdev_ext_backlogq_send(mv_qp * qp)
{
    mv_sdescriptor *d;
    struct ibv_send_wr *sr;
    struct ibv_send_wr *bad_wr;
    int i;

    while (qp->send_credits_remaining > 0 && qp->ext_backlogq_head) {
        d = qp->ext_backlogq_head;

        /* find how many desc are chained */
        i = 1;
        sr = &(d->sr);
        while(sr->next) {
            sr = sr->next;
            i++;
        }
        assert(i == 1);

        if(qp->send_credits_remaining >= i) {
            qp->ext_backlogq_head = d->next_extsendq;
            if (d == qp->ext_backlogq_tail) {
                qp->ext_backlogq_tail = NULL;
            }
            d->next_extsendq = NULL;

            mvdev.connections[((mv_sbuf *) d->parent)->rank].queued--;

            /* reset the credit counter now -- so we don't lose credits in
             * the backlogq */
            if(MVDEV_RPUT_FLAG == ((mv_sbuf *) d->parent)->flag) {
                D_PRINT("unqueing RPUT\n");
            } else {
                PACKET_SET_CREDITS(((mv_sbuf *) d->parent),
                        (&(mvdev.connections[((mv_sbuf *) d->parent)->rank])));
            }

            D_PRINT("at %d, dropping to %d, queued: %d\n",
                    qp->send_credits_remaining,
                    qp->send_credits_remaining - i,
                    mvdev.connections[((mv_sbuf *) d->parent)->rank].queued);

            qp->send_credits_remaining -= i;

            if((qp->send_wqes_avail - i) < 0 || (NULL != qp->ext_sendq_head)) {
                mvdev_ext_sendq_queue(qp, d);
            } else {
                if(ibv_post_send(qp->qp, &(d->sr), &bad_wr)) {
                    error_abort_all(IBV_RETURN_ERR,
                            "Error posting to RC QP (%d)\n",
                            qp->send_wqes_avail);
                }
                qp->send_wqes_avail -= i;
            }
        } else {
            break;
        }
    }
}
void dump_vbuf(char *msg, vbuf * v)
{
    int i, len;
    viadev_packet_header *header = NULL;

    header = (viadev_packet_header *) VBUF_BUFFER_START(v);
    D_PRINT("%s: dump of vbuf %p, seq#=%d, type = %d\n",
            msg, v, header->id, header->type);

    /* dump the first 100 bytes of the buffer, 16 per line */
    len = 100;
    for (i = 0; i < len; i++) {
        if (0 == i % 16)
            D_PRINT("\n  ");
        /* cast through unsigned char to avoid sign extension of raw bytes */
        D_PRINT("%02x ", (unsigned int) (unsigned char) v->buffer[i]);
    }
    D_PRINT("\n");
    D_PRINT(" END OF VBUF DUMP\n");
}
/* Initialize a vbuf descriptor for an RDMA write (rendezvous RPUT):
 * len bytes from local_address (lkey) to remote_address (rkey). */
void vbuf_init_rput(vbuf * v,
                    void *local_address, uint32_t lkey,
                    void *remote_address, uint32_t rkey,
                    int len)
{
    v->desc.u.sr.next = NULL;
    v->desc.u.sr.send_flags = IBV_SEND_SIGNALED;
    v->desc.u.sr.opcode = IBV_WR_RDMA_WRITE;
    v->desc.u.sr.wr_id = (aint_t) v;

    v->desc.u.sr.num_sge = 1;
    v->desc.u.sr.sg_list = &(v->desc.sg_entry);
    v->desc.sg_entry.length = len;
    v->desc.sg_entry.lkey = lkey;
    v->desc.sg_entry.addr = (uintptr_t) local_address;

    v->desc.u.sr.wr.rdma.remote_addr = (uintptr_t) remote_address;
    v->desc.u.sr.wr.rdma.rkey = rkey;

#ifdef ADAPTIVE_RDMA_FAST_PATH
    v->padding = RPUT_VBUF_FLAG;
#endif

    D_PRINT("RDMA write\n");
}
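/*
 * Hedged usage sketch for vbuf_init_rput(): the prepared descriptor is
 * eventually handed to the HCA with ibv_post_send(), the same call used in
 * mvdev_ext_backlogq_send() above.  The connection's QP field name ("c->qp")
 * is an assumption for illustration, not a guaranteed member.
 */
static void post_rput_example(viadev_connection_t *c,
                              void *laddr, uint32_t lkey,
                              void *raddr, uint32_t rkey, int len)
{
    struct ibv_send_wr *bad_wr = NULL;
    vbuf *v = get_vbuf();   /* get_vbuf() is shown later in this section */

    vbuf_init_rput(v, laddr, lkey, raddr, rkey, len);

    /* ibv_post_send() returns non-zero on failure */
    if (ibv_post_send(c->qp, &(v->desc.u.sr), &bad_wr)) {
        error_abort_all(IBV_RETURN_ERR, "Error posting RDMA write\n");
    }
}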
void release_vbuf(vbuf * v)
{
    lock_vbuf();

    /* note this correctly handles appending to empty free list */
    D_PRINT("release_vbuf: releasing %p previous head = %p\n",
            v, free_vbuf_head);

    assert(v != free_vbuf_head);
    v->desc.next = free_vbuf_head;

#ifdef ADAPTIVE_RDMA_FAST_PATH
    if ((v->padding != NORMAL_VBUF_FLAG)
            && (v->padding != RPUT_VBUF_FLAG)
            && (v->padding != RGET_VBUF_FLAG)) {
        error_abort_all(GEN_EXIT_ERR, "vbuf %p not correct!!! %d %d %d %d\n",
                v, v->padding, NORMAL_VBUF_FLAG, RPUT_VBUF_FLAG,
                RGET_VBUF_FLAG);
    }
#endif

    free_vbuf_head = v;
    num_free_vbuf++;
    num_vbuf_free++;

    unlock_vbuf();
}
/**
 * Get the appropriate number of main XS based on the number of cores and
 * the preferred number of threads passed in.
 */
static int dss_tgt_nr_get(int ncores, int nr)
{
    int nr_default;

    D_ASSERT(ncores >= 1);

    /* Each system XS uses one core, and each main XS is paired with
     * dss_tgt_offload_xs_nr offload XS. Calculate nr_default as the
     * number of main XS based on the number of cores.
     */
    nr_default = (ncores - dss_sys_xs_nr) / DSS_XS_NR_PER_TGT;
    if (nr_default == 0)
        nr_default = 1;

    /* If the user requests fewer target threads, use that value as
     * dss_tgt_nr; if the user requests more, keep the value calculated
     * above, as creating more threads than #cores may hurt performance.
     */
    if (nr >= 1 && nr < nr_default)
        nr_default = nr;

    if (nr_default != nr)
        D_PRINT("%d target XS(xstream) requested (#cores %d); "
                "use (%d) target XS\n", nr, ncores, nr_default);

    return nr_default;
}
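/*
 * To make the arithmetic above concrete, here is a standalone sketch of the
 * same calculation.  The constant values (3 system XS, 2 XS per target) are
 * example assumptions, not the values DAOS itself uses.
 */
#include <stdio.h>

#define EXAMPLE_SYS_XS_NR     3
#define EXAMPLE_XS_NR_PER_TGT 2

static int example_tgt_nr_get(int ncores, int nr)
{
    int nr_default = (ncores - EXAMPLE_SYS_XS_NR) / EXAMPLE_XS_NR_PER_TGT;

    if (nr_default == 0)
        nr_default = 1;
    if (nr >= 1 && nr < nr_default)
        nr_default = nr;
    return nr_default;
}

int main(void)
{
    /* 19 cores: (19 - 3) / 2 = 8 main XS by default */
    printf("default: %d\n", example_tgt_nr_get(19, 0));   /* 8 */
    /* user asks for 4, fewer than the default, so 4 is used */
    printf("capped:  %d\n", example_tgt_nr_get(19, 4));   /* 4 */
    /* user asks for 16, more than the default, so the default wins */
    printf("limited: %d\n", example_tgt_nr_get(19, 16));  /* 8 */
    return 0;
}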
void ds_mgmt_hdlr_svc_rip(crt_rpc_t *rpc)
{
    struct mgmt_svc_rip_in *murderer;
    int                     sig;
    bool                    force;
    d_rank_t                rank = -1;
    int                     i;

    murderer = crt_req_get(rpc);
    if (murderer == NULL)
        return;

    force = (murderer->rip_flags != 0);

    /*
     * The yield below is to work around an ofi error message at the
     * client side: "fi_cq_readerr got err: 5(Input/output error)".
     */
    for (i = 0; i < 200; i++) {
        ABT_thread_yield();
        usleep(10);
    }

    /** ... adieu */
    if (force)
        sig = SIGKILL;
    else
        sig = SIGTERM;
    crt_group_rank(NULL, &rank);
    D_PRINT("Service rank %d is being killed by signal %d... farewell\n",
            rank, sig);
    kill(getpid(), sig);
}
int mvdev_post_srq_buffers(mv_rpool *rp, mv_srq * srq, int num_bufs)
{
    int i = 0, total = 1;
    mv_rbuf *v, *first_v, *last_v;
    struct ibv_recv_wr *bad_wr;

    /* build a chain of receive work requests, then post it in one call */
    first_v = last_v = get_mv_rbuf(srq->buffer_size);
    prepare_rc_recv(first_v, rp);

    for(i = 1; i < num_bufs; i++) {
        ++total;
        v = get_mv_rbuf(srq->buffer_size);
        prepare_rc_recv(v, rp);
        last_v->desc.rr.next = &v->desc.rr;
        last_v = v;
    }

    if(MVDEV_UNLIKELY(ibv_post_srq_recv(srq->srq, &first_v->desc.rr, &bad_wr))) {
        fprintf(stderr, "Cannot post to SRQ!\n");
        return 0; /* we should know if this happens */
    }

    D_PRINT("Posted %d recvs to SRQ\n", total);
    return total;
}
int mvdev_post_rq_buffers(mv_rpool * rp, mv_qp * qp, int num_bufs)
{
    int i = 0, total = 1;
    mv_rbuf *v, *first_v, *last_v;
    struct ibv_recv_wr *bad_wr;

    D_PRINT("Attempting to post %d recv bufs\n", num_bufs);

    first_v = last_v = get_mv_rbuf(rp->buffer_size);
    switch(qp->type) {
        case MVDEV_CH_RC_RQ:
            prepare_rc_recv(first_v, rp);
            break;
        case MVDEV_CH_UD_RQ:
            prepare_ud_recv(first_v, rp);
            break;
        default:
            error_abort_all(IBV_RETURN_ERR, "Invalid QP Type: %d\n", qp->type);
    }

    /* chain the remaining receive work requests onto the first one */
    for(i = 1; i < num_bufs; i++) {
        ++total;
        v = get_mv_rbuf(rp->buffer_size);
        switch(qp->type) {
            case MVDEV_CH_RC_RQ:
                prepare_rc_recv(v, rp);
                break;
            case MVDEV_CH_UD_RQ:
                prepare_ud_recv(v, rp);
                break;
        }
        last_v->desc.rr.next = &v->desc.rr;
        last_v = v;
    }

    if(ibv_post_recv(qp->qp, &first_v->desc.rr, &bad_wr)) {
        return 0;
    }

    D_PRINT("Posted %d recvs\n", total);
    return total;
}
void mvdev_incoming_ud_zcopy_ack(mv_rbuf * v, mvdev_connection_t * c,
        mvdev_packet_ud_zcopy_ack * h)
{
    MPIR_SHANDLE *shandle = (MPIR_SHANDLE *) ID_TO_REQ(h->sreq);
    D_PRINT("got zcopy ack\n");
    SEND_COMPLETE(shandle);
}
template <typename TLoc, typename TToView>
void copy( boost::gil::image_view<TLoc> from_view, TToView to_view )
{
    /* copy is currently stubbed out; only the debug marker remains */
    D_PRINT("*******************************");
    //copyAsync(from_view, to_view);
    //CUGIP_CHECK_RESULT(cudaThreadSynchronize());
}
/**
 * The parsing of arguments is done in 2 phases: the 1st phase handles
 * arguments to daosctl itself, which are processed here.  The 2nd phase
 * handles the options passed to each command, which are processed by the
 * logic for that command.
 */
int handle_information_options(const char ***argv, int *argc)
{
    if (*argc > 1) {
        const char *cmd = (*argv)[1];

        if (cmd[0] != '-')
            return 0;

        if (!strcmp(cmd, "-h") || !strcmp(cmd, "--help")) {
            print_help();
            exit(0);
        }

        if (!strcmp(cmd, "-V") || !strcmp(cmd, "--version")) {
            printf("\n%s\n", program_version);
            exit(0);
        }

        if (!strcmp(cmd, "--usage")) {
            printf("\n Usage: %s\n", daosctl_usage_string);
            exit(0);
        }

        if (!strcmp(cmd, "--list-cmds")) {
            int i;

            printf("daosctl available commands:\n\n");
            for (i = 0; i < command_count; i++) {
                struct cmd_struct *p = commands + i;

                printf("\t%s\n", p->cmd);
            }
            exit(0);
        } else {
            D_PRINT("Unknown option: %s\n", cmd);
            D_PRINT("\n Usage: %s\n", daosctl_usage_string);
            exit(129);
        }
    } else {
        D_PRINT("No options or commands.\n");
        D_PRINT("\n Usage: %s\n", daosctl_usage_string);
        exit(129);
    }
    return 0;
}
void mvdev_recv_ud_zcopy(MPIR_RHANDLE * rhandle)
{
    mv_qp_pool_entry *rqp;

    D_PRINT("got zcopy start -- len: %d\n", rhandle->len);

    /* only way buffer is NULL is if length is zero */
    if (NULL == rhandle->buf) {
        rhandle->buf = &nullrbuffer;
    }

    /* we need to make sure we have a QP available --
     * otherwise we don't want to take this path */
    if(NULL == mvdev.rndv_pool_qps_free_head) {
        D_PRINT("No QPs available -- using R3\n");
        rhandle->protocol = MVDEV_PROTOCOL_R3;
        mvdev_recv_r3(rhandle);
        return;
    }

    /* try to register the buffer directly */
    rhandle->dreg_entry = dreg_register(rhandle->buf, rhandle->len,
            DREG_ACL_WRITE);
    if (NULL == rhandle->dreg_entry) {
        /* failed to register memory, revert to R3 */
        D_PRINT("Cannot register mem -- using R3\n");
        rhandle->protocol = MVDEV_PROTOCOL_R3;
        mvdev_recv_r3(rhandle);
        return;
    }

    GET_RNDV_QP(rqp);
    rhandle->qp_entry = rqp;
    MV_ASSERT(rhandle->qp_entry != NULL);

    D_PRINT("before posting recv\n");
    mvdev_post_zcopy_recv(rhandle);
    D_PRINT("Finished posting buffers\n");

    MV_Rndv_Send_Reply(rhandle);
}
struct ibv_qp * MV_Create_RC_QP(mv_qp_setup_information *si)
{
    struct ibv_qp * qp = NULL;

    /* create */
    {
        struct ibv_qp_init_attr attr;
        memset(&attr, 0, sizeof(struct ibv_qp_init_attr));

        attr.srq = si->srq;
        D_PRINT("SRQ at create qp: %p\n", attr.srq);
        attr.send_cq = si->send_cq;
        attr.recv_cq = si->recv_cq;
        attr.cap.max_send_wr = si->cap.max_send_wr;
        attr.cap.max_recv_wr = si->cap.max_recv_wr;
        attr.cap.max_send_sge = si->cap.max_send_sge;
        attr.cap.max_recv_sge = si->cap.max_recv_sge;
        attr.cap.max_inline_data = si->cap.max_inline_data;
        attr.qp_type = IBV_QPT_RC;

        qp = ibv_create_qp(si->pd, &attr);
        if (!qp) {
            error_abort_all(IBV_RETURN_ERR, "Couldn't create RC QP");
            return NULL;
        }
    }

    /* init */
    {
        struct ibv_qp_attr attr;
        memset(&attr, 0, sizeof(struct ibv_qp_attr));

        attr.qp_state = IBV_QPS_INIT;
        attr.pkey_index = mvparams.pkey_ix;
        attr.port_num = mvparams.default_port;
        attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
        attr.pkey_index = 0; /* note: overrides mvparams.pkey_ix set above */

        if(ibv_modify_qp(qp, &attr,
                    IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                    IBV_QP_PORT | IBV_QP_ACCESS_FLAGS)) {
            error_abort_all(IBV_RETURN_ERR, "Failed to modify RC QP to INIT");
            return NULL;
        }
    }

    mvdev.rc_connections++;
    return qp;
}
vbuf *get_vbuf(void)
{
    vbuf *v;

    lock_vbuf();

    /*
     * It will often be possible for higher layers to recover
     * when no vbuf is available, by waiting for more descriptors
     * to complete. For now, just abort.
     */
    if (NULL == free_vbuf_head) {
        D_PRINT("Allocating new vbuf region\n");
        allocate_vbuf_region(viadev_vbuf_secondary_pool_size);
        if (NULL == free_vbuf_head) {
            error_abort_all(GEN_EXIT_ERR, "No free vbufs. Pool size %d",
                    vbuf_n_allocated);
        }
    }

    v = free_vbuf_head;
    num_free_vbuf--;
    num_vbuf_get++;

    /* this correctly handles removing from single entry free list */
    free_vbuf_head = free_vbuf_head->desc.next;

#ifdef ADAPTIVE_RDMA_FAST_PATH
    /* need to change this to RPUT_VBUF_FLAG or RGET_VBUF_FLAG later
     * if we are doing rput */
    v->padding = NORMAL_VBUF_FLAG;
#endif

    /* this is probably not the right place to initialize shandle to NULL.
     * Do it here for now because it will make sure it is always initialized.
     * Otherwise we would need to very carefully add the initialization in
     * a dozen other places, and probably miss one. */
    v->shandle = NULL;

    v->ref_count = 0;
    v->len = 0;

    if(viadev_use_nfr) {
        v->ib_completed = 0;
        v->sw_completed = 0;
        v->prev = NULL;
        v->next = NULL;
    }

    /* make sure the rank is not inadvertently used anywhere */
    v->grank = -1;

    unlock_vbuf();
    return (v);
}
void mvdev_windowq_queue(mvdev_connection_t * c, mv_sbuf * v, int total_len)
{
    D_PRINT("Window q\n");

    /* the message length is stashed in the seqnum field while queued */
    v->seqnum = total_len;
    v->extwin_ptr.next = NULL;

    if (c->ext_window_head == NULL) {
        c->ext_window_head = v;
    } else {
        c->ext_window_tail->extwin_ptr.next = v;
    }
    c->ext_window_tail = v;
}
void mvdev_ext_sendq_queue(mv_qp *qp, mv_sdescriptor * d)
{
    D_PRINT("ext_sendq add\n");

    ++(qp->ext_sendq_size);
    d->next_extsendq = NULL;

    if(qp->ext_sendq_head == NULL) {
        qp->ext_sendq_head = d;
    } else {
        qp->ext_sendq_tail->next_extsendq = d;
    }
    qp->ext_sendq_tail = d;
}
void AGLViewer::initializeGL()
{
    soglu::initOpenGL();
    soglu::initializeCg();

    glClearColor(
        mViewerState->backgroundColor.redF(),
        mViewerState->backgroundColor.greenF(),
        mViewerState->backgroundColor.blueF(),
        mViewerState->backgroundColor.alphaF()
        );

    mFrameBufferObject.Initialize( width(), height() );
    mPickManager.initialize( 150 );
    D_PRINT("REMOVE THIS");

    initializeRenderingEnvironment();
}
void getHeadMeasurementData( const PointSet &aPoints, HeadMeasurementData &aHeadMeasurementData )
{
    ASSERT( aPoints.size() >= 3 );

    Vector3f center;
    Eigen::Matrix3f covarianceMatrix;
    computeCovarianceMatrixFromPointSet( aPoints, center, covarianceMatrix );

    typedef Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> Solver;
    Solver eigensolver( covarianceMatrix );

    Solver::RealVectorType eigenVals = eigensolver.eigenvalues();
    Eigen::Matrix3f eigenVectors = eigensolver.eigenvectors();

    D_PRINT( "Eigen values :\n" << eigenVals );
    D_PRINT( "Eigen vectors :\n" << eigenVectors );

    // The two eigenvectors with the largest eigenvalues span the plane of
    // the point set; their cross product gives the plane normal.
    Vector3f v1( eigenVectors(0,2), eigenVectors(1,2), eigenVectors(2,2) );
    Vector3f v2( eigenVectors(0,1), eigenVectors(1,1), eigenVectors(2,1) );
    /*Vector3f v1 = mHumeralHeadPoints[0] - center;
    Vector3f v2 = mHumeralHeadPoints[1] - center;
    VectorNormalization( v1 );
    VectorNormalization( v2 );*/

    Vector3f normal = VectorProduct( v1, v2 );
    VectorNormalization( normal );
    /*v2 = VectorProduct( v1, normal );
    VectorNormalization( v2 );*/

    aHeadMeasurementData.point = center;
    aHeadMeasurementData.normal = normal;
    aHeadMeasurementData.vDirection = v1;
    aHeadMeasurementData.wDirection = v2;
    aHeadMeasurementData.available = true;
}
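/*
 * The decomposition above is a standard covariance/PCA plane fit.  For
 * reference, this standalone Eigen sketch (the sample points are invented)
 * computes the same quantities; the eigenvector with the smallest eigenvalue
 * is the plane normal that getHeadMeasurementData() obtains via the cross
 * product of the two dominant eigenvectors.
 */
#include <iostream>
#include <vector>
#include <Eigen/Dense>

int main()
{
    std::vector<Eigen::Vector3f> points;
    points.push_back(Eigen::Vector3f(0.0f, 0.0f,  0.00f));
    points.push_back(Eigen::Vector3f(1.0f, 0.0f,  0.10f));
    points.push_back(Eigen::Vector3f(0.0f, 1.0f, -0.10f));
    points.push_back(Eigen::Vector3f(1.0f, 1.0f,  0.00f));
    points.push_back(Eigen::Vector3f(0.5f, 0.5f,  0.05f));

    // Centroid and covariance matrix of the point set.
    Eigen::Vector3f center = Eigen::Vector3f::Zero();
    for (size_t i = 0; i < points.size(); ++i) center += points[i];
    center /= static_cast<float>(points.size());

    Eigen::Matrix3f cov = Eigen::Matrix3f::Zero();
    for (size_t i = 0; i < points.size(); ++i) {
        Eigen::Vector3f d = points[i] - center;
        cov += d * d.transpose();
    }

    // SelfAdjointEigenSolver returns eigenvalues in increasing order, so
    // column 0 is the direction of least variance -- the plane normal --
    // and columns 1 and 2 span the fitted plane.
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> solver(cov);
    Eigen::Vector3f normal = solver.eigenvectors().col(0).normalized();

    std::cout << "centroid:\n" << center << "\nnormal:\n" << normal << std::endl;
    return 0;
}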
static int ik_btr_query(void)
{
    struct btr_attr attr;
    struct btr_stat stat;
    int             rc;

    rc = dbtree_query(ik_toh, &attr, &stat);
    if (rc != 0) {
        D_ERROR("Failed to query btree: %d\n", rc);
        return -1;
    }

    D_PRINT("tree [order=%d, depth=%d]\n", attr.ba_order, attr.ba_depth);
    D_PRINT("node [total="DF_U64"]\n"
            "record [total="DF_U64"]\n"
            "key [total="DF_U64", max="DF_U64"]\n"
            "val [total="DF_U64", max="DF_U64"]\n",
            stat.bs_node_nr, stat.bs_rec_nr,
            stat.bs_key_sum, stat.bs_key_max,
            stat.bs_val_sum, stat.bs_val_max);

    return 0;
}
void mvdev_ext_backlogq_queue(mv_qp *qp, mv_sdescriptor * d)
{
    D_PRINT("backlogq add");

    d->next_extsendq = NULL;
    if(qp->ext_backlogq_head == NULL) {
        qp->ext_backlogq_head = d;
    } else {
        qp->ext_backlogq_tail->next_extsendq = d;
    }
    qp->ext_backlogq_tail = d;

    /* TODO: if this has credit... send ack
       mvdev_explicit_ack(((mv_sbuf *)d->parent)->rank); */

    mvdev.connections[((mv_sbuf *) d->parent)->rank].queued++;

    D_PRINT("now %d queued\n",
            mvdev.connections[((mv_sbuf *) d->parent)->rank].queued);

    if(mvdev.connections[((mv_sbuf *) d->parent)->rank].queued > 25) {
        mvdev.connections[((mv_sbuf *) d->parent)->rank].msg_info.control_ignore++;
        mvdev_explicit_ack(((mv_sbuf *) d->parent)->rank);
    }
}
void MV_Send_RCFP_Normal( mvdev_connection_t * c, mv_sbuf * v, int total_len,
        mv_qp *send_qp)
{
    MVBUF_HEAD_FLAG_TYPE * rcfp_header = (MVBUF_HEAD_FLAG_TYPE *) v->base_ptr;
    MVBUF_TAIL_FLAG_TYPE * rcfp_tail = (MVBUF_TAIL_FLAG_TYPE *)
        (v->base_ptr + total_len + sizeof(MVBUF_HEAD_FLAG_TYPE));
    uint64_t thead, tlen, tseqnum, tail_val;
    mv_qp *qp = send_qp;

    total_len += MV_RCFP_OVERHEAD;

    D_PRINT("Sending over RCFP seqnum: %d\n", c->seqnum_next_tosend);

    v->seqnum = v->seqnum_last = c->seqnum_next_tosend;
    INCREMENT_SEQ_NUM(&(c->seqnum_next_tosend));

    /* don't try to pack everything in yet */
    tlen = total_len;
    tseqnum = v->seqnum;

    READ_RCFP_TAIL_VAL(*rcfp_tail, (&tail_val));
    thead = (tail_val == MVBUF_FLAG_VAL) ? MVBUF_FLAG_VAL_ALT : MVBUF_FLAG_VAL;

    *rcfp_header = CREATE_RCFP_HEADER(thead, tlen, tseqnum);
    *rcfp_tail = CREATE_RCFP_FOOTER(thead, (uint64_t) 63, (uint64_t) 0);

    v->segments = 1;
    v->left_to_send = 1;

    mvdev.connection_ack_required[c->global_rank] = 0;

    prepare_rcfp_descriptor_headers(&(v->desc), total_len, qp);
    SEND_RC_SR(qp, v);

    v->rel_type = MV_RELIABLE_LOCK_SBUF;
    v->total_len = total_len;

    mvdev_track_send(c, v);
    RESET_CREDITS(v, c);

#ifdef MV_PROFILE
    {
        mvdev_packet_header * p = (mvdev_packet_header *) v->header_ptr;
        COLLECT_RCFP_INTERNAL_MSG_INFO(total_len, p->type, c);
    }
#endif
}
void OrganSegmentationController::toggleRegionMarking( bool aToggle )
{
    if ( aToggle ) {
        ASSERT( mModule.getGraphCutSegmentationWrapper().mWatersheds );
        setController(
            boost::make_shared<RegionMarkingMouseController>(
                mModule.getGraphCutSegmentationWrapper().mWatersheds,
                mIDMappingBuffer,
                mModule.getGraphCutSegmentationWrapper().mForegroundMarkers,
                boost::bind( &OrganSegmentationModule::update, mModule )
                )
            );
        D_PRINT( "Switched to region marking controller" );
    } else {
        resetController(); //mMaskDrawingController.reset();
    }
}
void DatasetManager::registerImage( DatasetID aDatasetId, boost::filesystem::path aPath, M4D::Imaging::AImage::Ptr aImage, bool aUseAsCurrent )
{
    ASSERT( aDatasetId != 0 );

    boost::recursive_mutex::scoped_lock lock( mDatasetInfoAccessLock );

    ImageRecord *rec = new ImageRecord();
    ADatasetRecord::Ptr p = ADatasetRecord::Ptr( rec );

    rec->id = aDatasetId;
    rec->filePath = aPath;
    rec->image = aImage;

    mDatasetInfos[ aDatasetId ] = p;

    if ( aUseAsCurrent ) {
        setCurrentDatasetInfo( aDatasetId );
    }

    D_PRINT( "Dataset info added, id = " << aDatasetId
             << ", dataset count = " << mDatasetInfos.size() );
}
/**
 * Uses the function table to call the right code for the detected
 * command.
 */
static int process_cmd(int argc, const char **argv)
{
    int rc = EINVAL;
    int i;

    for (i = 0; i < command_count; i++) {
        if (!strcmp(commands[i].cmd, argv[1])) {
            rc = commands[i].fn(argc, argv, NULL);
            break;
        }
    }

    if (rc == EINVAL) {
        D_PRINT("Unknown command or missing argument: %s\n\n", argv[1]);
        print_help();
    }

    return rc;
}
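/*
 * process_cmd() and handle_information_options() both walk a "commands"
 * table with a name and a handler per entry.  The sketch below shows what
 * such a table could look like; only the "cmd" and "fn" members are visible
 * in the code above, so the exact struct layout, the handler's third
 * parameter, and the example entry are assumptions for illustration.
 */
struct cmd_struct {
    const char *cmd;                                     /* command name    */
    int (*fn)(int argc, const char **argv, void *ctx);   /* command handler */
};

static int cmd_example(int argc, const char **argv, void *ctx)
{
    (void)argc; (void)argv; (void)ctx;
    printf("example command\n");
    return 0;
}

static struct cmd_struct commands[] = {
    { "example", cmd_example },
    /* the real daosctl commands would be listed here */
};

static int command_count = sizeof(commands) / sizeof(commands[0]);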
void deallocate_vbufs()
{
    vbuf_region *r = vbuf_region_head;

    lock_vbuf();

    while (r) {
        if (r->mem_handle != NULL) {
            if(ibv_dereg_mr(r->mem_handle)) {
                error_abort_all(GEN_ASSERT_ERR, "could not deregister MR");
            }
            /* free vbufs add it later */
        }
        D_PRINT("deregister vbufs\n");
        r = r->next;
    }

    unlock_vbuf();
}
int main(int argc, const char **argv)
{
    /* doesn't return if there were informational options */
    handle_information_options(&argv, &argc);

    /* set up DAOS; if that isn't possible there is no point continuing */
    int test_rc = setup(argc, (char **)argv);

    if (test_rc) {
        D_PRINT("Couldn't initialize DAOS.\n");
        return 1;
    }

    /* there is a real command to process */
    test_rc = process_cmd(argc, argv);

    /* shut down DAOS */
    done();

    return test_rc;
}