/* The computational routine */
void knnfield(const mwSize *dimsA, const mwSize *dimsB,
              nnf_data_t *videoA, nnf_data_t *videoB,
              NNFieldParams params,
              int32_t *outMatrix, nnf_data_t *distMatrix)
{
    // Wrap the raw MATLAB buffers in Video objects (4-D: height, width, channels, frames).
    Video<nnf_data_t> A;
    int dA[4];
    for (int i = 0; i < 4; ++i) { dA[i] = dimsA[i]; }
    A.initFromMxArray(4, dA, videoA);

    Video<nnf_data_t> B;
    int dB[4];
    for (int i = 0; i < 4; ++i) { dB[i] = dimsB[i]; }
    B.initFromMxArray(4, dB, videoB);

    // Compute the k-nearest-neighbor field from A to B.
    NNField field(&A, &B, params);
    NNFieldOutput output = field.compute();

    Video<int32_t> &nnf     = output.nnf;
    Video<nnf_data_t> &cost = output.error;
    const int32_t *pData    = nnf.dataReader();
    const nnf_data_t *pCost = cost.dataReader();

    int channel_stride = nnf.voxelCount();
    int in_nn_stride   = 3 * channel_stride;
    int out_nn_stride  = 3 * channel_stride;

    for (unsigned int idx = 0; idx < dimsA[0] * dimsA[1] * dimsA[2]; ++idx) { // each voxel
        for (int k = 0; k < params.knn; ++k) { // each NN
            for (unsigned int c = 0; c < 3; ++c) { // each warp channel (x, y, t)
                outMatrix[idx + c * channel_stride + k * out_nn_stride] =
                    pData[idx + c * channel_stride + k * in_nn_stride];
            }
            // The matched patch must fit entirely inside B.
            assert(outMatrix[idx + 0 * channel_stride + k * out_nn_stride] < B.getWidth()   - params.patch_size_space + 1);
            assert(outMatrix[idx + 1 * channel_stride + k * out_nn_stride] < B.getHeight()  - params.patch_size_space + 1);
            assert(outMatrix[idx + 2 * channel_stride + k * out_nn_stride] < B.frameCount() - params.patch_size_time  + 1);
        }
    }

    // Optionally copy out the per-match patch distances.
    if (distMatrix != nullptr) {
        for (unsigned int idx = 0; idx < dimsA[0] * dimsA[1] * dimsA[2]; ++idx) { // each voxel
            for (int k = 0; k < params.knn; ++k) { // each NN
                distMatrix[idx + k * channel_stride] = pCost[idx + k * channel_stride];
            }
        }
    }
}
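/*
 * Illustrative only: a minimal MEX gateway that could sit in front of knnfield()
 * in the same source file (separate from the video-reading MEX below). This is a
 * sketch under assumptions NOT confirmed by the code above: that the inputs are
 * 4-D single videos (so nnf_data_t maps to MATLAB single), that the parameters
 * arrive as scalars in the order (knn, patch_size_space, patch_size_time), and
 * that the outputs are laid out as [h w frames 3*knn] matches and
 * [h w frames knn] distances. The project's actual gateway may differ.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    if (nrhs < 5) {
        mexErrMsgIdAndTxt("knnfield:nrhs",
            "Expected videoA, videoB, knn, patch_size_space, patch_size_time.");
    }

    const mwSize *dimsA = mxGetDimensions(prhs[0]);
    const mwSize *dimsB = mxGetDimensions(prhs[1]);

    NNFieldParams params;
    params.knn              = (int)mxGetScalar(prhs[2]);
    params.patch_size_space = (int)mxGetScalar(prhs[3]);
    params.patch_size_time  = (int)mxGetScalar(prhs[4]);

    // Allocate the outputs: 3 warp channels (x, y, t) per neighbor, plus optional distances.
    mwSize dimsOut[4]  = { dimsA[0], dimsA[1], dimsA[2], (mwSize)(3 * params.knn) };
    mwSize dimsDist[4] = { dimsA[0], dimsA[1], dimsA[2], (mwSize)params.knn };
    plhs[0] = mxCreateNumericArray(4, dimsOut, mxINT32_CLASS, mxREAL);

    nnf_data_t *distMatrix = NULL;
    if (nlhs > 1) {
        plhs[1] = mxCreateNumericArray(4, dimsDist, mxSINGLE_CLASS, mxREAL);
        distMatrix = (nnf_data_t *)mxGetData(plhs[1]);
    }

    knnfield(dimsA, dimsB,
             (nnf_data_t *)mxGetData(prhs[0]), (nnf_data_t *)mxGetData(prhs[1]),
             params,
             (int32_t *)mxGetData(plhs[0]), distMatrix);
}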
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    char *filename;
    int first_frame, last_frame;
    bool color;
    parse_arguments(nlhs, prhs, nrhs, &filename, &first_frame, &last_frame, &color);

    Video *video = NULL;
    if (first_frame < 0) {
        std::cout << "reading all video!" << std::endl;
        video = new Video(filename);
        first_frame = 1;
        last_frame = video->frames; // frames counted by grabbing all frames in the video
    } else {
        std::cout << "reading frames: " << first_frame << "-" << last_frame << std::endl;
        video = new Video(filename, last_frame - first_frame);
    }

    int n_of_dims, channels;
    mwSize *dims;
    if (!color) {
        n_of_dims = 3;
        channels = 1;
        dims = new mwSize[3];
        dims[0] = video->getHeight();
        dims[1] = video->getWidth();
        dims[2] = last_frame - first_frame + 1;
    } else {
        n_of_dims = 4;
        channels = 3;
        dims = new mwSize[4];
        dims[0] = video->getHeight();
        dims[1] = video->getWidth();
        dims[2] = 3;
        dims[3] = last_frame - first_frame + 1;
    }

    IplImage *img = NULL;

    // Create the output array (3-D grayscale or 4-D color) and assign it to the first output argument.
    plhs[0] = mxCreateNumericArray(n_of_dims, dims, mxUINT8_CLASS, mxREAL);
    // Get a pointer to the data (the cast is necessary since the data is not double; mxGetData returns a void*).
    unsigned char *video_ptr = (unsigned char *)mxGetData(plhs[0]);

    // Frame counter: skip frames until f reaches first_frame, and stop grabbing after last_frame.
    int f = 0;
    while ((img = video->getNextFrame()) && f < last_frame) {
        ++f;
        // Only store frames in the requested range; no upper-bound check is needed
        // because the loop condition stops the grab after last_frame.
        if (f >= first_frame) {
            video->convertFrame(video_ptr, img, color);
            // Advance the write pointer by one frame.
            video_ptr += video->getHeight() * video->getWidth() * channels;
        }
    }

    // Release resources allocated in this call.
    delete video;
    delete[] dims;
    mxFree(filename);
    return;
}
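/*
 * Illustrative only: parse_arguments() is called above but not shown here. A
 * minimal version consistent with the call site might look like the sketch
 * below; the MATLAB-side argument order (filename, first_frame, last_frame,
 * color) and the defaulting behavior are assumptions, not the project's actual
 * implementation. Note that mxArrayToString() allocates with mxMalloc(), which
 * is why the gateway above ends with mxFree(filename).
 */
static void parse_arguments(int nlhs, const mxArray *prhs[], int nrhs,
                            char **filename, int *first_frame, int *last_frame, bool *color)
{
    if (nrhs < 1 || !mxIsChar(prhs[0])) {
        mexErrMsgIdAndTxt("videoread:filename", "First argument must be the video filename.");
    }
    *filename = mxArrayToString(prhs[0]);

    // Defaults: read the whole video, in color.
    *first_frame = -1;
    *last_frame  = -1;
    *color       = true;

    if (nrhs >= 3) {
        *first_frame = (int)mxGetScalar(prhs[1]);
        *last_frame  = (int)mxGetScalar(prhs[2]);
    }
    if (nrhs >= 4) {
        *color = mxIsLogicalScalarTrue(prhs[3]);
    }
    (void)nlhs; // unused in this sketch
}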