/*
******** nrrdCrop()
**
** select some sub-volume inside a given nrrd, producing an output
** nrrd with the same dimensions, but with equal or smaller sizes
** along each axis.  min[ai] and max[ai] are the (inclusive) index
** bounds of the cropped region along axis ai; both must be within
** [0, nin->axis[ai].size-1] and min[ai] <= max[ai].
**
** Returns 0 on success, 1 on error (with a message added to biff).
*/
int
nrrdCrop(Nrrd *nout, const Nrrd *nin, size_t *min, size_t *max) {
  char me[]="nrrdCrop", func[] = "crop", err[BIFF_STRLEN],
    buff1[NRRD_DIM_MAX*30], buff2[AIR_STRLEN_SMALL];
  unsigned int ai;
  size_t I,
    lineSize,                /* #bytes in one scanline to be copied */
    typeSize,                /* size of data type */
    cIn[NRRD_DIM_MAX],       /* coords for line start, in input */
    cOut[NRRD_DIM_MAX],      /* coords for line start, in output */
    szIn[NRRD_DIM_MAX], szOut[NRRD_DIM_MAX],
    idxIn=0, idxOut=0,       /* linear indices for input and output */
    numLines;                /* number of scanlines in output nrrd */
  char *dataIn, *dataOut;

  /* errors */
  if (!(nout && nin && min && max)) {
    sprintf(err, "%s: got NULL pointer", me);
    biffAdd(NRRD, err); return 1;
  }
  if (nout == nin) {
    sprintf(err, "%s: nout==nin disallowed", me);
    biffAdd(NRRD, err); return 1;
  }
  for (ai=0; ai<nin->dim; ai++) {
    if (!(min[ai] <= max[ai])) {
      sprintf(err, "%s: axis %d min (" _AIR_SIZE_T_CNV
              ") not <= max (" _AIR_SIZE_T_CNV ")",
              me, ai, min[ai], max[ai]);
      biffAdd(NRRD, err); return 1;
    }
    if (!( min[ai] < nin->axis[ai].size && max[ai] < nin->axis[ai].size )) {
      sprintf(err, "%s: axis %d min (" _AIR_SIZE_T_CNV ") or max ("
              _AIR_SIZE_T_CNV ") out of bounds [0," _AIR_SIZE_T_CNV "]",
              me, ai, min[ai], max[ai], nin->axis[ai].size-1);
      biffAdd(NRRD, err); return 1;
    }
  }
  /* this shouldn't actually be necessary ... */
  if (!nrrdElementSize(nin)) {
    sprintf(err, "%s: nrrd reports zero element size!", me);
    biffAdd(NRRD, err); return 1;
  }

  /* allocate */
  nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, szIn);
  numLines = 1;
  for (ai=0; ai<nin->dim; ai++) {
    szOut[ai] = max[ai] - min[ai] + 1;
    if (ai) {
      /* axis 0 is the (contiguous) scanline axis; only the
         higher axes contribute to the scanline count */
      numLines *= szOut[ai];
    }
  }
  nout->blockSize = nin->blockSize;
  if (nrrdMaybeAlloc_nva(nout, nin->type, nin->dim, szOut)) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }
  lineSize = szOut[0]*nrrdElementSize(nin);

  /* the skinny: copy the cropped region one (1-D) scanline at a time */
  typeSize = nrrdElementSize(nin);
  dataIn = (char *)nin->data;
  dataOut = (char *)nout->data;
  /* BUG FIX: this was "memset(cOut, 0, NRRD_DIM_MAX*sizeof(unsigned int))",
     but cOut is an array of size_t; on platforms where
     sizeof(size_t) > sizeof(unsigned int) (e.g. LP64) that zeroed only
     part of the array, leaving coordinates for the higher axes
     uninitialized and producing garbage indices from NRRD_INDEX_GEN */
  memset(cOut, 0, sizeof(cOut));
  for (I=0; I<numLines; I++) {
    for (ai=0; ai<nin->dim; ai++) {
      cIn[ai] = cOut[ai] + min[ai];
    }
    NRRD_INDEX_GEN(idxOut, cOut, szOut, nin->dim);
    NRRD_INDEX_GEN(idxIn, cIn, szIn, nin->dim);
    memmove(dataOut + idxOut*typeSize, dataIn + idxIn*typeSize, lineSize);
    /* the lowest coordinate in cOut[] will stay zero, since we are
       copying one (1-D) scanline at a time */
    NRRD_COORD_INCR(cOut, szOut, nin->dim, 1);
  }
  if (nrrdAxisInfoCopy(nout, nin, NULL,
                       (NRRD_AXIS_INFO_SIZE_BIT
                        | NRRD_AXIS_INFO_MIN_BIT
                        | NRRD_AXIS_INFO_MAX_BIT ))) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }
  for (ai=0; ai<nin->dim; ai++) {
    nrrdAxisInfoPosRange(&(nout->axis[ai].min), &(nout->axis[ai].max),
                         nin, ai, min[ai], max[ai]);
    /* do the safe thing first */
    nout->axis[ai].kind = _nrrdKindAltered(nin->axis[ai].kind, AIR_FALSE);
    /* try cleverness: preserve or downgrade the kind when the crop
       produces a recognizable smaller kind */
    if (!nrrdStateKindNoop) {
      if (nout->axis[ai].size == nin->axis[ai].size) {
        /* we can safely copy kind; the samples didn't change */
        nout->axis[ai].kind = nin->axis[ai].kind;
      } else if (nrrdKind4Color == nin->axis[ai].kind
                 && 3 == szOut[ai]) {
        nout->axis[ai].kind = nrrdKind3Color;
      } else if (nrrdKind4Vector == nin->axis[ai].kind
                 && 3 == szOut[ai]) {
        nout->axis[ai].kind = nrrdKind3Vector;
      } else if ((nrrdKind4Vector == nin->axis[ai].kind
                  || nrrdKind3Vector == nin->axis[ai].kind)
                 && 2 == szOut[ai]) {
        nout->axis[ai].kind = nrrdKind2Vector;
      } else if (nrrdKindRGBAColor == nin->axis[ai].kind
                 && 0 == min[ai] && 2 == max[ai]) {
        nout->axis[ai].kind = nrrdKindRGBColor;
      } else if (nrrdKind2DMaskedSymMatrix == nin->axis[ai].kind
                 && 1 == min[ai] && max[ai] == szIn[ai]-1) {
        /* dropping the leading mask component */
        nout->axis[ai].kind = nrrdKind2DSymMatrix;
      } else if (nrrdKind2DMaskedMatrix == nin->axis[ai].kind
                 && 1 == min[ai] && max[ai] == szIn[ai]-1) {
        nout->axis[ai].kind = nrrdKind2DMatrix;
      } else if (nrrdKind3DMaskedSymMatrix == nin->axis[ai].kind
                 && 1 == min[ai] && max[ai] == szIn[ai]-1) {
        nout->axis[ai].kind = nrrdKind3DSymMatrix;
      } else if (nrrdKind3DMaskedMatrix == nin->axis[ai].kind
                 && 1 == min[ai] && max[ai] == szIn[ai]-1) {
        nout->axis[ai].kind = nrrdKind3DMatrix;
      }
    }
  }
  /* record the crop bounds in the content string, e.g. "[1,4]x[0,2]" */
  strcpy(buff1, "");
  for (ai=0; ai<nin->dim; ai++) {
    sprintf(buff2, "%s[" _AIR_SIZE_T_CNV "," _AIR_SIZE_T_CNV "]",
            (ai ? "x" : ""), min[ai], max[ai]);
    strcat(buff1, buff2);
  }
  if (nrrdContentSet_va(nout, func, nin, "%s", buff1)) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }
  if (nrrdBasicInfoCopy(nout, nin,
                        NRRD_BASIC_INFO_DATA_BIT
                        | NRRD_BASIC_INFO_TYPE_BIT
                        | NRRD_BASIC_INFO_BLOCKSIZE_BIT
                        | NRRD_BASIC_INFO_DIMENSION_BIT
                        | NRRD_BASIC_INFO_SPACEORIGIN_BIT
                        | NRRD_BASIC_INFO_CONTENT_BIT
                        | NRRD_BASIC_INFO_COMMENTS_BIT
                        | (nrrdStateKeyValuePairsPropagate
                           ? 0
                           : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }
  /* copy origin, then shift it along the spatial axes */
  _nrrdSpaceVecCopy(nout->spaceOrigin, nin->spaceOrigin);
  for (ai=0; ai<nin->dim; ai++) {
    if (AIR_EXISTS(nin->axis[ai].spaceDirection[0])) {
      _nrrdSpaceVecScaleAdd2(nout->spaceOrigin,
                             1.0, nout->spaceOrigin,
                             min[ai], nin->axis[ai].spaceDirection);
    }
  }
  return 0;
}
/*
******** nrrdSpatialResample()
**
** general-purpose array-resampler: resamples a nrrd of any type
** (except block) and any dimension along any or all of its axes, with
** any combination of up- or down-sampling along the axes, with any
** kernel (specified by callback), with potentially a different kernel
** for each axis.  Whether or not to resample along axis d is
** controlled by the non-NULL-ity of info->kernel[ai].  Where to sample
** on the axis is controlled by info->min[ai] and info->max[ai]; these
** specify a range of "positions" aka "world space" positions, as
** determined by the per-axis min and max of the input nrrd, which must
** be set for every resampled axis.
**
** we cyclically permute those axes being resampled, and never touch
** the position (in axis ordering) of axes along which we are not
** resampling.  This strategy is certainly not the most intelligent
** one possible, but it does mean that the axis along which we're
** currently resampling-- the one along which we'll have to look at
** multiple adjecent samples-- is that resampling axis which is
** currently most contiguous in memory.  It may make sense to precede
** the resampling with an axis permutation which bubbles all the
** resampled axes to the front (most contiguous) end of the axis list,
** and then puts them back in place afterwards, depending on the cost
** of such axis permutation overhead.
**
** Returns 0 on success, 1 on error (with a message added to biff).
*/
int
nrrdSpatialResample(Nrrd *nout, const Nrrd *nin,
                    const NrrdResampleInfo *info) {
  char me[]="nrrdSpatialResample", func[]="resample", err[BIFF_STRLEN];
  nrrdResample_t
    *array[NRRD_DIM_MAX],   /* intermediate copies of the input data
                               undergoing resampling; we don't need a full-
                               fledged nrrd for these.  Only about two of
                               these arrays will be allocated at a time;
                               intermediate results will be free()d when
                               not needed */
    *_inVec,                /* current input vector being resampled;
                               not necessarily contiguous in memory
                               (if strideIn != 1) */
    *inVec,                 /* buffer for input vector; contiguous */
    *_outVec;               /* output vector in context of volume;
                               never contiguous */
  double tmpF;
  double ratio,             /* factor by which or up or downsampled */
    ratios[NRRD_DIM_MAX];   /* record of "ratio" for all resampled
                               axes, used to compute new spacing in
                               output */
  Nrrd *floatNin;           /* if the input nrrd type is not
                               nrrdResample_t, then we convert it and
                               keep it here */
  unsigned int ai,
    pi,                     /* current pass */
    topLax,                 /* lowest axis that is NOT the resampled one;
                               used for scanline-start coordinate update */
    permute[NRRD_DIM_MAX],  /* how to permute axes of last pass to get
                               axes for current pass */
    ax[NRRD_DIM_MAX+1][NRRD_DIM_MAX],  /* axis ordering on each pass */
    passes;                 /* # of passes needed to resample all axes */
  int i, s, e,
    topRax,                 /* the lowest index of an axis which is
                               resampled.  If all axes are being
                               resampled, then this is 0.  If for
                               some reason the "x" axis (fastest
                               stride) is not being resampled, but
                               "y" is, then topRax is 1 */
    botRax,                 /* index of highest axis being resampled */
    typeIn, typeOut;        /* types of input and output of resampling */
  size_t sz[NRRD_DIM_MAX+1][NRRD_DIM_MAX];
                            /* how many samples along each
                               axis, changing on each pass */

  /* all these variables have to do with the spacing of elements in
     memory for the current pass of resampling, and they (except
     strideIn) are re-set at the beginning of each pass */
  nrrdResample_t *weight;   /* sample weights */
  unsigned int ci[NRRD_DIM_MAX+1], co[NRRD_DIM_MAX+1];
  int sizeIn, sizeOut,      /* lengths of input and output vectors */
    dotLen,                 /* # input samples to dot with weights to get
                               one output sample */
    doRound,                /* actually do rounding on output: we DO NOT
                               round when info->round but the output
                               type is not integral */
    *index;                 /* dotLen*sizeOut 2D array of input indices */
  size_t I,                 /* swiss-army int */
    strideIn,               /* the stride between samples in the input
                               "scanline" being resampled */
    strideOut,              /* stride between samples in output
                               "scanline" from resampling */
    L, LI, LO, numLines,    /* top secret */
    numOut;                 /* # of _samples_, total, in output volume;
                               this is for allocating the output */
  airArray *mop;            /* for cleaning up */

  if (!(nout && nin && info)) {
    sprintf(err, "%s: got NULL pointer", me);
    biffAdd(NRRD, err); return 1;
  }
  if (nrrdBoundaryUnknown == info->boundary) {
    sprintf(err, "%s: need to specify a boundary behavior", me);
    biffAdd(NRRD, err); return 1;
  }

  typeIn = nin->type;
  typeOut = nrrdTypeDefault == info->type ? typeIn : info->type;

  if (_nrrdResampleCheckInfo(nin, info)) {
    sprintf(err, "%s: problem with arguments", me);
    biffAdd(NRRD, err); return 1;
  }

  _nrrdResampleComputePermute(permute, ax, sz,
                              &topRax, &botRax, &passes,
                              nin, info);
  topLax = topRax ? 0 : 1;

  /* not sure where else to put this:
     (want to put it before 0 == passes branch)
     We have to assume some centering when doing resampling, and it
     would be stupid to not record it in the outgoing nrrd, since the
     value of nrrdDefaultCenter could always change. */
  for (ai=0; ai<nin->dim; ai++) {
    if (info->kernel[ai]) {
      nout->axis[ai].center = _nrrdCenter(nin->axis[ai].center);
    }
  }

  if (0 == passes) {
    /* actually, no resampling was desired.  Copy input to output,
       but with the clamping that we normally do at the end of
       resampling */
    nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, sz[0]);
    if (nrrdMaybeAlloc_nva(nout, typeOut, nin->dim, sz[0])) {
      sprintf(err, "%s: couldn't allocate output", me);
      biffAdd(NRRD, err); return 1;
    }
    numOut = nrrdElementNumber(nout);
    for (I=0; I<numOut; I++) {
      tmpF = nrrdDLookup[nin->type](nin->data, I);
      tmpF = nrrdDClamp[typeOut](tmpF);
      nrrdDInsert[typeOut](nout->data, I, tmpF);
    }
    nrrdAxisInfoCopy(nout, nin, NULL, NRRD_AXIS_INFO_NONE);
    /* HEY: need to create textual representation of resampling
       parameters */
    if (nrrdContentSet_va(nout, func, nin, "")) {
      sprintf(err, "%s:", me);
      biffAdd(NRRD, err); return 1;
    }
    if (nrrdBasicInfoCopy(nout, nin,
                          NRRD_BASIC_INFO_DATA_BIT
                          | NRRD_BASIC_INFO_TYPE_BIT
                          | NRRD_BASIC_INFO_BLOCKSIZE_BIT
                          | NRRD_BASIC_INFO_DIMENSION_BIT
                          | NRRD_BASIC_INFO_CONTENT_BIT
                          | NRRD_BASIC_INFO_COMMENTS_BIT
                          | (nrrdStateKeyValuePairsPropagate
                             ? 0
                             : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) {
      sprintf(err, "%s:", me);
      biffAdd(NRRD, err); return 1;
    }
    return 0;
  }

  mop = airMopNew();
  /* convert input nrrd to nrrdResample_t if necessary */
  if (nrrdResample_nrrdType != typeIn) {
    if (nrrdConvert(floatNin = nrrdNew(), nin, nrrdResample_nrrdType)) {
      sprintf(err, "%s: couldn't create float copy of input", me);
      biffAdd(NRRD, err); airMopError(mop); return 1;
    }
    array[0] = (nrrdResample_t*)floatNin->data;
    airMopAdd(mop, floatNin, (airMopper)nrrdNuke, airMopAlways);
  } else {
    floatNin = NULL;
    array[0] = (nrrdResample_t*)nin->data;
  }

  /* compute strideIn; this is actually the same for every pass
     because (strictly speaking) in every pass we are resampling
     the same axis, and axes with lower indices are constant length */
  strideIn = 1;
  for (ai=0; ai<(unsigned int)topRax; ai++) { /* HEY scrutinize casts */
    strideIn *= nin->axis[ai].size;
  }

  /* go! */
  for (pi=0; pi<passes; pi++) {
    numLines = strideOut = 1;
    for (ai=0; ai<nin->dim; ai++) {
      if (ai < (unsigned int)botRax) {  /* HEY scrutinize cast */
        strideOut *= sz[pi+1][ai];
      }
      if (ai != (unsigned int)topRax) { /* HEY scrutinize cast */
        numLines *= sz[pi][ai];
      }
    }
    sizeIn = sz[pi][topRax];
    sizeOut = sz[pi+1][botRax];
    numOut = numLines*sizeOut;
    /* for the rest of the loop body, ai is the original "dimension"
       for the axis being resampled */
    ai = ax[pi][topRax];

    /* we can free the input to the previous pass
       (if its not the given data) */
    if (pi > 0) {
      if (pi == 1) {
        if (array[0] != nin->data) {
          airMopSub(mop, floatNin, (airMopper)nrrdNuke);
          floatNin = nrrdNuke(floatNin);
          array[0] = NULL;
        }
      } else {
        airMopSub(mop, array[pi-1], airFree);
        array[pi-1] = (nrrdResample_t*)airFree(array[pi-1]);
      }
    }

    /* allocate output volume */
    array[pi+1] = (nrrdResample_t*)calloc(numOut,
                                          sizeof(nrrdResample_t));
    if (!array[pi+1]) {
      sprintf(err, "%s: couldn't create array of " _AIR_SIZE_T_CNV
              " nrrdResample_t's for output of pass %d",
              me, numOut, pi);
      biffAdd(NRRD, err); airMopError(mop); return 1;
    }
    airMopAdd(mop, array[pi+1], airFree, airMopAlways);

    /* allocate contiguous input scanline buffer, we alloc one more
       than needed to provide a place for the pad value.  That is, in
       fact, the over-riding reason to copy a scanline to a local
       array: so that there is a simple consistent (non-branchy) way
       to incorporate the pad values */
    inVec = (nrrdResample_t *)calloc(sizeIn+1, sizeof(nrrdResample_t));
    airMopAdd(mop, inVec, airFree, airMopAlways);
    inVec[sizeIn] = AIR_CAST(nrrdResample_t, info->padValue);

    dotLen = _nrrdResampleMakeWeightIndex(&weight, &index, &ratio,
                                          nin, info, ai);
    if (!dotLen) {
      sprintf(err, "%s: trouble creating weight and index vector arrays",
              me);
      biffAdd(NRRD, err); airMopError(mop); return 1;
    }
    ratios[ai] = ratio;
    airMopAdd(mop, weight, airFree, airMopAlways);
    airMopAdd(mop, index, airFree, airMopAlways);

    /* the skinny: resample all the scanlines */
    _inVec = array[pi];
    _outVec = array[pi+1];
    memset(ci, 0, (NRRD_DIM_MAX+1)*sizeof(int));
    memset(co, 0, (NRRD_DIM_MAX+1)*sizeof(int));
    for (L=0; L<numLines; L++) {
      /* calculate the index to get to input and output scanlines,
         according to the coordinates of the start of the scanline */
      NRRD_INDEX_GEN(LI, ci, sz[pi], nin->dim);
      NRRD_INDEX_GEN(LO, co, sz[pi+1], nin->dim);
      _inVec = array[pi] + LI;
      _outVec = array[pi+1] + LO;

      /* read input scanline into contiguous array */
      for (i=0; i<sizeIn; i++) {
        inVec[i] = _inVec[i*strideIn];
      }

      /* do the weighting: each output sample is a dot product of
         dotLen input samples with the precomputed weights */
      for (i=0; i<sizeOut; i++) {
        tmpF = 0.0;
        for (s=0; s<dotLen; s++) {
          tmpF += inVec[index[s + dotLen*i]]*weight[s + dotLen*i];
        }
        _outVec[i*strideOut] = tmpF;
      }

      /* update the coordinates for the scanline starts.  We don't
         use the usual NRRD_COORD macros because we're subject to
         the unusual constraint that ci[topRax] and
         co[permute[topRax]] must stay exactly zero */
      e = topLax;
      ci[e]++;
      co[permute[e]]++;
      while (L < numLines-1 && ci[e] == sz[pi][e]) {
        ci[e] = co[permute[e]] = 0;
        e++;
        e += e == topRax;  /* skip over the resampled axis */
        ci[e]++;
        co[permute[e]]++;
      }
    }

    /* pass-specific clean up */
    airMopSub(mop, weight, airFree);
    airMopSub(mop, index, airFree);
    airMopSub(mop, inVec, airFree);
    weight = (nrrdResample_t*)airFree(weight);
    index = (int*)airFree(index);
    inVec = (nrrdResample_t*)airFree(inVec);
  }

  /* clean up second-to-last array and scanline buffers */
  if (passes > 1) {
    airMopSub(mop, array[passes-1], airFree);
    array[passes-1] = (nrrdResample_t*)airFree(array[passes-1]);
  } else if (array[passes-1] != nin->data) {
    airMopSub(mop, floatNin, (airMopper)nrrdNuke);
    floatNin = nrrdNuke(floatNin);
  }
  array[passes-1] = NULL;

  /* create output nrrd and set axis info */
  if (nrrdMaybeAlloc_nva(nout, typeOut, nin->dim, sz[passes])) {
    sprintf(err, "%s: couldn't allocate final output nrrd", me);
    biffAdd(NRRD, err); airMopError(mop); return 1;
  }
  airMopAdd(mop, nout, (airMopper)nrrdNuke, airMopOnError);
  nrrdAxisInfoCopy(nout, nin, NULL,
                   (NRRD_AXIS_INFO_SIZE_BIT
                    | NRRD_AXIS_INFO_MIN_BIT
                    | NRRD_AXIS_INFO_MAX_BIT
                    | NRRD_AXIS_INFO_SPACING_BIT
                    | NRRD_AXIS_INFO_SPACEDIRECTION_BIT /* see below */
                    | NRRD_AXIS_INFO_THICKNESS_BIT
                    | NRRD_AXIS_INFO_KIND_BIT));
  for (ai=0; ai<nin->dim; ai++) {
    if (info->kernel[ai]) {
      /* we do resample this axis */
      nout->axis[ai].spacing = nin->axis[ai].spacing/ratios[ai];
      /* no way to usefully update thickness: we could be doing blurring
         but maintaining the number of samples: thickness increases, or
         we could be downsampling, in which the relationship between the
         sampled and the skipped regions of space becomes complicated:
         no single scalar can represent it, or we could be upsampling,
         in which the notion of "skip" could be rendered meaningless */
      nout->axis[ai].thickness = AIR_NAN;
      nout->axis[ai].min = info->min[ai];
      nout->axis[ai].max = info->max[ai];
      /*
        HEY: this is currently a bug: all this code was written long
        before there were space directions, so min/max are always set,
        regardless of whethere there are incoming space directions
        which then disallows output space directions on the same axes

      _nrrdSpaceVecScale(nout->axis[ai].spaceDirection,
                         1.0/ratios[ai],
                         nin->axis[ai].spaceDirection);
      */
      nout->axis[ai].kind = _nrrdKindAltered(nin->axis[ai].kind, AIR_TRUE);
    } else {
      /* this axis remains untouched */
      nout->axis[ai].min = nin->axis[ai].min;
      nout->axis[ai].max = nin->axis[ai].max;
      nout->axis[ai].spacing = nin->axis[ai].spacing;
      nout->axis[ai].thickness = nin->axis[ai].thickness;
      nout->axis[ai].kind = nin->axis[ai].kind;
    }
  }
  /* HEY: need to create textual representation of resampling
     parameters */
  /* NOTE(review): this and the nrrdBasicInfoCopy error path below
     return without calling airMopError(mop), unlike the earlier error
     paths — looks like a leak of mop-managed allocations; confirm */
  if (nrrdContentSet_va(nout, func, nin, "")) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }

  /* copy the resampling final result into the output nrrd, maybe
     rounding as we go to make sure that 254.9999 is saved as 255
     in uchar output, and maybe clamping as we go to insure that
     integral results don't have unexpected wrap-around. */
  if (info->round) {
    if (nrrdTypeInt == typeOut ||
        nrrdTypeUInt == typeOut ||
        nrrdTypeLLong == typeOut ||
        nrrdTypeULLong == typeOut) {
      fprintf(stderr, "%s: WARNING: possible erroneous output with "
              "rounding of %s output type due to int-based implementation "
              "of rounding\n", me, airEnumStr(nrrdType, typeOut));
    }
    doRound = nrrdTypeIsIntegral[typeOut];
  } else {
    doRound = AIR_FALSE;
  }
  numOut = nrrdElementNumber(nout);
  for (I=0; I<numOut; I++) {
    tmpF = array[passes][I];
    if (doRound) {
      tmpF = AIR_CAST(nrrdResample_t, AIR_ROUNDUP(tmpF));
    }
    if (info->clamp) {
      tmpF = nrrdDClamp[typeOut](tmpF);
    }
    nrrdDInsert[typeOut](nout->data, I, tmpF);
  }

  if (nrrdBasicInfoCopy(nout, nin,
                        NRRD_BASIC_INFO_DATA_BIT
                        | NRRD_BASIC_INFO_TYPE_BIT
                        | NRRD_BASIC_INFO_BLOCKSIZE_BIT
                        | NRRD_BASIC_INFO_DIMENSION_BIT
                        | NRRD_BASIC_INFO_CONTENT_BIT
                        | NRRD_BASIC_INFO_COMMENTS_BIT
                        | (nrrdStateKeyValuePairsPropagate
                           ? 0
                           : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) {
    sprintf(err, "%s:", me);
    biffAdd(NRRD, err); return 1;
  }

  /* enough already */
  airMopOkay(mop);
  return 0;
}
/*
******** nrrdShuffle
**
** rearranges hyperslices of a nrrd along a given axis according to
** given permutation.  This could be used to on a 4D array,
** representing a 3D volume of vectors, to re-order the vector
** components.
**
** the given permutation array must allocated for at least as long as
** the input nrrd along the chosen axis.  perm[j] = i means that the
** value at position j in the _new_ array should come from position i
** in the _old_array.  The standpoint is from the new, looking at
** where to find the values amid the old array (perm answers "what do
** I put here", not "where do I put this").  This allows multiple
** positions in the new array to copy from the same old position, and
** insures that there is an source for all positions along the new
** array.
**
** Returns 0 on success, 1 on error (with a message added to biff).
*/
int
nrrdShuffle(Nrrd *nout, const Nrrd *nin, unsigned int axis,
            const size_t *perm) {
  static const char me[]="nrrdShuffle", func[]="shuffle";
  char buff2[AIR_STRLEN_SMALL];
  /* Sun Feb  8 13:13:58 CST 2009: There was a memory bug here caused
     by using the same buff1[NRRD_DIM_MAX*30] declaration that had
     worked fine for nrrdAxesPermute and nrrdReshape, but does NOT
     work here because now samples along an axes are re-ordered, not
     axes, so its often not allocated for long enough to hold the
     string that's printed to it.  Ideally there'd be another argument
     that says whether to document the shuffle in the content string,
     which would mean an API change.  Or, we can use a secret
     heuristic (or maybe later a nrrdState variable) for determining
     when an axis is short enough to make documenting the shuffle
     interesting.  This is useful since functions like nrrdFlip()
     probably do *not* need the shuffle (the sample reversal) to be
     documented for long axes */
#define LONGEST_INTERESTING_AXIS 42
  char buff1[LONGEST_INTERESTING_AXIS*30];
  unsigned int ai, ldim, len;
  size_t idxInB=0, idxOut, lineSize, numLines, size[NRRD_DIM_MAX],
    *lsize, cIn[NRRD_DIM_MAX+1], cOut[NRRD_DIM_MAX+1];
  char *dataIn, *dataOut;

  if (!(nin && nout && perm)) {
    biffAddf(NRRD, "%s: got NULL pointer", me);
    return 1;
  }
  if (nout == nin) {
    biffAddf(NRRD, "%s: nout==nin disallowed", me);
    return 1;
  }
  if (!( axis < nin->dim )) {
    biffAddf(NRRD, "%s: axis %d outside valid range [0,%d]",
             me, axis, nin->dim-1);
    return 1;
  }
  len = AIR_CAST(unsigned int, nin->axis[axis].size);
  /* every permutation entry must be a valid index along the axis */
  for (ai=0; ai<len; ai++) {
    if (!( perm[ai] < len )) {
      char stmp[AIR_STRLEN_SMALL];
      biffAddf(NRRD, "%s: perm[%d] (%s) outside valid range [0,%d]", me,
               ai, airSprintSize_t(stmp, perm[ai]), len-1);
      return 1;
    }
  }
  /* this shouldn't actually be necessary .. */
  if (!nrrdElementSize(nin)) {
    biffAddf(NRRD, "%s: nrrd reports zero element size!", me);
    return 1;
  }
  /* set information in new volume */
  nout->blockSize = nin->blockSize;
  nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, size);
  if (nrrdMaybeAlloc_nva(nout, nin->type, nin->dim, size)) {
    biffAddf(NRRD, "%s: failed to allocate output", me);
    return 1;
  }
  if (nrrdAxisInfoCopy(nout, nin, NULL, NRRD_AXIS_INFO_NONE)) {
    biffAddf(NRRD, "%s:", me);
    return 1;
  }
  /* the min and max along the shuffled axis are now meaningless */
  nout->axis[axis].min = nout->axis[axis].max = AIR_NAN;
  /* do the safe thing first */
  nout->axis[axis].kind = _nrrdKindAltered(nin->axis[axis].kind,
                                           AIR_FALSE);
  /* try cleverness */
  if (!nrrdStateKindNoop) {
    if (0 == nrrdKindSize(nin->axis[axis].kind)
        || nrrdKindStub == nin->axis[axis].kind
        || nrrdKindScalar == nin->axis[axis].kind
        || nrrdKind2Vector == nin->axis[axis].kind
        || nrrdKind3Color == nin->axis[axis].kind
        || nrrdKind4Color == nin->axis[axis].kind
        || nrrdKind3Vector == nin->axis[axis].kind
        || nrrdKind3Gradient == nin->axis[axis].kind
        || nrrdKind3Normal == nin->axis[axis].kind
        || nrrdKind4Vector == nin->axis[axis].kind) {
      /* these kinds have no intrinsic ordering */
      nout->axis[axis].kind = nin->axis[axis].kind;
    }
  }
  /* the skinny: a "line" here is one hyperslice-row: all the samples
     on axes below "axis", for one position along "axis" and the
     higher axes */
  lineSize = 1;
  for (ai=0; ai<axis; ai++) {
    lineSize *= nin->axis[ai].size;
  }
  numLines = nrrdElementNumber(nin)/lineSize;
  lineSize *= nrrdElementSize(nin);
  /* lsize/ldim view the volume with "axis" as the fastest axis:
     coordinate [0] of cIn/cOut is the position along "axis" */
  lsize = size + axis;
  ldim = nin->dim - axis;
  dataIn = AIR_CAST(char *, nin->data);
  dataOut = AIR_CAST(char *, nout->data);
  memset(cIn, 0, sizeof(cIn));
  memset(cOut, 0, sizeof(cOut));
  for (idxOut=0; idxOut<numLines; idxOut++) {
    memcpy(cIn, cOut, sizeof(cIn));
    /* apply the permutation only along the shuffled axis */
    cIn[0] = perm[cOut[0]];
    NRRD_INDEX_GEN(idxInB, cIn, lsize, ldim);
    /* NOTE: this overwrites the loop counter idxOut, but the linear
       index regenerated from cOut equals the sequential count, since
       cOut is incremented in lockstep below */
    NRRD_INDEX_GEN(idxOut, cOut, lsize, ldim);
    memcpy(dataOut + idxOut*lineSize, dataIn + idxInB*lineSize,
           lineSize);
    NRRD_COORD_INCR(cOut, lsize, ldim, 0);
  }
  /* Set content.  The LONGEST_INTERESTING_AXIS hack avoids the
     previous array out-of-bounds bug */
  if (len <= LONGEST_INTERESTING_AXIS) {
    strcpy(buff1, "");
    for (ai=0; ai<len; ai++) {
      char stmp[AIR_STRLEN_SMALL];
      sprintf(buff2, "%s%s", (ai ? "," : ""),
              airSprintSize_t(stmp, perm[ai]));
      strcat(buff1, buff2);
    }
    if (nrrdContentSet_va(nout, func, nin, "%s", buff1)) {
      biffAddf(NRRD, "%s:", me);
      return 1;
    }
  } else {
    if (nrrdContentSet_va(nout, func, nin, "")) {
      biffAddf(NRRD, "%s:", me);
      return 1;
    }
  }
  if (nrrdBasicInfoCopy(nout, nin,
                        NRRD_BASIC_INFO_DATA_BIT
                        | NRRD_BASIC_INFO_TYPE_BIT
                        | NRRD_BASIC_INFO_BLOCKSIZE_BIT
                        | NRRD_BASIC_INFO_DIMENSION_BIT
                        | NRRD_BASIC_INFO_CONTENT_BIT
                        | NRRD_BASIC_INFO_COMMENTS_BIT
                        | (nrrdStateKeyValuePairsPropagate
                           ? 0
                           : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) {
    biffAddf(NRRD, "%s:", me);
    return 1;
  }
  return 0;
#undef LONGEST_INTERESTING_AXIS
}