Example #1
ImageTestData ImageTestData::initializeSecondaryData(vtkImageDataPtr source, QString filename)
{
	ImageTestData retval;
	QString colorFormat = "R";
	if (source->GetNumberOfScalarComponents() == 3)
	{
		vtkSmartPointer<vtkImageLuminance> luminance = vtkSmartPointer<vtkImageLuminance>::New();
		luminance->SetInputData(source);
		luminance->Update();
		vtkImageDataPtr outData = luminance->GetOutput();
		retval.mImageData = outData;
		colorFormat = "R";
	}
	else if (source->GetNumberOfScalarComponents() == 4)
	{
		retval.mImageData = source;
		colorFormat = "RGBA";
	}
	else if (source->GetNumberOfScalarComponents() == 1)
	{
		retval.mImageData = source;
		colorFormat = "R";
	}

	retval.mRawUid = QString("uchar %1[%2]").arg(QFileInfo(filename).completeBaseName()).arg(colorFormat);
	retval.mDataSource.reset(new SplitFramesContainer(retval.mImageData));
	retval.mCurrentFrame = 0;
	return retval;
}
Example #2
vtkImageDataPtr IGTLinkConversionSonixCXLegacy::createFilterAny2RGB(int R, int G, int B, vtkImageDataPtr input)
{
	if (input->GetNumberOfScalarComponents() == 1)
		return input;
	if ((input->GetNumberOfScalarComponents() == 3) && (R == 0) && (G == 1) && (B == 2))
		return input;

	vtkImageAppendComponentsPtr merger = vtkImageAppendComponentsPtr::New();
	vtkImageExtractComponentsPtr splitterRGB = vtkImageExtractComponentsPtr::New();
	splitterRGB->SetInputData(input);
	splitterRGB->SetComponents(R, G, B);
	merger->AddInputConnection(splitterRGB->GetOutputPort());
	merger->Update();
	return merger->GetOutput();
}
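A minimal usage sketch of the same idea with plain VTK smart pointers, assuming a BGR source that should be reordered to RGB (i.e. the call createFilterAny2RGB(2, 1, 0, input) above); reorderBGRtoRGB and the use of vtkSmartPointer<vtkImageData> in place of the project's vtkImageDataPtr typedef are assumptions of this sketch. Note that a single vtkImageExtractComponents suffices when channels only need reordering; the vtkImageAppendComponents stage matters when components come from separate sources, as in Example #4.

// Sketch only: reorder an interleaved BGR image to RGB.
#include <vtkSmartPointer.h>
#include <vtkImageData.h>
#include <vtkImageExtractComponents.h>

vtkSmartPointer<vtkImageData> reorderBGRtoRGB(vtkSmartPointer<vtkImageData> input)
{
	// Pick components 2,1,0 of the interleaved scalars (B,G,R -> R,G,B).
	vtkSmartPointer<vtkImageExtractComponents> extract = vtkSmartPointer<vtkImageExtractComponents>::New();
	extract->SetInputData(input);
	extract->SetComponents(2, 1, 0);
	extract->Update();
	return extract->GetOutput();
}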
Example #3
vtkImageDataPtr USFrameData::convertTo8bit(vtkImageDataPtr input) const
{
	vtkImageDataPtr retval = input;
	if (input->GetScalarSize() > 1)
	{
		ImagePtr tempImage = cx::ImagePtr(new cx::Image("tempImage", input, "tempImage"));
		tempImage->resetTransferFunctions();
		retval = tempImage->get8bitGrayScaleVtkImageData();
	}
	return retval;
}
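For comparison, a plain-VTK sketch of the same 16-to-8-bit step using vtkImageShiftScale with a linear mapping over the full scalar range; that mapping is an assumption of the sketch, since the example above goes through cx::Image so that its reset transfer functions define the window.

// Sketch only: linearly map the input scalar range into unsigned char.
#include <vtkSmartPointer.h>
#include <vtkImageData.h>
#include <vtkImageShiftScale.h>

vtkSmartPointer<vtkImageData> to8bitLinear(vtkSmartPointer<vtkImageData> input)
{
	double range[2];
	input->GetScalarRange(range);
	double span = range[1] - range[0];
	if (span <= 0)
		span = 1; // avoid division by zero for constant images

	vtkSmartPointer<vtkImageShiftScale> cast = vtkSmartPointer<vtkImageShiftScale>::New();
	cast->SetInputData(input);
	cast->SetShift(-range[0]);    // move the minimum to zero
	cast->SetScale(255.0 / span); // stretch to [0, 255]
	cast->ClampOverflowOn();
	cast->SetOutputScalarTypeToUnsignedChar();
	cast->Update();
	return cast->GetOutput();
}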
Example #4
ImageTestData ImageTestData::initializePrimaryData(vtkImageDataPtr source, QString filename)
{
	ImageTestData retval;
	QString colorFormat = "R";
	if (source->GetNumberOfScalarComponents() == 3)
	{
		vtkImageAppendComponentsPtr merger = vtkImageAppendComponentsPtr::New();
		vtkImageExtractComponentsPtr splitterRGB = vtkImageExtractComponentsPtr::New();
		splitterRGB->SetInputData(source);
		splitterRGB->SetComponents(0, 1, 2);
//		merger->AddInputConnection(0, splitterRGB->GetOutputPort());
		merger->AddInputConnection(splitterRGB->GetOutputPort());
		vtkImageExtractComponentsPtr splitterA = vtkImageExtractComponentsPtr::New();
		splitterA->SetInputData(source);
		splitterA->SetComponents(0);
		merger->AddInputConnection(splitterA->GetOutputPort());
//		merger->AddInputConnection(1, splitterA->GetOutputPort());
		merger->Update();
		retval.mImageData = merger->GetOutput();
		colorFormat = "RGBA";
	}
	else if (source->GetNumberOfScalarComponents() == 4)
	{
		retval.mImageData = source;
		colorFormat = "RGBA";
	}
	else if (source->GetNumberOfScalarComponents() == 1)
	{
		retval.mImageData = source;
		colorFormat = "R";
	}

	retval.mRawUid = QString("%1 [%2]").arg(QFileInfo(filename).completeBaseName()).arg(colorFormat);
	retval.mDataSource.reset(new SplitFramesContainer(retval.mImageData));
	retval.mCurrentFrame = 0;
	return retval;
}
Example #5
/**Convert input to grayscale, and return a COPY of that volume (in order to break the pipeline for memory purposes).
 * ALSO: remove data in the image outside the extent - required by reconstruction.
 * Convert to 8 bit, as the current US reconstruction algorithms only handle 8 bit.
 */
vtkImageDataPtr USFrameData::to8bitGrayscaleAndEffectuateCropping(vtkImageDataPtr input) const
{
	vtkImageDataPtr grayScaleData;

	if (input->GetNumberOfScalarComponents() == 1) // already gray
	{
		// crop (slow)
		grayScaleData = input;
//		outData->Crop();
	}
	else
	{
		// convert and crop as side effect (optimization)
		grayScaleData = convertImageDataToGrayScale(input);
	}

	vtkImageDataPtr outData = this->convertTo8bit(grayScaleData);

	vtkImageDataPtr copy = vtkImageDataPtr::New();
	copy->DeepCopy(outData);
	return copy;
}
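The DeepCopy at the end is what detaches the result from the VTK pipeline, so the intermediate filters (and their references to the input frames) can be released; a minimal sketch of that idiom using vtkImageLuminance, with luminanceCopy being a name invented for this sketch.

// Sketch only: run a filter, then DeepCopy its output so the filter (and its
// reference to the input) can go out of scope without invalidating the result.
#include <vtkSmartPointer.h>
#include <vtkImageData.h>
#include <vtkImageLuminance.h>

vtkSmartPointer<vtkImageData> luminanceCopy(vtkSmartPointer<vtkImageData> rgbInput)
{
	vtkSmartPointer<vtkImageData> copy = vtkSmartPointer<vtkImageData>::New();
	{
		vtkSmartPointer<vtkImageLuminance> luminance = vtkSmartPointer<vtkImageLuminance>::New();
		luminance->SetInputData(rgbInput);
		luminance->Update();
		copy->DeepCopy(luminance->GetOutput()); // detach from the pipeline
	} // luminance is destroyed here; copy still owns its own scalars
	return copy;
}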
Example #6
template<typename scalartype> static int getRGBMax(vtkImageDataPtr image)
{
	int max = 0;
	vtkImageIterator<scalartype> iter(image, image->GetExtent());
	while (!iter.IsAtEnd())
	{
		typename vtkImageIterator<scalartype>::SpanIterator siter = iter.BeginSpan();
		while (siter != iter.EndSpan())
		{
			int value = *siter;
			++siter;
			value += *siter;
			++siter;
			value += *siter;
			++siter;
			if (value > max)
			{
				max = value;
			}
		}
		iter.NextSpan();
	}
	return max/3;
}
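A sketch of how such a template is typically dispatched on the runtime scalar type, using the standard vtkTemplateMacro / VTK_TT switch; getRGBMaxDispatch is a hypothetical wrapper, and vtkImageDataPtr is assumed to be the project's vtkSmartPointer<vtkImageData> typedef for an image with 3 interleaved components.

// Sketch only: pick the scalar type at runtime and call the template above.
#include <vtkImageData.h>
#include <vtkSetGet.h> // vtkTemplateMacro / VTK_TT

static int getRGBMaxDispatch(vtkImageDataPtr image)
{
	int max = 0;
	switch (image->GetScalarType())
	{
		vtkTemplateMacro(max = getRGBMax<VTK_TT>(image));
	default:
		break; // unsupported scalar type, max stays 0
	}
	return max;
}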
Example #7
	void checkImagesEqual(vtkImageDataPtr input1, vtkImageDataPtr input2)
	{
		REQUIRE(input1.Get()!=(vtkImageData*)NULL);
		REQUIRE(input2.Get()!=(vtkImageData*)NULL);
		REQUIRE(input1->GetDataDimension() == input2->GetDataDimension());
		REQUIRE(input1->GetScalarType() == input2->GetScalarType());
		REQUIRE(input1->GetNumberOfScalarComponents() == input2->GetNumberOfScalarComponents());
		REQUIRE(Eigen::Array3i(input1->GetDimensions()).isApprox(Eigen::Array3i(input2->GetDimensions())));
		CHECK(Eigen::Array3d(input1->GetSpacing()).isApprox(Eigen::Array3d(input2->GetSpacing()), 1.0E-2));
		CHECK(Eigen::Array3d(input1->GetOrigin()).isApprox(Eigen::Array3d(input2->GetOrigin())));
		// check spacing, dim, type, origin

		vtkImageMathematicsPtr diff = vtkImageMathematicsPtr::New();
		diff->SetOperationToSubtract();
		diff->SetInput1Data(input1);
		diff->SetInput2Data(input2);
		diff->Update();

		vtkImageAccumulatePtr histogram = vtkImageAccumulatePtr::New();
		histogram->SetInputData(0, diff->GetOutput());
		histogram->Update();

		Eigen::Array3d histogramRange = Eigen::Array3d(histogram->GetMax()) - Eigen::Array3d(histogram->GetMin());

		for (int i=0; i<input1->GetNumberOfScalarComponents(); ++i)
		{
			CHECK(histogramRange[i] <  0.01);
			CHECK(histogramRange[i] > -0.01);
		}
	}
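A minimal Catch-style usage sketch for the helper above, assuming checkImagesEqual and vtkImageDataPtr are visible in the test translation unit; it compares a small constant image with a deep copy of itself, so all checks are expected to pass.

#include <cstring>
#include "catch.hpp"

TEST_CASE("checkImagesEqual accepts identical images", "[unit]")
{
	vtkImageDataPtr a = vtkImageDataPtr::New();
	a->SetDimensions(4, 4, 1);
	a->AllocateScalars(VTK_UNSIGNED_CHAR, 1);
	std::memset(a->GetScalarPointer(), 7, 4 * 4); // constant gray image

	vtkImageDataPtr b = vtkImageDataPtr::New();
	b->DeepCopy(a);

	checkImagesEqual(a, b);
}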
Example #8
	virtual void updateTexture()
	{
		if (mMTime == mTexture->GetMTime())
		{
			return;
		}
		mMTime = mTexture->GetMTime();
		//vtkgl::ActiveTexture(getGLTextureForVolume(textureUnitIndex)); //TODO is this OK?
		GLenum size,internalType;
		boost::uint32_t dimx = mTexture->GetDimensions()[0];
		boost::uint32_t dimy = mTexture->GetDimensions()[1];
		boost::uint32_t dimz = mTexture->GetDimensions()[2];
		mMemorySize = dimx * dimy * dimz;

		glEnable( GL_TEXTURE_3D );
		glBindTexture(GL_TEXTURE_3D, textureId);
		report_gl_error();
		glTexParameteri( GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP );
		glTexParameteri( GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP );
		glTexParameteri( GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP );
		glTexParameteri( GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
		glTexParameteri( GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
		switch (mTexture->GetScalarType())
		{
		case VTK_UNSIGNED_CHAR:
		{
			size = GL_UNSIGNED_BYTE;
			internalType = GL_LUMINANCE;
		}
			break; //8UI_EXT; break;
		case VTK_UNSIGNED_SHORT:
		{
			size = GL_UNSIGNED_SHORT;
			internalType = GL_LUMINANCE16;
			mMemorySize *= 2;
		}
			break; //16UI_EXT; break;
		default:
			size = 0;
			internalType = 0;
			std::cout << "Bit size not supported!" << std::endl;
			QString dataType(mTexture->GetScalarTypeAsString());
			CX_LOG_ERROR() << QString("Attempt to update 3D GL texture from type %1 failed. Only unsigned types supported").arg(dataType);
			break;
		}

		if (mTexture->GetNumberOfScalarComponents()==1)
		{
			void* data = mTexture->GetPointData()->GetScalars()->GetVoidPointer(0);
			glTexImage3D(GL_TEXTURE_3D, 0, internalType, dimx, dimy, dimz, 0, GL_LUMINANCE, size, data);
		}
		else if (mTexture->GetNumberOfScalarComponents()==3)
		{
			internalType = GL_RGB;
			void* data = mTexture->GetPointData()->GetScalars()->GetVoidPointer(0);
			glTexImage3D(GL_TEXTURE_3D, 0, internalType, dimx, dimy, dimz, 0, GL_RGB, size, data);
			mMemorySize *= 3;
		}
		else
		{
			std::cout << "unsupported number of image components" << std::endl;
		}

		glDisable(GL_TEXTURE_3D);

		report_gl_error();
	}
Example #9
bool VNNclAlgorithm::reconstruct(ProcessedUSInputDataPtr input, vtkImageDataPtr outputData, float radius, int nClosePlanes)
{
	mMeasurementNames.clear();

	int numBlocks = 10; // FIXME? needs to be the same as the number of input bscans to the voxel_method kernel

	// Split input US into blocks
	// Splits and copies data from the processed input in the way the kernel will process it, which is per frameBlock
	frameBlock_t* inputBlocks = new frameBlock_t[numBlocks];
	size_t nPlanes_numberOfInputImages = input->getDimensions()[2];
	this->initializeFrameBlocks(inputBlocks, numBlocks, input);

	// Allocate CL memory for each frame block
	VECTOR_CLASS<cl::Buffer> clBlocks;
	report("Allocating OpenCL input block buffers");
	for (int i = 0; i < numBlocks; i++)
	{
		//TODO why does the context suddenly contain a "dummy" device?
		cl::Buffer buffer = mOulContex->createBuffer(mOulContex->getContext(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, inputBlocks[i].length, inputBlocks[i].data, "block buffer "+QString::number(i).toStdString());
		clBlocks.push_back(buffer);
	}
	// Allocate output memory
	int *outputDims = outputData->GetDimensions();

	size_t outputVolumeSize = outputDims[0] * outputDims[1] * outputDims[2] * sizeof(unsigned char);

	report(QString("Allocating CL output buffer, size %1").arg(outputVolumeSize));

	cl_ulong globalMemUse = 10 * inputBlocks[0].length + outputVolumeSize + sizeof(float) * 16 * nPlanes_numberOfInputImages + sizeof(cl_uchar) * input->getDimensions()[0] * input->getDimensions()[1];
	if(isUsingTooMuchMemory(outputVolumeSize, inputBlocks[0].length, globalMemUse))
		return false;

	cl::Buffer outputBuffer = mOulContex->createBuffer(mOulContex->getContext(), CL_MEM_WRITE_ONLY, outputVolumeSize, NULL, "output volume buffer");

	// Fill the plane matrices
	float *planeMatrices = new float[16 * nPlanes_numberOfInputImages]; //4x4 (matrix) = 16
	this->fillPlaneMatrices(planeMatrices, input);

	cl::Buffer clPlaneMatrices = mOulContex->createBuffer(mOulContex->getContext(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, nPlanes_numberOfInputImages * sizeof(float) * 16, planeMatrices, "plane matrices buffer");

	// US Probe mask
	cl::Buffer clMask = mOulContex->createBuffer(mOulContex->getContext(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
			sizeof(cl_uchar) * input->getMask()->GetDimensions()[0] * input->getMask()->GetDimensions()[1],
			input->getMask()->GetScalarPointer(), "mask buffer");

	double *out_spacing = outputData->GetSpacing();
	float spacings[2];
	float f_out_spacings[3];
	f_out_spacings[0] = out_spacing[0];
	f_out_spacings[1] = out_spacing[1];
	f_out_spacings[2] = out_spacing[2];


	spacings[0] = input->getSpacing()[0];
	spacings[1] = input->getSpacing()[1];

	//TODO why 4? because float4 is used??
	size_t planes_eqs_size =  sizeof(cl_float)*4*nPlanes_numberOfInputImages;

	// Find the optimal local work size
	size_t local_work_size;
	unsigned int deviceNumber = 0;
	cl::Device device = mOulContex->getDevice(deviceNumber);
	mKernel.getWorkGroupInfo(device, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, &local_work_size);

	size_t close_planes_size = this->calculateSpaceNeededForClosePlanes(mKernel, device, local_work_size, nPlanes_numberOfInputImages, nClosePlanes);

	this->setKernelArguments(
			mKernel,
			outputDims[0],
			outputDims[1],
			outputDims[2],
			f_out_spacings[0],
			f_out_spacings[1],
			f_out_spacings[2],
			input->getDimensions()[0],
			input->getDimensions()[1],
			spacings[0],
			spacings[1],
			clBlocks,
			outputBuffer,
			clPlaneMatrices,
			clMask,
			planes_eqs_size,
			close_planes_size,
			radius);

	report(QString("Using %1 as local workgroup size").arg(local_work_size));

	// We will divide the work into cubes of CUBE_DIM^3 voxels. The global work size is the total number of voxels divided by that.
	int cube_dim = 4;
	int cube_dim_pow3 = cube_dim * cube_dim * cube_dim;
	// Global work items:
	size_t global_work_size = (((outputDims[0] + cube_dim) * (outputDims[1] + cube_dim) * (outputDims[2] + cube_dim)) / cube_dim_pow3); // = number of cubes = number of kernels to run

	// Round global_work_size up to nearest multiple of local_work_size
	if (global_work_size % local_work_size)
		global_work_size = ((global_work_size / local_work_size) + 1) * local_work_size; // ceil(...)

	unsigned int queueNumber = 0;
	cl::CommandQueue queue = mOulContex->getQueue(queueNumber);
	this->measureAndExecuteKernel(queue, mKernel, global_work_size, local_work_size, mKernelMeasurementName);
	this->measureAndReadBuffer(queue, outputBuffer, outputVolumeSize, outputData->GetScalarPointer(), "vnncl_read_buffer");
	setDeepModified(outputData);
	// Cleaning up
	report(QString("Done, freeing GPU memory"));
	this->freeFrameBlocks(inputBlocks, numBlocks);
	delete[] inputBlocks;

	inputBlocks = NULL;

	return true;
}
Example #10
void IGTLinkConversionFixture::setValue(vtkImageDataPtr data, int x, int y, int z, unsigned char val)
{
    *reinterpret_cast<unsigned char*>(data->GetScalarPointer(x,y,z)) = val;
}
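A short usage sketch for the helper above, assuming setValue is accessible on the fixture and that vtkImageDataPtr is the usual vtkSmartPointer<vtkImageData> typedef; createTestImage is a name invented for this sketch.

#include <cstring>

static vtkImageDataPtr createTestImage(IGTLinkConversionFixture& fixture)
{
	// Sketch only: 10x10x1 single-component unsigned char image, cleared to zero.
	vtkImageDataPtr data = vtkImageDataPtr::New();
	data->SetDimensions(10, 10, 1);
	data->SetSpacing(1.0, 1.0, 1.0);
	data->AllocateScalars(VTK_UNSIGNED_CHAR, 1);
	std::memset(data->GetScalarPointer(), 0, 10 * 10);

	// Mark two pixels so a round-trip conversion has something to verify.
	fixture.setValue(data, 2, 3, 0, 255);
	fixture.setValue(data, 7, 5, 0, 128);
	return data;
}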
Example #11
vtkImageDataPtr USFrameData::useAngio(vtkImageDataPtr inData, vtkImageDataPtr grayFrame, int frameNum) const
{
	// Some of the code here is borrowed from the vtk examples:
	// http://public.kitware.com/cgi-bin/viewcvs.cgi/*checkout*/Examples/Build/vtkMy/Imaging/vtkImageFoo.cxx?root=VTK&content-type=text/plain

	if (inData->GetNumberOfScalarComponents() != 3)
	{
		if(frameNum == 0) //Only report warning once
			reportWarning("Angio requested for grayscale ultrasound");
		return grayFrame;
	}

	vtkImageDataPtr outData = vtkImageDataPtr::New();
	outData->DeepCopy(grayFrame);
//	outData->Update(); // updates whole extent.

//	printStuff("Clipped color in", inData);
//	printStuff("Grayscale in", outData);

//	int* inExt = inData->GetWholeExtent();
	int* outExt = outData->GetExtent();

	// Remember that the input might (and does, due to vtkImageClip) contain leaps.
	// This means that the whole extent might be larger than the extent, thus
	// we must use a start offset and leaps between lines.

	unsigned char *inPtr = static_cast<unsigned char*> (inData->GetScalarPointerForExtent(inData->GetExtent()));
	unsigned char *outPtr = static_cast<unsigned char*> (outData->GetScalarPointerForExtent(outData->GetExtent()));

	int maxX = outExt[1] - outExt[0];
	int maxY = outExt[3] - outExt[2];
	int maxZ = outExt[5] - outExt[4];

	Eigen::Array<vtkIdType,3,1> inInc;
	inData->GetContinuousIncrements(inData->GetExtent(), inInc[0], inInc[1], inInc[2]);
	CX_ASSERT(inInc[0]==0);
	// we assume (wholeextent == extent) for the outData in the algorithm below. assert here.
	Eigen::Array<vtkIdType,3,1> outInc;
	outData->GetContinuousIncrements(outData->GetExtent(), outInc[0], outInc[1], outInc[2]);
	CX_ASSERT(outInc[0]==0);
	CX_ASSERT(outInc[1]==0);
	CX_ASSERT(outInc[2]==0);

	for (int z=0; z<=maxZ; z++)
	{
		for (int y=0; y<=maxY; y++)
		{
			for (int x=0; x<=maxX; x++)
			{
				//Look at 3 scalar components at the same time (RGB),
				//if color is grayscale or close to grayscale: set to zero.

				if (((*inPtr) == (*(inPtr + 1))) && ((*inPtr) == (*(inPtr + 2))))
				{
					(*outPtr) = 0;
				}
				else
				{
					// strong check: look for near-gray values and remove them.
					double r = inPtr[0];
					double g = inPtr[1];
					double b = inPtr[2];
					int metric = (fabs(r-g) + fabs(r-b) + fabs(g-b)) / 3; // average absolute channel difference, compared against a small tolerance below
//					std::cout << QString("  %1,%2,%3 \t %4, %5 -- %6").arg(int(inPtr[0])).arg(int(inPtr[1])).arg(int(inPtr[2])).arg(idxR).arg(idxY).arg(metric) << std::endl;
					if (metric <= 3)
						(*outPtr) = 0;

				}
				//Assume the outVolume is treated with the luminance filter first
				outPtr++;
				inPtr += 3;
			}
			inPtr += inInc[1];
		}
		inPtr += inInc[2];
	}

	return outData;
}
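The per-pixel test in the inner loop amounts to a small "near-gray" predicate; below is a sketch of that check in isolation, with isNearGray as an invented name and the tolerance of 3 from the example exposed as a parameter (the example uses integer division, so the rounding differs slightly). Pixels that satisfy this predicate are zeroed in the grayscale output, leaving only the colored angio signal.

#include <cmath>

// Sketch only: true if the RGB triple is gray or close to gray, i.e. the
// average absolute pairwise channel difference is at most 'tolerance'.
static bool isNearGray(unsigned char r, unsigned char g, unsigned char b, int tolerance = 3)
{
	if (r == g && r == b)
		return true; // exactly gray
	double metric = (std::fabs(double(r) - g) + std::fabs(double(r) - b) + std::fabs(double(g) - b)) / 3.0;
	return metric <= tolerance;
}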