Example #1
void
Hdf5VolumeStore::saveAffinities(
		const ExplicitVolume<float>& xAffinities,
		const ExplicitVolume<float>& yAffinities,
		const ExplicitVolume<float>& zAffinities) {

	writeVolume(xAffinities, "xAffinities");
	writeVolume(yAffinities, "yAffinities");
	writeVolume(zAffinities, "zAffinities");

}
Example #2
void
Hdf5VolumeStore::saveIntensities(const ExplicitVolume<float>& intensities) {

	_hdfFile.root();
	_hdfFile.cd_mk("volumes");

	writeVolume(intensities, "intensities");
}
Example #3
void
Hdf5VolumeStore::saveLabels(const ExplicitVolume<int>& labels) {

	_hdfFile.root();
	_hdfFile.cd_mk("volumes");

	writeVolume(labels, "labels");
}
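The saveIntensities()/saveLabels() helpers above first position the store at a "volumes" group before writing. As a rough illustration of what a cd_mk()-style call plausibly does underneath, here is a minimal open-or-create helper written against the plain HDF5 C API; the Hdf5VolumeStore wrapper itself is not shown, so this is an assumption about its behavior, not its actual implementation:

#include <hdf5.h>

// Open the named child group if it already exists, otherwise create it.
// This mirrors the presumed semantics of _hdfFile.cd_mk("volumes") above.
static hid_t open_or_create_group(hid_t loc, const char *name)
{
    if (H5Lexists(loc, name, H5P_DEFAULT) > 0)
        return H5Gopen2(loc, name, H5P_DEFAULT);
    return H5Gcreate2(loc, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
}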
Example #4
template <typename T>
inline void
Volume<T>::write(HDF5Id file_id) const
{
    // a volume root group is needed in order to allow random access to
    // groups by creation index. We can't set this index creation property
    // on the root group so we create a volume root group under the root
    
    // get the volume root group, creating it if it does not exist yet
    HDF5Group volume_root_group;
    HDF5Group::getOrCreateRootGroup(file_id, kVolumeRootGroup, 
                                    volume_root_group);

    // get the volume name 
    const String volume_name = 
        m_attributes.value<const String>(kVolumeNameAttr);

    // write the volume
    writeVolume(volume_root_group.id(), volume_name); 
}
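The comment in Example #4 notes that the creation-index property cannot be set on the existing root group, which is why a dedicated volume root group is created under it. getOrCreateRootGroup() is not shown above, so the following is a sketch of the underlying mechanism in the plain HDF5 C API rather than its actual body: creation-order tracking is a group creation property, so it must be enabled when the group is made.

#include <hdf5.h>

// Create a group whose links are tracked and indexed by creation order,
// which is what allows random access to child groups by creation index.
static hid_t make_indexed_group(hid_t file_id, const char *name)
{
    hid_t gcpl = H5Pcreate(H5P_GROUP_CREATE);
    H5Pset_link_creation_order(gcpl,
                               H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
    hid_t group = H5Gcreate2(file_id, name, H5P_DEFAULT, gcpl, H5P_DEFAULT);
    H5Pclose(gcpl);
    return group;  // caller is responsible for H5Gclose()
}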
Example #5
afs_int32
writeDatabase(struct ubik_trans *ut, int fid)
{
    dbadr dbAddr, dbAppAddr;
    struct dump diskDump, apDiskDump;
    dbadr tapeAddr;
    struct tape diskTape;
    dbadr volFragAddr;
    struct volFragment diskVolFragment;
    struct volInfo diskVolInfo;
    int length, hash;
    int old = 0;
    int entrySize;
    afs_int32 code = 0, tcode;
    afs_int32 appDumpAddrs[MAXAPPENDS], numaddrs, appcount, j;

    struct memoryHashTable *mht;

    LogDebug(4, "writeDatabase:\n");

    /* write out a header identifying this database etc */
    tcode = writeDbHeader(fid);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write Header\n");
	ERROR(tcode);
    }

    /* write out the tree of dump structures */

    mht = ht_GetType(HT_dumpIden_FUNCTION, &entrySize);
    if (!mht) {
	LogError(BUDB_BADARGUMENT, "writeDatabase: Can't get dump type\n");
	ERROR(BUDB_BADARGUMENT);
    }

    for (old = 0; old <= 1; old++) {
	/*oldnew */
	/* only two states, old or not old */
	length = (old ? mht->oldLength : mht->length);
	if (!length)
	    continue;

	for (hash = 0; hash < length; hash++) {
	    /*hashBuckets */
	    /* dump all the dumps in this hash bucket
	     */
	    for (dbAddr = ht_LookupBucket(ut, mht, hash, old); dbAddr; dbAddr = ntohl(diskDump.idHashChain)) {	/*initialDumps */
		/* now check if this dump had any errors/inconsistencies.
		 * If so, don't dump it
		 */
		if (badEntry(dbAddr)) {
		    LogError(0,
			     "writeDatabase: Damaged dump entry at addr 0x%x\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		tcode =
		    cdbread(ut, dump_BLOCK, dbAddr, &diskDump,
			    sizeof(diskDump));
		if (tcode) {
		    LogError(tcode,
			     "writeDatabase: Can't read dump entry (addr 0x%x)\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		/* Skip appended dumps; only start with initial dumps. */
		if (diskDump.initialDumpID != 0)
		    continue;

		/* Follow the appended dump chain so the dumps are written in
		 * the order needed for restore.
		 */
		appcount = numaddrs = 0;
		for (dbAppAddr = dbAddr; dbAppAddr;
		     dbAppAddr = ntohl(apDiskDump.appendedDumpChain)) {
		    /*appendedDumps */
		    /* Check to see if we have a circular loop of appended dumps */
		    for (j = 0; j < numaddrs; j++) {
			if (appDumpAddrs[j] == dbAppAddr)
			    break;	/* circular loop */
		    }
		    if (j < numaddrs) {	/* circular loop */
			Log("writeDatabase: Circular loop found in appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u\n",
			    ntohl(diskDump.id));
			break;
		    }
		    if (numaddrs >= MAXAPPENDS)
			numaddrs = MAXAPPENDS - 1;	/* don't overflow */
		    appDumpAddrs[numaddrs] = dbAppAddr;
		    numaddrs++;

		    /* If we dump 1000 appended dumps, assume a loop */
		    if (appcount >= 5 * MAXAPPENDS) {
			Log("writeDatabase: Potential circular loop of appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u. Dumped %d\n", ntohl(diskDump.id), appcount);
			break;
		    }
		    appcount++;

		    /* Read the dump entry */
		    if (dbAddr == dbAppAddr) {
			/* First time through, don't need to read the dump entry again */
			memcpy(&apDiskDump, &diskDump, sizeof(diskDump));
		    } else {
			if (badEntry(dbAppAddr)) {
			    LogError(0,
				     "writeDatabase: Damaged appended dump entry at addr 0x%x\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			tcode =
			    cdbread(ut, dump_BLOCK, dbAppAddr, &apDiskDump,
				    sizeof(apDiskDump));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read appended dump entry (addr 0x%x)\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			/* Verify that this appended dump points to the initial dump */
			if (ntohl(apDiskDump.initialDumpID) !=
			    ntohl(diskDump.id)) {
			    LogError(0,
				     "writeDatabase: Appended dumpID %u does not reference initial dumpID %u\n",
				     ntohl(apDiskDump.id),
				     ntohl(diskDump.id));
			    Log("     Skipping this appended dump\n");
			    continue;
			}
		    }

		    /* Save the dump entry */
		    tcode = writeDump(fid, &apDiskDump);
		    if (tcode) {
			LogError(tcode,
				 "writeDatabase: Can't write dump entry\n");
			ERROR(tcode);
		    }

		    /* For each tape on this dump
		     */
		    for (tapeAddr = ntohl(apDiskDump.firstTape); tapeAddr; tapeAddr = ntohl(diskTape.nextTape)) {	/*tapes */
			/* read the tape entry */
			tcode =
			    cdbread(ut, tape_BLOCK, tapeAddr, &diskTape,
				    sizeof(diskTape));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read tape entry (addr 0x%x) of dumpID %u\n",
				     tapeAddr, ntohl(apDiskDump.id));
			    Log("     Skipping this and remaining tapes in the dump (and all their volumes)\n");
			    break;
			}

			/* Save the tape entry */
			tcode =
			    writeTape(fid, &diskTape, ntohl(apDiskDump.id));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't write tape entry\n");
			    ERROR(tcode);
			}

			/* For each volume on this tape.
			 */
			for (volFragAddr = ntohl(diskTape.firstVol); volFragAddr; volFragAddr = ntohl(diskVolFragment.sameTapeChain)) {	/*volumes */
			    /* Read the volume Fragment entry */
			    tcode =
				cdbread(ut, volFragment_BLOCK, volFragAddr,
					&diskVolFragment,
					sizeof(diskVolFragment));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volfrag entry (addr 0x%x) of dumpID %u\n",
					 volFragAddr, ntohl(apDiskDump.id));
				Log("     Skipping this and remaining volumes on tape '%s'\n", diskTape.name);
				break;
			    }

			    /* Read the volume Info entry */
			    tcode =
				cdbread(ut, volInfo_BLOCK,
					ntohl(diskVolFragment.vol),
					&diskVolInfo, sizeof(diskVolInfo));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volinfo entry (addr 0x%x) of dumpID %u\n",
					 ntohl(diskVolFragment.vol),
					 ntohl(apDiskDump.id));
				Log("     Skipping volume on tape '%s'\n",
				    diskTape.name);
				continue;
			    }

			    /* Save the volume entry */
			    tcode =
				writeVolume(ut, fid, &diskVolFragment,
					    &diskVolInfo,
					    ntohl(apDiskDump.id),
					    diskTape.name);
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't write volume entry\n");
				ERROR(tcode);
			    }
			}	/*volumes */
		    }		/*tapes */
		}		/*appendedDumps */
	    }			/*initialDumps */
	}			/*hashBuckets */
    }				/*oldnew */

    /* write out the textual configuration information */
    tcode = writeText(ut, fid, TB_DUMPSCHEDULE);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write dump schedule\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_VOLUMESET);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write volume set\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_TAPEHOSTS);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write tape hosts\n");
	ERROR(tcode);
    }

    tcode = writeStructHeader(fid, SD_END);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write end savedb\n");
	ERROR(tcode);
    }

  error_exit:
    doneWriting(code);
    return (code);
}
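Each loop in writeDatabase() above walks an on-disk linked list whose next pointer is stored in network byte order, converting with ntohl() on every hop and abandoning the current chain when an entry is damaged or unreadable. A stripped-down sketch of that pattern follows; Record and readRecord() are illustrative stand-ins, not OpenAFS types, and readRecord() is a stub where cdbread() would do the real work.

#include <arpa/inet.h>
#include <cstdint>

struct Record {
    uint32_t nextAddr;   // big-endian on disk; 0 terminates the chain
};

// Stub standing in for cdbread(); a real version would read the record
// stored at 'addr' from the database.
static bool readRecord(uint32_t addr, Record *out)
{
    (void)addr;
    out->nextAddr = 0;
    return true;
}

static void walkChain(uint32_t headAddr)
{
    Record rec;
    for (uint32_t addr = headAddr; addr != 0; addr = ntohl(rec.nextAddr)) {
        if (!readRecord(addr, &rec))
            break;       // damaged entry: skip the rest of this chain
        // ... process the record at 'addr' ...
    }
}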
Example #6
	int run(int argc, char **argv) {
		if (argc != 4) {
			cout << "Use k-means for albedo voxels clustering" << endl;
			cout << "Syntax: mtsutil albedoCluster <albedo_volume> <number_of_clusters> <segmentation_volume>" << endl;
			return -1;
		}

		char *end_ptr = NULL;
		numClusters = strtol(argv[2], &end_ptr, 10);
		if (*end_ptr != '\0')
			SLog(EError, "Could not parse integer value");
		
		Properties props("gridvolume");
		props.setString("filename", argv[1]);
		props.setBoolean("sendData", false);

		VolumeDataSource *originVol = static_cast<VolumeDataSource *> (PluginManager::getInstance()->
			createObject(MTS_CLASS(VolumeDataSource), props));
		originVol->configure();

		Log(EInfo, "%s", originVol->getClass()->getName().c_str());
		Log(EInfo, "res = (%d, %d, %d)", originVol->getResolution().x, originVol->getResolution().y, originVol->getResolution().z);
		Log(EInfo, "channels = %d", originVol->getChannels());
		Log(EInfo, "min = (%.6f, %.6f, %.6f)", originVol->getAABB().min.x, originVol->getAABB().min.y, originVol->getAABB().min.z);
		Log(EInfo, "max = (%.6f, %.6f, %.6f)", originVol->getAABB().max.x, originVol->getAABB().max.y, originVol->getAABB().max.z);

		AABB bbox = originVol->getAABB();
		res = originVol->getResolution();
		N = res.x * res.y * res.z;

		numColors = 0;
		hasht.clear();
		diffIndices.clear();

		double spaceTerm = 0.1;

		data.resize(N);
		for (int i = 0; i < res.x; i++) {
			for (int j = 0; j < res.y; j++) {
				for (int k = 0; k < res.z; k++) {
					int index = (k * res.y + j) * res.x + i;
					Pt p;
					for (int c = 0; c < 3; c++) {
						p[c] = originVol->lookupFloat(i, j, k, c);
					}
					/*
					Point3 pos((i + 1.f) / res.x, (j + 1.f) / res.y, (k + 1.f) / res.z);
					pos *= spaceTerm;
					p[3] = pos[0]; p[4] = pos[1]; p[5] = pos[2];
					*/
					data[index] = p;

					int hashValue = hashFunc(p);
					if (numColors < 1000 && hasht.find(hashValue) == hasht.end()) {
						//Log(EInfo, "%d, (%.6f, %.6f, %.6f)", hashValue, p.x, p.y, p.z);
						hasht[hashValue] = numColors;
						diffIndices.push_back(index);
						numColors++;
					}
				}
			}
		}

		segs = new float[N];
		for (int i = 0; i < N; i++) segs[i] = -1.f;
		
		Log(EInfo, "Start Clustering...");
		
		initCluster();
		kMeans();

		ref<FileStream> outFile = new FileStream(argv[3], FileStream::ETruncReadWrite);
		writeVolume(bbox, 1, outFile);
		
		return 0;
	}
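The hashing loop in Example #6 seeds the clustering by remembering the first voxel index for each of up to 1000 distinct colors (hasht, diffIndices, numColors). A self-contained sketch of that seeding idea follows; quantize() is a hypothetical stand-in for the utility's hashFunc(), whose definition is not shown.

#include <array>
#include <cstddef>
#include <unordered_map>
#include <vector>

using Color = std::array<float, 3>;

// Hash a color by quantizing each channel to 8 bits (illustrative only).
static long quantize(const Color &c)
{
    long h = 0;
    for (float v : c)
        h = h * 1031 + static_cast<long>(v * 255.0f + 0.5f);
    return h;
}

// Return the indices of the first voxels carrying each distinct color,
// capped at maxSeeds, for use as initial cluster centers.
static std::vector<std::size_t> collectSeeds(const std::vector<Color> &voxels,
                                             std::size_t maxSeeds = 1000)
{
    std::unordered_map<long, std::size_t> seen;
    std::vector<std::size_t> seeds;
    for (std::size_t i = 0; i < voxels.size() && seeds.size() < maxSeeds; ++i)
        if (seen.emplace(quantize(voxels[i]), seeds.size()).second)
            seeds.push_back(i);
    return seeds;
}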
Example #7
	int run(int argc, char **argv) {
		if(argc != 7 && argc != 9) {
			cout << "Down-sample grid volume data by a scale" << endl;
			cout << "Syntax: mtsutil downSampleVolume 0 <grid_volume> <scale x> <scale y> <scale z> <target_volume>" << endl;
			cout << "Syntax: mtsutil downSampleVolume 1 <hgrid_volume_dict> <scale x> <scale y> <scale z> <prefix> <origin_suffix> <target_suffix>" << endl;
			return -1;
		}

		if (strcmp(argv[1], "0") == 0) {
			char *end_ptr = NULL;
			scale.x = strtol(argv[3], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");
			scale.y = strtol(argv[4], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");
			scale.z = strtol(argv[5], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");

			Properties props("gridvolume");
			props.setString("filename", argv[2]);
			props.setBoolean("sendData", false);

			VolumeDataSource *originVol = static_cast<VolumeDataSource *> (PluginManager::getInstance()->
				createObject(MTS_CLASS(VolumeDataSource), props));
			originVol->configure();

			Log(EInfo, "%s", originVol->getClass()->getName().c_str());
			Log(EInfo, "res = (%d, %d, %d)", originVol->getResolution().x, originVol->getResolution().y, originVol->getResolution().z);
			Log(EInfo, "channels = %d", originVol->getChannels());
			Log(EInfo, "min = (%.6f, %.6f, %.6f)", originVol->getAABB().min.x, originVol->getAABB().min.y, originVol->getAABB().min.z);
			Log(EInfo, "max = (%.6f, %.6f, %.6f)", originVol->getAABB().max.x, originVol->getAABB().max.y, originVol->getAABB().max.z);

			AABB bbox = originVol->getAABB();

			GridData s;
			downSample(originVol, s);

			Log(EInfo, "finish down-sampling, save volume data to file");
			ref<FileStream> outFile = new FileStream(argv[6], FileStream::ETruncReadWrite);
			writeVolume(s, bbox, originVol->getChannels(), outFile);
		}
		else if (strcmp(argv[1], "1") == 0) {
			char *end_ptr = NULL;
			scale.x = strtol(argv[3], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");
			scale.y = strtol(argv[4], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");
			scale.z = strtol(argv[5], &end_ptr, 10);
			if (*end_ptr != '\0')
				SLog(EError, "Could not parse integer value");

			fs::path resolved = Thread::getThread()->getFileResolver()->resolve(argv[2]);
			Log(EInfo, "Loading hierarchical grid dictrionary \"%s\"", argv[2]);
			ref<FileStream> stream = new FileStream(resolved, FileStream::EReadOnly);
			stream->setByteOrder(Stream::ELittleEndian);

			Float xmin = stream->readSingle(), ymin = stream->readSingle(), zmin = stream->readSingle();
			Float xmax = stream->readSingle(), ymax = stream->readSingle(), zmax = stream->readSingle();
			AABB aabb = AABB(Point(xmin, ymin, zmin), Point(xmax, ymax, zmax));

			Vector3i res = Vector3i(stream);
			int nCells = res.x * res.y * res.z;

			int numBlocks = 0;
			while (!stream->isEOF()) {
				Vector3i block = Vector3i(stream);
				Assert(block.x >= 0 && block.y >= 0 && block.z >= 0
					&& block.x < res.x && block.y < res.y && block.z < res.z);

				Properties props("gridvolume");
				props.setString("filename", formatString("%s%03i_%03i_%03i%s",
					argv[6], block.x, block.y, block.z, argv[7]));
				props.setBoolean("sendData", false);

				VolumeDataSource *ori = static_cast<VolumeDataSource *> (PluginManager::getInstance()->
					createObject(MTS_CLASS(VolumeDataSource), props));
				ori->configure();

				//Log(EInfo, "Loading grid %03i_%03i_%03i", block.x, block.y, block.z);

				AABB bbox = ori->getAABB();

				GridData s;
				downSample(ori, s);

				std::string filename(formatString("%s%03i_%03i_%03i%s", argv[6], block.x, block.y, block.z, argv[8]));
				ref<FileStream> outFile = new FileStream(filename.c_str(), FileStream::ETruncReadWrite);

				writeVolume(s, bbox, ori->getChannels(), outFile);

				++numBlocks;
			}
			Log(EInfo, "%i blocks total, %s, resolution=%s", numBlocks,
				aabb.toString().c_str(), res.toString().c_str());
		}

		return 0;
	}
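downSample() itself is not shown in Example #7, but given the integer scale factors parsed from the command line, a box filter that averages each scale.x * scale.y * scale.z block of source voxels is the natural reading. A hedged sketch under that assumption; Grid is a hypothetical container using the same (k * y + j) * x + i layout as Example #6.

#include <cstddef>
#include <vector>

struct Grid {
    int x, y, z, channels;
    std::vector<float> data;  // layout: (((k * y + j) * x + i) * channels + c)
    float &at(int i, int j, int k, int c)
    {
        return data[(((std::size_t)k * y + j) * x + i) * channels + c];
    }
};

// Average each (sx, sy, sz) block of the source into one destination voxel.
static Grid boxDownSample(Grid &src, int sx, int sy, int sz)
{
    Grid dst{src.x / sx, src.y / sy, src.z / sz, src.channels, {}};
    dst.data.assign((std::size_t)dst.x * dst.y * dst.z * dst.channels, 0.0f);
    const float inv = 1.0f / (sx * sy * sz);
    for (int k = 0; k < dst.z; ++k)
        for (int j = 0; j < dst.y; ++j)
            for (int i = 0; i < dst.x; ++i)
                for (int c = 0; c < src.channels; ++c) {
                    float sum = 0.0f;
                    for (int dk = 0; dk < sz; ++dk)
                        for (int dj = 0; dj < sy; ++dj)
                            for (int di = 0; di < sx; ++di)
                                sum += src.at(i * sx + di, j * sy + dj,
                                              k * sz + dk, c);
                    dst.at(i, j, k, c) = sum * inv;
                }
    return dst;
}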
Example #8
void
Hdf5VolumeStore::saveBoundaries(const ExplicitVolume<float>& boundaries) {

	writeVolume(boundaries, "boundaries");
}
Example #9
void
Hdf5VolumeStore::saveIntensities(const ExplicitVolume<float>& intensities) {

	writeVolume(intensities, "intensities");
}
Example #10
void
Hdf5VolumeStore::saveGroundTruth(const ExplicitVolume<int>& labels) {

	writeVolume(labels, "groundtruth");
}