// Read up to 'count' points from a point-major BPF file, appending them
// to 'view'.  Returns the number of points actually read.
point_count_t BpfReader::readPointMajor(PointViewPtr view, point_count_t count)
{
    // Point-major layout stores every dimension of point 0, then every
    // dimension of point 1, ... so a single seek suffices.
    PointId outId = view->size();
    PointId srcId = m_index;
    point_count_t numRead = 0;

    seekPointMajor(srcId);
    while (numRead < count && srcId < numPoints())
    {
        // Pull one float per dimension, applying the per-dimension offset
        // from the BPF header.
        for (auto& dim : m_dims)
        {
            float val;
            m_stream >> val;
            view->setField(dim.m_id, outId, val + dim.m_offset);
        }

        // Transformation only applies to X, Y and Z
        double x = view->getFieldAs<double>(Dimension::Id::X, outId);
        double y = view->getFieldAs<double>(Dimension::Id::Y, outId);
        double z = view->getFieldAs<double>(Dimension::Id::Z, outId);
        m_header.m_xform.apply(x, y, z);
        view->setField(Dimension::Id::X, outId, x);
        view->setField(Dimension::Id::Y, outId, y);
        view->setField(Dimension::Id::Z, outId, z);

        if (m_cb)
            m_cb(*view, outId);

        ++srcId;
        ++numRead;
        ++outId;
    }
    m_index = srcId;
    return numRead;
}
// Register 'moving' onto 'fixed' with PCL's iterative-closest-point,
// writing the aligned coordinates back into 'moving' and recording the
// transform/convergence/fitness as metadata.  Returns 'moving'.
PointViewPtr IcpFilter::icp(PointViewPtr fixed, PointViewPtr moving) const
{
    using Point = pcl::PointXYZ;
    using Cloud = pcl::PointCloud<Point>;

    // Convert both views to PCL clouds.
    Cloud::Ptr fixedCloud(new Cloud());
    pclsupport::PDALtoPCD(fixed, *fixedCloud);
    Cloud::Ptr movingCloud(new Cloud());
    pclsupport::PDALtoPCD(moving, *movingCloud);

    // Align the moving cloud against the fixed one.
    pcl::IterativeClosestPoint<Point, Point> icp;
    icp.setInputSource(movingCloud);
    icp.setInputTarget(fixedCloud);
    Cloud result;
    icp.align(result);

    MetadataNode root = getMetadata();
    // I couldn't figure out the template-fu to get
    // `MetadataNodeImpl::setValue` to work for all Eigen matrices with one
    // function, so I'm just brute-forcing the cast for now.
    root.add("transform",
        Eigen::MatrixXd(icp.getFinalTransformation().cast<double>()));
    root.add("converged", icp.hasConverged());
    root.add("fitness", icp.getFitnessScore());

    // Copy the aligned coordinates back into the moving view.
    assert(moving->size() == result.points.size());
    for (PointId i = 0; i < moving->size(); ++i)
    {
        const auto& pt = result.points[i];
        moving->setField(Dimension::Id::X, i, pt.x);
        moving->setField(Dimension::Id::Y, i, pt.y);
        moving->setField(Dimension::Id::Z, i, pt.z);
    }

    return moving;
}
// Fill 'v' with 'count' synthetic points: X is the point index, Y is ten
// times the index, and Z is a fixed constant.  Returns 'count'.
virtual point_count_t read(PointViewPtr v, point_count_t count)
{
    using namespace Dimension;

    for (PointId id = 0; id < count; ++id)
    {
        v->setField(Id::X, id, id);
        v->setField(Id::Y, id, id * 10);
        v->setField(Id::Z, id, 1.152);
    }
    return count;
}
// Read up to 'count' points from the QFIT stream, appending them after
// any points already in 'data'.  (Fragment: the tail of this function —
// including the QFIT_Format_14 branch — is not visible here.)
point_count_t QfitReader::read(PointViewPtr data, point_count_t count)
{
    if (!m_istream->good())
    {
        throw pdal_error("QFIT file stream is no good!");
    }
    if (m_istream->stream()->eof())
    {
        throw pdal_error("QFIT file stream is eof!");
    }

    // Never read past the end of the file.
    count = std::min(m_numPoints - m_index, count);
    // One fixed-size record per point; endianness switches per file.
    std::vector<char> buf(m_size);
    PointId nextId = data->size();
    point_count_t numRead = 0;
    while (count--)
    {
        m_istream->get(buf);
        SwitchableExtractor extractor(buf.data(), m_size, m_littleEndian);

        // always read the base fields
        {
            int32_t time, y, xi, z, start_pulse, reflected_pulse,
                scan_angle, pitch, roll;
            extractor >> time >> y >> xi >> z >> start_pulse >>
                reflected_pulse >> scan_angle >> pitch >> roll;
            // Coordinates are stored in millionths of a degree; the
            // optional flip maps 0..360 longitudes to -180..180.
            double x = xi / 1000000.0;
            if (m_flip_x && x > 180)
                x -= 360;
            data->setField(Dimension::Id::OffsetTime, nextId, time);
            data->setField(Dimension::Id::Y, nextId, y / 1000000.0);
            data->setField(Dimension::Id::X, nextId, x);
            data->setField(Dimension::Id::Z, nextId, z * m_scale_z);
            data->setField(Dimension::Id::StartPulse, nextId, start_pulse);
            data->setField(Dimension::Id::ReflectedPulse, nextId,
                reflected_pulse);
            // Angles are stored in thousandths of a degree.
            data->setField(Dimension::Id::ScanAngleRank, nextId,
                scan_angle / 1000.0);
            data->setField(Dimension::Id::Pitch, nextId, pitch / 1000.0);
            data->setField(Dimension::Id::Roll, nextId, roll / 1000.0);
        }

        // Format 12 appends PDOP (stored in tenths) and pulse width.
        if (m_format == QFIT_Format_12)
        {
            int32_t pdop, pulse_width;
            extractor >> pdop >> pulse_width;
            data->setField(Dimension::Id::Pdop, nextId, pdop / 10.0);
            data->setField(Dimension::Id::PulseWidth, nextId, pulse_width);
        }
        else if (m_format == QFIT_Format_14)
// Run the progressive morphological filter over 'input'.  Ground returns
// may be labeled in place (--classify) and/or extracted into a new view
// (--extract); otherwise the input passes through unchanged.
PointViewSet PMFFilter::run(PointViewPtr input)
{
    bool logOutput = log()->getLevel() > LogLevel::Debug1;
    if (logOutput)
        log()->floatPrecision(8);
    log()->get(LogLevel::Debug2) << "Process PMFFilter...\n";

    auto idx = processGround(input);

    PointViewSet viewSet;

    // Nothing to do: pass the input through untouched.
    if (idx.empty() || !(m_classify || m_extract))
    {
        if (idx.empty())
            log()->get(LogLevel::Debug2)
                << "Filtered cloud has no ground returns!\n";
        if (!(m_classify || m_extract))
            log()->get(LogLevel::Debug2)
                << "Must choose --classify or --extract\n";
        // return the input buffer unchanged
        viewSet.insert(input);
        return viewSet;
    }

    if (m_classify)
    {
        log()->get(LogLevel::Debug2) << "Labeled " << idx.size()
                                     << " ground returns!\n";
        // set the classification label of ground returns as 2
        // (corresponding to ASPRS LAS specification)
        for (const auto& i : idx)
            input->setField(Dimension::Id::Classification, i, 2);
        viewSet.insert(input);
    }

    if (m_extract)
    {
        log()->get(LogLevel::Debug2) << "Extracted " << idx.size()
                                     << " ground returns!\n";
        // create new PointView containing only ground returns
        PointViewPtr output = input->makeNew();
        for (const auto& i : idx)
            output->appendPoint(*input, i);
        viewSet.erase(input);
        viewSet.insert(output);
    }

    return viewSet;
}
// Read up to 'num' points from the raster: one point per cell (row
// major), with X/Y from the cell coordinates and one extra dimension per
// band ("band-1", "band-2", ...).  Returns the number of points added.
//
// Fixes vs. the previous version: the coordinate loop now honors the
// clamped 'count' instead of always emitting every raster cell; band
// values are written at the same point ids as the coordinates (previously
// they started at absolute id 0, misaligning whenever the view already
// held points); m_index is advanced; and the count of points actually
// added is returned instead of view->size().
point_count_t GDALReader::read(PointViewPtr view, point_count_t num)
{
    // Clamp to the points remaining in the raster.
    point_count_t count = std::min(num, m_count - m_index);
    PointId startId = view->size();

    // NOTE(review): resumed reads are still unsupported -- both the cell
    // walk and the band buffers always start at the top-left/offset 0,
    // ignoring m_index.  Confirm callers always read in a single shot.
    PointId nextId = startId;
    point_count_t numPts = 0;
    std::array<double, 2> coords;
    for (int row = 0;
        row < m_raster->m_raster_y_size && numPts < count; ++row)
    {
        for (int col = 0;
            col < m_raster->m_raster_x_size && numPts < count; ++col)
        {
            m_raster->pixelToCoord(col, row, coords);
            view->setField(Dimension::Id::X, nextId, coords[0]);
            view->setField(Dimension::Id::Y, nextId, coords[1]);
            nextId++;
            numPts++;
        }
    }

    std::vector<uint8_t> band;
    std::vector<Dimension::Type> band_types =
        m_raster->getPDALDimensionTypes();
    for (int b = 0; b < m_raster->m_band_count; ++b)
    {
        // Bands count from 1
        m_raster->readBand(band, b + 1);
        std::stringstream oss;
        oss << "band-" << (b + 1);
        log()->get(LogLevel::Info) << "Read band '" << oss.str() << "'" <<
            std::endl;

        Dimension::Id d = view->layout()->findDim(oss.str());
        size_t dimSize = Dimension::size(band_types[b]);
        uint8_t* p = band.data();
        // Write band values at the ids used for X/Y above so the values
        // stay aligned even when the view wasn't empty on entry.
        for (point_count_t i = 0; i < count; ++i)
        {
            view->setField(d, band_types[b], startId + i, p);
            p = p + dimSize;
        }
    }

    m_index += count;
    return count;
}
// Run the simple morphological filter over 'view'.  Ground returns may
// be labeled in place (--classify) and/or copied to a fresh view
// (--extract); if neither applies, the view passes through unchanged.
PointViewSet SMRFilter::run(PointViewPtr view)
{
    log()->get(LogLevel::Info) << "run: Process SMRFilter...\n";

    std::vector<PointId> idx = processGround(view);

    PointViewSet viewSet;

    bool haveWork = !idx.empty() && (m_classify || m_extract);
    if (!haveWork)
    {
        if (idx.empty())
            log()->get(LogLevel::Info)
                << "run: Filtered cloud has no ground returns!\n";
        if (!(m_classify || m_extract))
            log()->get(LogLevel::Info)
                << "run: Must choose --classify or --extract\n";
        // return the view buffer unchanged
        viewSet.insert(view);
        return viewSet;
    }

    if (m_classify)
    {
        log()->get(LogLevel::Info) << "run: Labeled " << idx.size()
                                   << " ground returns!\n";
        // set the classification label of ground returns as 2
        // (corresponding to ASPRS LAS specification)
        for (PointId i : idx)
            view->setField(Dimension::Id::Classification, i, 2);
        viewSet.insert(view);
    }
    if (m_extract)
    {
        log()->get(LogLevel::Info) << "run: Extracted " << idx.size()
                                   << " ground returns!\n";
        // create new PointView containing only ground returns
        PointViewPtr output = view->makeNew();
        for (PointId i : idx)
            output->appendPoint(*view, i);
        viewSet.erase(view);
        viewSet.insert(output);
    }
    return viewSet;
}
// Read up to 'count' points from a dimension-major BPF file: all values
// of dimension 0 are stored contiguously, then all values of dimension 1,
// etc.  Each dimension pass seeks to its slice and scatters 'count'
// floats into the view; afterwards the header transform is applied to
// X/Y/Z.  Returns the number of points read.
point_count_t BpfReader::readDimMajor(PointViewPtr data, point_count_t count)
{
    PointId idx(0);
    PointId startId = data->size();
    point_count_t numRead = 0;
    for (size_t d = 0; d < m_dims.size(); ++d)
    {
        // Every dimension pass restarts at the current read position.
        idx = m_index;
        PointId nextId = startId;
        numRead = 0;
        seekDimMajor(d, idx);
        for (; numRead < count && idx < numPoints(); idx++, numRead++, nextId++)
        {
            float f;
            m_stream >> f;
            // Apply the per-dimension offset from the BPF header.
            data->setField(m_dims[d].m_id, nextId, f + m_dims[d].m_offset);
        }
    }
    // idx/numRead keep their values from the final dimension pass; every
    // pass reads the same number of points, so this is the new position.
    m_index = idx;

    // Transformation only applies to X, Y and Z
    for (PointId idx = startId; idx < data->size(); idx++)
    {
        double x = data->getFieldAs<double>(Dimension::Id::X, idx);
        double y = data->getFieldAs<double>(Dimension::Id::Y, idx);
        double z = data->getFieldAs<double>(Dimension::Id::Z, idx);
        m_header.m_xform.apply(x, y, z);
        data->setField(Dimension::Id::X, idx, x);
        data->setField(Dimension::Id::Y, idx, y);
        data->setField(Dimension::Id::Z, idx, z);
        if (m_cb)
            m_cb(*data, idx);
    }
    return numRead;
}
// Read up to 'numPts' points from a PTS text stream, one point per line,
// splitting each line into m_dims.size() numeric fields.  Malformed lines
// are logged and skipped; unparsable fields are logged and set to 0.
// Returns the number of points added to 'view'.
//
// Fix: the reported line number previously started at 1 and was bumped
// immediately after getline(), so every diagnostic pointed one line past
// the offending line.  It now starts at 0 and is incremented right after
// each read, making it the 1-based number of the line just read.
point_count_t PtsReader::read(PointViewPtr view, point_count_t numPts)
{
    PointId idx = view->size();
    point_count_t cnt = 0;
    size_t line = 0;
    while (m_istream->good() && cnt < numPts)
    {
        std::string buf;
        StringList fields;

        std::getline(*m_istream, buf);
        line++;
        if (buf.empty())
            continue;
        fields = Utils::split2(buf, m_separator);
        if (fields.size() != m_dims.size())
        {
            log()->get(LogLevel::Error) << "Line " << line << " in '" <<
                m_filename << "' contains " << fields.size() <<
                " fields when " << m_dims.size() << " were expected. "
                "Ignoring." << std::endl;
            continue;
        }

        double d;
        for (size_t i = 0; i < fields.size(); ++i)
        {
            if (!Utils::fromString(fields[i], d))
            {
                log()->get(LogLevel::Error) << "Can't convert "
                    "field '" << fields[i] <<
                    "' to numeric value on line " << line << " in '" <<
                    m_filename << "'. Setting to 0." << std::endl;
                d = 0;
            }
            if (i == 3)
            // Intensity field in PTS is -2048 to 2047, we map to 0 4095
            {
                d += 2048;
            }
            view->setField(m_dims[i], idx, d);
        }
        cnt++;
        idx++;
    }
    return cnt;
}
// Read up to 'count' points from a byte-major BPF file: for each
// dimension, byte 0 of every point's float is stored contiguously, then
// byte 1, etc.  Each float is therefore assembled across four passes in a
// temporary buffer and stored once complete.  Returns the number of
// points read.
//
// Fix: the temporary buffer is allocated with new[], so it must be owned
// by the array specialization std::unique_ptr<uu[]> -- the previous
// single-object form invoked scalar delete on array storage, which is
// undefined behavior.
point_count_t BpfReader::readByteMajor(PointViewPtr data, point_count_t count)
{
    PointId idx(0);
    PointId startId = data->size();
    point_count_t numRead = 0;

    // We need a temp buffer for the point data.
    union uu
    {
        float f;
        uint32_t u32;
    };
    std::unique_ptr<uu[]> uArr(
        new uu[std::min(count, numPoints() - m_index)]);

    for (size_t d = 0; d < m_dims.size(); ++d)
    {
        for (size_t b = 0; b < sizeof(float); ++b)
        {
            idx = m_index;
            numRead = 0;
            PointId nextId = startId;
            seekByteMajor(d, b, idx);
            for (;numRead < count && idx < numPoints();
                idx++, numRead++, nextId++)
            {
                union uu& u = uArr[numRead];

                // The first byte pass clears the accumulator.
                if (b == 0)
                    u.u32 = 0;
                uint8_t u8;
                m_stream >> u8;
                u.u32 |= ((uint32_t)u8 << (b * CHAR_BIT));
                // After the last byte the float is complete: apply the
                // dimension offset and store it.
                if (b == 3)
                {
                    u.f += m_dims[d].m_offset;
                    data->setField(m_dims[d].m_id, nextId, u.f);
                }
            }
        }
    }
    m_index = idx;

    // Transformation only applies to X, Y and Z
    for (PointId idx = startId; idx < data->size(); idx++)
    {
        double x = data->getFieldAs<double>(Dimension::Id::X, idx);
        double y = data->getFieldAs<double>(Dimension::Id::Y, idx);
        double z = data->getFieldAs<double>(Dimension::Id::Z, idx);
        m_header.m_xform.apply(x, y, z);
        data->setField(Dimension::Id::X, idx, x);
        data->setField(Dimension::Id::Y, idx, y);
        data->setField(Dimension::Id::Z, idx, z);
        if (m_cb)
            m_cb(*data, idx);
    }
    return numRead;
}
// Read up to 'count' points from a Terrasolid .bin file, appending to
// 'view'.  (Fragment: the while-loop tail and the rest of this function
// are not visible here.)
point_count_t TerrasolidReader::read(PointViewPtr view, point_count_t count)
{
    count = std::min(count, getNumPoints() - m_index);

    // Read 'count' fixed-size records in one gulp; data is little-endian.
    std::vector<char> buf(m_size * count);
    m_istream->get(buf);
    LeExtractor extractor(buf.data(), buf.size());

    // See https://www.terrasolid.com/download/tscan.pdf
    // This spec is awful, but it's something.
    // The scaling adjustments are different than what we used to do and
    // seem wrong (scaling the offset is odd), but that's what the document
    // says.
    // Also modified the fetch of time/color based on header flag (rather
    // than just not write the data into the buffer).
    PointId nextId = view->size();
    while (!eof())
    {
        if (m_format == TERRASOLID_Format_1)
        {
            // NOTE(review): x, y and z are extracted as single bytes
            // here, which looks far too narrow for coordinates --
            // confirm against the TerraScan spec before trusting the
            // Format 1 path.
            uint8_t classification, flight_line, echo_int, x, y, z;

            extractor >> classification >> flight_line >> echo_int >> x >>
                y >> z;

            view->setField(Dimension::Id::Classification, nextId,
                classification);
            view->setField(Dimension::Id::PointSourceId, nextId,
                flight_line);
            // Only "only" and "first of many" echos get explicit return
            // numbers; other echo codes are left unset.
            switch (echo_int)
            {
            case 0: // only echo
                view->setField(Dimension::Id::ReturnNumber, nextId, 1);
                view->setField(Dimension::Id::NumberOfReturns, nextId, 1);
                break;
            case 1: // first of many echos
                view->setField(Dimension::Id::ReturnNumber, nextId, 1);
                break;
            default: // intermediate echo or last of many echos
                break;
            }
            // Coordinates are de-offset and divided by the header units.
            view->setField(Dimension::Id::X, nextId,
                (x - m_header->OrgX) / m_header->Units);
            view->setField(Dimension::Id::Y, nextId,
                (y - m_header->OrgY) / m_header->Units);
            view->setField(Dimension::Id::Z, nextId,
                (z - m_header->OrgZ) / m_header->Units);
        }

        if (m_format == TERRASOLID_Format_2)
        {
            int32_t x, y, z;
            uint8_t classification, echo_int, flag, mark;
            uint16_t flight_line, intensity;

            extractor >> x >> y >> z >> classification >> echo_int >>
                flag >> mark >> flight_line >> intensity;

            view->setField(Dimension::Id::X, nextId,
                (x - m_header->OrgX) / m_header->Units);
            view->setField(Dimension::Id::Y, nextId,
                (y - m_header->OrgY) / m_header->Units);
            view->setField(Dimension::Id::Z, nextId,
                (z - m_header->OrgZ) / m_header->Units);
            view->setField(Dimension::Id::Classification, nextId,
                classification);
            switch (echo_int)
            {
            case 0: // only echo
                view->setField(Dimension::Id::ReturnNumber, nextId, 1);
                view->setField(Dimension::Id::NumberOfReturns, nextId, 1);
                break;
            case 1: // first of many echos
                view->setField(Dimension::Id::ReturnNumber, nextId, 1);
                break;
            default: // intermediate echo or last of many echos
                break;
            }
            view->setField(Dimension::Id::Flag, nextId, flag);
            view->setField(Dimension::Id::Mark, nextId, mark);
            view->setField(Dimension::Id::PointSourceId, nextId,
                flight_line);
            view->setField(Dimension::Id::Intensity, nextId, intensity);
        }
// Run PCL's radius outlier removal over 'input'.  Points with fewer than
// m_min_neighbors neighbors within m_radius are outliers; they may be
// labeled as noise in place (--classify) and/or the inliers extracted to
// a new view (--extract).
//
// Fix: the complement loop below previously indexed
// inliers->indices[j] without checking j against the list size, reading
// past the end of the vector once every inlier had been matched.
PointViewSet RadiusOutlierFilter::run(PointViewPtr input)
{
    bool logOutput = log()->getLevel() > LogLevel::Debug1;
    if (logOutput)
        log()->floatPrecision(8);
    log()->get(LogLevel::Debug2) << "Process RadiusOutlierFilter...\n";

    // convert PointView to PointXYZ
    typedef pcl::PointCloud<pcl::PointXYZ> Cloud;
    Cloud::Ptr cloud(new Cloud);
    BOX3D bounds;
    input->calculateBounds(bounds);
    pclsupport::PDALtoPCD(input, *cloud, bounds);

    pclsupport::setLogLevel(log()->getLevel());

    // setup the outlier filter
    pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror(true);
    ror.setInputCloud(cloud);
    ror.setMinNeighborsInRadius(m_min_neighbors);
    ror.setRadiusSearch(m_radius);

    pcl::PointCloud<pcl::PointXYZ> output;
    ror.setNegative(true);
    ror.filter(output);

    // filtered to return inliers
    pcl::PointIndicesPtr inliers(new pcl::PointIndices);
    ror.getRemovedIndices(*inliers);

    PointViewSet viewSet;
    if (inliers->indices.empty())
    {
        log()->get(LogLevel::Warning) << "Requested filter would remove all points. Try a larger radius/smaller minimum neighbors.\n";
        viewSet.insert(input);
        return viewSet;
    }

    // The outliers are the complement of the inlier index list.  Guard
    // 'j' so we never index past the end of inliers->indices.
    std::vector<int> outliers(input->size()-inliers->indices.size());
    for (PointId i = 0, j = 0, k = 0; i < input->size(); ++i)
    {
        if (j < inliers->indices.size() &&
            i == (PointId)inliers->indices[j])
        {
            j++;
            continue;
        }
        outliers[k++] = i;
    }

    if (!outliers.empty() && (m_classify || m_extract))
    {
        if (m_classify)
        {
            log()->get(LogLevel::Debug2) << "Labeled " << outliers.size() << " outliers as noise!\n";

            // set the classification label of outlier returns as 18
            // (corresponding to ASPRS LAS specification for high noise)
            for (const auto& i : outliers)
            {
                input->setField(Dimension::Id::Classification, i, 18);
            }

            viewSet.insert(input);
        }

        if (m_extract)
        {
            log()->get(LogLevel::Debug2) << "Extracted " << inliers->indices.size() << " inliers!\n";

            // create new PointView containing only inliers
            PointViewPtr output = input->makeNew();
            for (const auto& i : inliers->indices)
            {
                output->appendPoint(*input, i);
            }

            viewSet.erase(input);
            viewSet.insert(output);
        }
    }
    else
    {
        if (outliers.empty())
            log()->get(LogLevel::Warning) << "Filtered cloud has no outliers!\n";
        if (!(m_classify || m_extract))
            log()->get(LogLevel::Warning) << "Must choose --classify or --extract\n";
        // return the input buffer unchanged
        viewSet.insert(input);
    }

    return viewSet;
}
// Fetch points from a Greyhound server over HTTP and append them to
// 'view'.  The response body is packed point data followed by a trailing
// uint32 point count.  Returns the server-reported point count.
point_count_t GreyhoundReader::read(PointViewPtr view, point_count_t count)
{
    const std::string url(m_params.root() + "read" + m_params.qs());
    log()->get(LogLevel::Debug) << "Reading: " << url << std::endl;

    auto response(m_arbiter->getBinary(url));
    const std::size_t pointSize(view->layout()->pointSize());

    // The point count occupies the last four bytes of the response.
    // NOTE(review): the raw byte copy assumes the wire byte order matches
    // the host's -- confirm against the Greyhound protocol.
    uint32_t numPoints(0);
    std::copy(
        response.data() + response.size() - sizeof(uint32_t),
        response.data() + response.size(),
        reinterpret_cast<char*>(&numPoints));

    log()->get(LogLevel::Debug) << "Fetched " << numPoints << " points" <<
        std::endl;
    log()->get(LogLevel::Debug) << "Fetched " << response.size() <<
        " bytes" << std::endl;

    // Drop the trailing count so only point data remains.
    response.resize(response.size() - sizeof(uint32_t));

    const auto dimTypes(m_readLayout.dimTypes());

#ifdef PDAL_HAVE_LAZPERF
    // Compressed path: decompress point by point, appending each to the
    // view and firing the callback as we go.
    auto cb = [this, &view, &dimTypes](char *buf, size_t bufsize)
    {
        view->setPackedPoint(dimTypes, view->size(), buf);
        if (m_cb)
            m_cb(*view, view->size() - 1);
    };

    LazPerfDecompressor(cb, dimTypes, numPoints).
        decompress(response.data(), response.size());
#else
    // Uncompressed path: fixed-size packed records back to back.
    const char* end(response.data() + response.size());
    for (const char* pos(response.data()); pos < end; pos += pointSize)
    {
        view->setPackedPoint(dimTypes, view->size(), pos);
        if (m_cb)
            m_cb(*view, view->size() - 1);
    }
#endif

    // Flag (rather than drop) points outside the requested bounds.
    if (!m_params.obounds().isNull())
    {
        greyhound::Bounds obounds(m_params.obounds());
        greyhound::Point p;

        for (std::size_t i(0); i < view->size(); ++i)
        {
            p.x = view->getFieldAs<double>(Dimension::Id::X, i);
            p.y = view->getFieldAs<double>(Dimension::Id::Y, i);
            p.z = view->getFieldAs<double>(Dimension::Id::Z, i);

            if (!obounds.contains(p))
                view->setField(Dimension::Id::Omit, i, 1);
        }
    }

    // Assign each point its index as a PointId dimension.
    for (std::size_t i(0); i < view->size(); ++i)
    {
        view->setField(Dimension::Id::PointId, i, i);
    }

    return numPoints;
}
PointViewSet StatisticalOutlierFilter::run(PointViewPtr input) { bool logOutput = log()->getLevel() > LogLevel::Debug1; if (logOutput) log()->floatPrecision(8); log()->get(LogLevel::Debug2) << "Process StatisticalOutlierFilter...\n"; // convert PointView to PointXYZ typedef pcl::PointCloud<pcl::PointXYZ> Cloud; Cloud::Ptr cloud(new Cloud); BOX3D bounds; input->calculateBounds(bounds); pclsupport::PDALtoPCD(input, *cloud, bounds); // PCL should provide console output at similar verbosity level as PDAL int level = log()->getLevel(); switch (level) { case 0: pcl::console::setVerbosityLevel(pcl::console::L_ALWAYS); break; case 1: pcl::console::setVerbosityLevel(pcl::console::L_ERROR); break; case 2: pcl::console::setVerbosityLevel(pcl::console::L_WARN); break; case 3: pcl::console::setVerbosityLevel(pcl::console::L_INFO); break; case 4: pcl::console::setVerbosityLevel(pcl::console::L_DEBUG); break; default: pcl::console::setVerbosityLevel(pcl::console::L_VERBOSE); break; } // setup the outlier filter pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor(true); sor.setInputCloud(cloud); sor.setMeanK(m_meanK); sor.setStddevMulThresh(m_multiplier); pcl::PointCloud<pcl::PointXYZ> output; sor.setNegative(true); sor.filter(output); // filtered to return inliers pcl::PointIndicesPtr inliers(new pcl::PointIndices); sor.getRemovedIndices(*inliers); log()->get(LogLevel::Debug2) << inliers->indices.size() << std::endl; PointViewSet viewSet; if (inliers->indices.empty()) { log()->get(LogLevel::Warning) << "Requested filter would remove all points. 
Try increasing the multiplier.\n"; viewSet.insert(input); return viewSet; } // inverse are the outliers std::vector<int> outliers(input->size()-inliers->indices.size()); for (PointId i = 0, j = 0, k = 0; i < input->size(); ++i) { if (i == (PointId)inliers->indices[j]) { j++; continue; } outliers[k++] = i; } if (!outliers.empty() && (m_classify || m_extract)) { if (m_classify) { log()->get(LogLevel::Debug2) << "Labeled " << outliers.size() << " outliers as noise!\n"; // set the classification label of outlier returns as 18 // (corresponding to ASPRS LAS specification for high noise) for (const auto& i : outliers) { input->setField(Dimension::Id::Classification, i, 18); } viewSet.insert(input); } if (m_extract) { log()->get(LogLevel::Debug2) << "Extracted " << inliers->indices.size() << " inliers!\n"; // create new PointView containing only outliers PointViewPtr output = input->makeNew(); for (const auto& i : inliers->indices) { output->appendPoint(*input, i); } viewSet.erase(input); viewSet.insert(output); } } else { if (outliers.empty()) log()->get(LogLevel::Warning) << "Filtered cloud has no outliers!\n"; if (!(m_classify || m_extract)) log()->get(LogLevel::Warning) << "Must choose --classify or --extract\n"; // return the input buffer unchanged viewSet.insert(input); } return viewSet; }
// Read up to 'countRequested' points.  Each CSD record is a laser pulse
// carrying up to four returns; m_returnIndex tracks which return of the
// current pulse is emitted next, so a call can stop mid-pulse and resume
// on the next call.  Returns the number of points produced.
point_count_t OptechReader::read(PointViewPtr data, point_count_t countRequested)
{
    point_count_t numRead = 0;
    point_count_t dataIndex = data->size();
    while (numRead < countRequested)
    {
        if (m_returnIndex == 0)
        {
            // Starting a fresh pulse: refill the raw buffer if the
            // extractor has been exhausted; stop at end of file.
            if (!m_extractor.good())
            {
                if (m_recordIndex >= m_header.numRecords)
                {
                    break;
                }
                m_recordIndex += fillBuffer();
            }

            m_extractor >> m_pulse.gpsTime >> m_pulse.returnCount >>
                m_pulse.range[0] >> m_pulse.range[1] >> m_pulse.range[2] >>
                m_pulse.range[3] >> m_pulse.intensity[0] >>
                m_pulse.intensity[1] >> m_pulse.intensity[2] >>
                m_pulse.intensity[3] >> m_pulse.scanAngle >>
                m_pulse.roll >> m_pulse.pitch >> m_pulse.heading >>
                m_pulse.latitude >> m_pulse.longitude >> m_pulse.elevation;

            // Pulses without returns produce no points.
            if (m_pulse.returnCount == 0)
            {
                m_returnIndex = 0;
                continue;
            }

            // In all the csd files that we've tested, the longitude
            // values have been less than -2pi.
            if (m_pulse.longitude < -M_PI * 2)
            {
                m_pulse.longitude = m_pulse.longitude + M_PI * 2;
            }
            else if (m_pulse.longitude > M_PI * 2)
            {
                m_pulse.longitude = m_pulse.longitude - M_PI * 2;
            }
        }

        // Georeference this return from its range and scan angle plus
        // the platform attitude (roll/pitch/heading) and GPS position.
        georeference::Xyz gpsPoint = georeference::Xyz(
            m_pulse.longitude, m_pulse.latitude, m_pulse.elevation);
        georeference::RotationMatrix rotationMatrix =
            createOptechRotationMatrix(m_pulse.roll, m_pulse.pitch,
                                       m_pulse.heading);
        georeference::Xyz point = pdal::georeference::georeferenceWgs84(
            m_pulse.range[m_returnIndex], m_pulse.scanAngle,
            m_boresightMatrix, rotationMatrix, gpsPoint);

        // X/Y come back in radians; convert to degrees for the view.
        data->setField(Dimension::Id::X, dataIndex, point.X * 180 / M_PI);
        data->setField(Dimension::Id::Y, dataIndex, point.Y * 180 / M_PI);
        data->setField(Dimension::Id::Z, dataIndex, point.Z);
        data->setField(Dimension::Id::GpsTime, dataIndex, m_pulse.gpsTime);
        // The final supported return slot reports the pulse's full return
        // count rather than its own ordinal.
        if (m_returnIndex == MaximumNumberOfReturns - 1)
        {
            data->setField(Dimension::Id::ReturnNumber, dataIndex,
                           m_pulse.returnCount);
        }
        else
        {
            data->setField(Dimension::Id::ReturnNumber, dataIndex,
                           m_returnIndex + 1);
        }
        data->setField(Dimension::Id::NumberOfReturns, dataIndex,
                       m_pulse.returnCount);
        data->setField(Dimension::Id::EchoRange, dataIndex,
                       m_pulse.range[m_returnIndex]);
        data->setField(Dimension::Id::Intensity, dataIndex,
                       m_pulse.intensity[m_returnIndex]);

        if (m_cb)
            m_cb(*data, dataIndex);

        ++dataIndex;
        ++numRead;
        ++m_returnIndex;
        // Wrap to the next pulse once all of this pulse's returns (or the
        // maximum supported number) have been emitted.
        if (m_returnIndex >= m_pulse.returnCount ||
            m_returnIndex >= MaximumNumberOfReturns)
        {
            m_returnIndex = 0;
        }
    }
    return numRead;
}
// Read up to 'count' points from the Icebridge HDF5 file, one column at
// a time, scattering each column into the matching default dimension.
// Returns the number of points read.
//
// Fix: the scratch buffer is allocated with new[], so it must be owned
// by std::unique_ptr<unsigned char[]> -- the previous single-object form
// invoked scalar delete on array storage, which is undefined behavior.
// Also removed an unused local ('idx').
point_count_t IcebridgeReader::read(PointViewPtr view, point_count_t count)
{
    //All data we read for icebridge is currently 4 bytes wide, so
    //  just allocate once and forget it.
    //This could be a huge allocation. Perhaps we should do something
    //  in the icebridge handler?

    PointId startId = view->size();
    point_count_t remaining = m_hdf5Handler.getNumPoints() - m_index;
    count = std::min(count, remaining);
    std::unique_ptr<unsigned char[]> rawData(
        new unsigned char[count * sizeof(float)]);

    //Not loving the position-linked data, but fine for now.
    Dimension::IdList dims = getDefaultDimensions();
    auto di = dims.begin();
    for (auto ci = hdf5Columns.begin(); ci != hdf5Columns.end(); ++ci, ++di)
    {
        PointId nextId = startId;
        const hdf5::Hdf5ColumnData& column = *ci;

        try
        {
            m_hdf5Handler.getColumnEntries(rawData.get(), column.name, count,
                m_index);
            void *p = (void *)rawData.get();

            // This is ugly but avoids a test in a tight loop.
            if (column.predType == H5::PredType::NATIVE_FLOAT)
            {
                // Offset time is in ms but icebridge stores in seconds.
                if (*di == Dimension::Id::OffsetTime)
                {
                    float *fval = (float *)p;
                    for (PointId i = 0; i < count; ++i)
                    {
                        view->setField(*di, nextId++, *fval * 1000);
                        fval++;
                    }
                }
                else
                {
                    float *fval = (float *)p;
                    for (PointId i = 0; i < count; ++i)
                        view->setField(*di, nextId++, *fval++);
                }
            }
            else if (column.predType == H5::PredType::NATIVE_INT)
            {
                int32_t *ival = (int32_t *)p;
                for (PointId i = 0; i < count; ++i)
                    view->setField(*di, nextId++, *ival++);
            }
        }
        catch(...)
        {
            throw icebridge_error("Error fetching column data");
        }
    }
    // NOTE(review): m_index is not advanced here -- presumably handled by
    // the caller; confirm before supporting incremental reads.
    return count;
}