void MEShowNodesWindow::on_showNode()
{
   m_selectedRow = m_selection->get_selected();
   if ( m_selectedRow ) {
      Gtk::TreePath path = m_listStore->get_path( m_selectedRow );
      mc2dbg8 << "Row " << path.to_string() << " selected!!!" << endl;
      Glib::ustring tmpUstring = path.to_string();
      const char* tmpCharStr = tmpUstring.c_str();
      uint32 rowNbr = strtoul(tmpCharStr, NULL, 0);
      mc2dbg8 << "Row " << rowNbr << " selected!!!" << endl;

      uint32 fromNodeID = processRow( *m_selectedRow );
      OldItem* item = m_mapArea->getMap()->itemLookup(fromNodeID);
      if ( item == NULL || item->getGfxData() == NULL ) {
         // The ID did not resolve to an item with geometry; nothing to show.
         return;
      }
      int32 lat, lon;
      if ((fromNodeID & 0x80000000) != 0) {
         // Node 1 (high bit set): use the first coordinate of the geometry.
         lat = item->getGfxData()->getLat(0,0);
         lon = item->getGfxData()->getLon(0,0);
      } else {
         // Node 0: use the last coordinate of the geometry.
         lat = item->getGfxData()->getLastLat(0);
         lon = item->getGfxData()->getLastLon(0);
      }
      // Zoom to a box of +/- delta around the node coordinate.
      const int32 delta = 50000;
      m_mapArea->zoomToBBox(lat+delta, lon-delta, lat-delta, lon+delta);
   }
}
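// processRow() is not shown in this snippet; it maps a tree-view row to the
// node ID it represents. A minimal, hypothetical sketch in gtkmm follows,
// assuming a column record member m_columns with a
// Gtk::TreeModelColumn<uint32> named m_nodeID (both names are illustrative
// assumptions, not taken from the source):
uint32 MEShowNodesWindow::processRowSketch( const Gtk::TreeModel::Row& row )
{
   // Read the node ID stored in the row's model column.
   uint32 nodeID = row[ m_columns.m_nodeID ];
   return nodeID;
}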
QImage * GaussianFilter::filter(const QImage & source) const
{
    uint w = source.width();
    uint h = source.height();
    QImage * dest = new QImage(source);
    QRgb * to = reinterpret_cast<QRgb *>(dest->bits());
    const QRgb * rgb = reinterpret_cast<const QRgb *>(source.constBits());
    QImage * mediumImage = new QImage(source);
    QRgb * medium = reinterpret_cast<QRgb *>(mediumImage->bits());

    // First pass: filter along rows into the intermediate image.
    for (int y = area.y(); y < area.y() + area.height(); ++y) {
        for (int x = area.x(); x < area.x() + area.width(); ++x) {
            medium[y * w + x] = processRow(rgb, x, y, w, h);
        }
    }
    // Second pass: filter along columns from the intermediate image into the result.
    for (int y = area.y(); y < area.y() + area.height(); ++y) {
        for (int x = area.x(); x < area.x() + area.width(); ++x) {
            to[y * w + x] = processColumn(medium, x, y, w, h);
        }
    }
    delete mediumImage;
    return dest;
}
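// The row-then-column structure above exploits the separability of the 2D
// Gaussian kernel into two 1D convolutions, cutting the per-pixel cost from
// O(k^2) to O(2k) for a kernel of width k. processRow() itself is not shown;
// the helper below is a minimal, hypothetical sketch of such a horizontal
// pass, assuming a normalized odd-length 1D kernel is passed in (the function
// name and the kernel parameter are illustrative, not the class's actual API):
static QRgb gaussianRowSampleSketch(const QRgb * rgb, int x, int y, uint w,
                                    const std::vector<double> & kernel)
{
    const int radius = static_cast<int>(kernel.size()) / 2;
    double r = 0.0, g = 0.0, b = 0.0;
    for (int k = -radius; k <= radius; ++k) {
        // Clamp to the image border so edge pixels reuse the nearest sample.
        int xk = qBound(0, x + k, static_cast<int>(w) - 1);
        QRgb px = rgb[y * w + xk];
        double weight = kernel[k + radius];
        r += weight * qRed(px);
        g += weight * qGreen(px);
        b += weight * qBlue(px);
    }
    return qRgb(static_cast<int>(r + 0.5), static_cast<int>(g + 0.5),
                static_cast<int>(b + 0.5));
}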
void MEShowNodesWindow::on_clickShowAll()
{
   if ( m_listStore->children().size() == 0 ) {
      return;
   }

   // Collect the node IDs of all rows in the list.
   MC2BoundingBox bbox;
   vector<uint32> allNodes;
   typedef Gtk::TreeModel::Children type_children;
   type_children children = m_listStore->children();
   for ( type_children::iterator iter = children.begin();
         iter != children.end(); ++iter ) {
      uint32 nodeID = processRow(*iter, false);
      allNodes.push_back(nodeID);
   }

   // Highlight every node and grow the bounding box to cover its geometry.
   for ( vector<uint32>::iterator it = allNodes.begin();
         it != allNodes.end(); ++it ) {
      m_mapArea->highlightItem(*it, false, false, false);
      OldItem* item = m_mapArea->getMap()->itemLookup(*it);
      if ( item == NULL || item->getGfxData() == NULL ) {
         // Skip IDs that do not resolve to an item with geometry.
         continue;
      }
      GfxData* gfx = item->getGfxData();
      GfxData::const_filter_iterator polyEnd = gfx->endFilteredPoly(0, 0);
      for ( GfxData::const_filter_iterator coordIt = gfx->beginFilteredPoly(0, 0);
            coordIt != polyEnd; ++coordIt ) {
         MC2Coordinate currCoord = *coordIt;
         bbox.update(currCoord.lat, currCoord.lon);
      }
   }

   const int32 delta = 50000;
   m_mapArea->zoomToBBox(bbox.getMaxLat()+delta, bbox.getMinLon()-delta,
                         bbox.getMinLat()-delta, bbox.getMaxLon()+delta);
}
Extent::Ptr RowAnalysisModule::getSharedExtent() {
    Extent::Ptr e = source.getSharedExtent();
    if (e == NULL) {
        completeProcessing();
        return e;
    }
    if (!prepared) {
        firstExtent(*e);
    }
    newExtentHook(*e);
    series.setExtent(e);
    if (!prepared) {
        prepareForProcessing();
        prepared = true;
        if (!where_expr_str.empty()) {
            where_expr = DSExpr::make(series, where_expr_str);
        }
    }
    for (; series.morerecords(); ++series) {
        if (!where_expr || where_expr->valBool()) {
            ++processed_rows;
            processRow();
        } else {
            ++ignored_rows;
        }
    }
    series.clearExtent();
    return e;
}
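// RowAnalysisModule drives a template-method pattern: getSharedExtent() pulls
// each extent, binds it to `series`, evaluates the optional where_expr filter,
// and calls the subclass's processRow() once per matching record. A minimal,
// hypothetical subclass sketch follows; the column name "bytes" and the
// Int64Field accessor used here are assumptions for illustration, not details
// taken from this source:
class ByteSumModule : public RowAnalysisModule {
public:
    ByteSumModule(DataSeriesModule &source)
        : RowAnalysisModule(source),
          bytes(series, "bytes"), // assumed column name
          total(0)
    { }

    virtual void processRow() {
        // Invoked once for every record that passes the where_expr filter.
        total += bytes.val();
    }

    virtual void printResult() {
        std::cout << "total bytes: " << total << "\n";
    }

private:
    Int64Field bytes;
    int64_t total;
};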
bool bindNext()
{
    roxiemem::OwnedConstRoxieRow nextRow = (const byte *) input->ungroupedNextRow();
    if (!nextRow)
        return false;
    processRow((const byte *) nextRow.get()); // Bind the variables for the current row
    return true;
}
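// A caller typically drives bindNext() in a pull loop: each iteration binds
// one input row's variables, then runs the per-row work. The sketch below is
// illustrative; executeStatement() is a hypothetical placeholder for whatever
// consumes the bound variables:
void processAllRowsSketch()
{
    while (bindNext())
    {
        // The current row's variables are bound; run the per-row action.
        executeStatement(); // hypothetical per-row consumer
    }
}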
cv::Mat process(int newSize)
{
    image = Thresholding(image).transform();

    // Cut edges: trim blank rows/columns from each side until processRow /
    // processCol reports content.
    for (int i = 0; i < image.rows; i++) {
        if (!processRow(i)) break;
    }
    for (int i = image.rows - 1; i >= 0; i--) {
        if (!processRow(i)) break;
    }
    for (int i = 0; i < image.cols; i++) {
        if (!processCol(i)) break;
    }
    for (int i = image.cols - 1; i >= 0; i--) {
        if (!processCol(i)) break;
    }

    // Label connected components of black pixels with a DFS flood fill.
    for (size_t i = 0; i < used.size(); i++)
        for (size_t j = 0; j < used[i].size(); j++)
            used[i][j] = 0;
    int L = 1;
    for (int i = 0; i < image.rows; i++)
        for (int j = 0; j < image.cols; j++)
            if (image.at<uchar>(i, j) == BLACK && used[i][j] == 0) {
                dfs(i, j, L);
                L++;
            }

    // Keep only the largest component; erase everything else.
    std::map<int,int> cnt;
    for (int i = 0; i < image.rows; i++)
        for (int j = 0; j < image.cols; j++)
            if (used[i][j] != 0)
                cnt[used[i][j]]++;
    int max_value = 0;
    int best_comp = 0;
    for (std::map<int,int>::iterator it = cnt.begin(); it != cnt.end(); it++)
        if (it->second > max_value) {
            max_value = it->second;
            best_comp = it->first;
        }
    for (int i = 0; i < image.rows; i++)
        for (int j = 0; j < image.cols; j++)
            if (used[i][j] != best_comp)
                image.at<uchar>(i, j) = WHITE;

    // Compute the bounding box of the remaining black pixels and pad it to a
    // square before extracting.
    int minRow = image.rows;
    int maxRow = -1;
    int minCol = image.cols;
    int maxCol = -1;
    for (int i = 0; i < image.rows; i++)
        for (int j = 0; j < image.cols; j++)
            if (image.at<uchar>(i, j) == BLACK) {
                minRow = std::min(minRow, i);
                maxRow = std::max(maxRow, i);
                minCol = std::min(minCol, j);
                maxCol = std::max(maxCol, j);
            }
    int X = minCol;
    int Y = minRow;
    int W = maxCol - minCol + 1;
    int H = maxRow - minRow + 1;
    int X0 = X - std::max(0, H - W) / 2;
    int Y0 = Y - std::max(0, W - H) / 2;
    try {
        image = Image::getSubMatrix(image, X0, Y0, std::max(W, H), std::max(W, H));
    } catch (...) {
        // Keep the uncropped image if the padded square falls outside bounds.
    }

    cv::Mat newImage;
    cv::resize(image, newImage, cv::Size(newSize, newSize));
    newImage = Thresholding(newImage).transform();
    return newImage;
}
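// dfs() is not shown in this snippet. Below is a minimal, hypothetical flood
// fill matching the call sites above, assuming `used` is a
// std::vector<std::vector<int>> the size of `image`, 4-connectivity, and an
// explicit stack to avoid deep recursion on large blobs:
void dfsSketch(int startRow, int startCol, int label)
{
    std::vector<std::pair<int,int> > stack;
    stack.push_back(std::make_pair(startRow, startCol));
    used[startRow][startCol] = label;
    static const int dr[4] = { -1, 1, 0, 0 };
    static const int dc[4] = { 0, 0, -1, 1 };
    while (!stack.empty()) {
        std::pair<int,int> cur = stack.back();
        stack.pop_back();
        for (int d = 0; d < 4; d++) {
            int r = cur.first + dr[d];
            int c = cur.second + dc[d];
            if (r < 0 || r >= image.rows || c < 0 || c >= image.cols)
                continue;
            // Spread the label to adjacent unlabeled black pixels.
            if (image.at<uchar>(r, c) == BLACK && used[r][c] == 0) {
                used[r][c] = label;
                stack.push_back(std::make_pair(r, c));
            }
        }
    }
}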
/** Process selected rows */
void ReflMainViewPresenter::process()
{
  if(m_model->rowCount() == 0)
  {
    m_view->giveUserWarning("Cannot process an empty Table", "Warning");
    return;
  }

  std::vector<size_t> rows = m_view->getSelectedRowIndexes();
  if(rows.size() == 0)
  {
    //Does the user want to abort?
    if(!m_view->askUserYesNo("This will process all rows in the table. Continue?", "Process all rows?"))
      return;

    //They want to process all rows, so populate rows with every index in the model
    for(size_t idx = 0; idx < m_model->rowCount(); ++idx)
      rows.push_back(idx);
  }

  //Maps group numbers to the list of rows in that group we want to process
  std::map<int, std::vector<size_t> > groups;
  for(auto it = rows.begin(); it != rows.end(); ++it)
  {
    try
    {
      validateRow(*it);
      const int group = m_model->Int(*it, COL_GROUP);
      groups[group].push_back(*it);
    }
    catch(std::exception& ex)
    {
      const std::string rowNo = Mantid::Kernel::Strings::toString<size_t>(*it + 1);
      m_view->giveUserCritical("Error found in row " + rowNo + ":\n" + ex.what(), "Error");
      return;
    }
  }

  int progress = 0;
  //Each group and each row within it count as a progress step.
  const int maxProgress = (int)(rows.size() + groups.size());
  m_view->setProgressRange(progress, maxProgress);
  m_view->setProgress(progress);

  for(auto gIt = groups.begin(); gIt != groups.end(); ++gIt)
  {
    const std::vector<size_t> groupRows = gIt->second;

    //Process each row individually
    for(auto rIt = groupRows.begin(); rIt != groupRows.end(); ++rIt)
    {
      try
      {
        processRow(*rIt);
        m_view->setProgress(++progress);
      }
      catch(std::exception& ex)
      {
        const std::string rowNo = Mantid::Kernel::Strings::toString<size_t>(*rIt + 1);
        const std::string message = "Error encountered while processing row " + rowNo + ":\n";
        m_view->giveUserCritical(message + ex.what(), "Error");
        m_view->setProgress(0);
        return;
      }
    }

    //Stitch the group's outputs together
    try
    {
      stitchRows(groupRows);
      m_view->setProgress(++progress);
    }
    catch(std::exception& ex)
    {
      const std::string groupNo = Mantid::Kernel::Strings::toString<int>(gIt->first);
      const std::string message = "Error encountered while stitching group " + groupNo + ":\n";
      m_view->giveUserCritical(message + ex.what(), "Error");
      m_view->setProgress(0);
      return;
    }
  }
}
virtual void process() override
{
    ActPrintLog("INDEXWRITE: Start");
    init();
    IRowStream *stream = inputStream;
    ThorDataLinkMetaInfo info;
    input->getMetaInfo(info);
    outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
    start();
    if (refactor)
    {
        assertex(isLocal);
        if (active)
        {
            unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
            assertex(0 == container.queryJob().querySlaves() % targetWidth);
            unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
            unsigned myPart = queryJobChannel().queryMyRank();

            IArrayOf<IRowStream> streams;
            streams.append(*LINK(stream));
            --partsPerNode;

            // Should this be merging 1,11,21,31 etc.
            unsigned p=0;
            unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
            for (; p<partsPerNode; p++)
            {
                streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
            }
            ICompare *icompare = helper->queryCompare();
            assertex(icompare);
            Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
            myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
            stream = myInputStream;
        }
        else // serve nodes, creating merged parts
            rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
    }
    processed = THORDATALINK_STARTED;

    // single part key support
    // has to serially pull all data from nodes 2-N
    // nodes 2-N, could/should start pushing some data (as it's supposed to be small) to cut down on serial nature.
    unsigned node = queryJobChannel().queryMyRank();
    if (singlePartKey)
    {
        if (1 == node)
        {
            try
            {
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                for (;;)
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    if (abortSoon)
                        return;
                    processRow(row);
                }

                unsigned node = 2;
                while (node <= container.queryJob().querySlaves())
                {
                    Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                    CMessageBuffer mb;
                    Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                    CThorStreamDeserializerSource rowSource;
                    rowSource.setStream(stream);
                    bool successSR;
                    for (;;)
                    {
                        {
                            BooleanOnOff tf(receivingTag2);
                            successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                        }
                        if (successSR)
                        {
                            if (rowSource.eos())
                                break;
                            Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                            do
                            {
                                RtlDynamicRowBuilder rowBuilder(allocator);
                                size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                processRow(fRow);
                            }
                            while (!rowSource.eos());
                        }
                    }
                    node++;
                }
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, true);
                throw;
            }
            close(*partDesc, partCrc, true);
            stop();
        }
        else
        {
            CMessageBuffer mb;
            CMemoryRowSerializer mbs(mb);
            Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
            for (;;)
            {
                BooleanOnOff tf(receivingTag2);
                if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                {
                    if (abortSoon)
                        break;
                    mb.clear();
                    do
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        serializer->serialize(mbs, (const byte *)row.get());
                    }
                    while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                    if (!queryJobChannel().queryJobComm().reply(mb))
                        throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                    if (0 == mb.length())
                        break;
                }
            }
        }
    }
    else
    {
        if (!refactor || active)
        {
            try
            {
                StringBuffer partFname;
                getPartFilename(*partDesc, 0, partFname);
                ActPrintLog("INDEXWRITE: process: handling fname : %s", partFname.str());
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                ActPrintLog("INDEXWRITE: write");

                BooleanOnOff tf(receiving);
                if (!refactor || !active)
                    receiving = false;
                do
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    processRow(row);
                }
                while (!abortSoon);
                ActPrintLog("INDEXWRITE: write level 0 complete");
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
                throw;
            }
            close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
            stop();

            ActPrintLog("INDEXWRITE: Wrote %" RCPF "d records", processed & THORDATALINK_COUNT_MASK);

            if (buildTlk)
            {
                ActPrintLog("INDEXWRITE: sending rows");
                NodeInfoArray tlkRows;

                CMessageBuffer msg;
                if (firstNode())
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        if (enableTlkPart0)
                            tlkRows.append(* new CNodeInfo(0, firstRow.get(), firstRowSize, totalCount));
                        tlkRows.append(* new CNodeInfo(1, lastRow.get(), lastRowSize, totalCount));
                    }
                }
                else
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        CNodeInfo row(queryJobChannel().queryMyRank(), lastRow.get(), lastRowSize, totalCount);
                        row.serialize(msg);
                    }
                    queryJobChannel().queryJobComm().send(msg, 1, mpTag);
                }

                if (firstNode())
                {
                    ActPrintLog("INDEXWRITE: Waiting on tlk to complete");

                    // JCSMORE if refactor==true, is rowsToReceive here right??
                    unsigned rowsToReceive = (refactor ? (tlkDesc->queryOwner().numParts()-1) : container.queryJob().querySlaves()) -1; // -1 'cos got my own in array already
                    ActPrintLog("INDEXWRITE: will wait for info from %d slaves before writing TLK", rowsToReceive);
                    while (rowsToReceive--)
                    {
                        msg.clear();
                        receiveMsg(msg, RANK_ALL, mpTag); // NH->JCS RANK_ALL_OTHER not supported for recv
                        if (abortSoon)
                            return;
                        if (msg.length())
                        {
                            CNodeInfo *ni = new CNodeInfo();
                            ni->deserialize(msg);
                            tlkRows.append(*ni);
                        }
                    }
                    tlkRows.sort(CNodeInfo::compare);

                    StringBuffer path;
                    getPartFilename(*tlkDesc, 0, path);
                    ActPrintLog("INDEXWRITE: creating toplevel key file : %s", path.str());
                    try
                    {
                        open(*tlkDesc, true, helper->queryDiskRecordSize()->isVariableSize());
                        if (tlkRows.length())
                        {
                            CNodeInfo &lastNode = tlkRows.item(tlkRows.length()-1);
                            memset(lastNode.value, 0xff, lastNode.size);
                        }
                        ForEachItemIn(idx, tlkRows)
                        {
                            CNodeInfo &info = tlkRows.item(idx);
                            builder->processKeyData((char *)info.value, info.pos, info.size);
                        }
                        close(*tlkDesc, tlkCrc, true);
                    }
                    catch (CATCHALL)
                    {
                        abortSoon = true;
                        close(*tlkDesc, tlkCrc, true);
                        removeFiles(*partDesc);
                        throw;
                    }
                }
            }
            else if (!isLocal && firstNode())