// Heuristically decides whether this table should be exposed to assistive
// technology as a "data" table (true) or treated as a purely presentational
// "layout" table (false). There is no authoritative signal in HTML, so this
// accumulates evidence: explicit markup (caption/summary/THEAD/TFOOT/rules,
// per-cell headers/abbr/axis/scope, <th> rows/columns) and visual styling
// (cell borders, background-color contrast with spacing, zebra striping).
bool AccessibilityTable::isDataTable() const
{
    if (!m_renderer)
        return false;

    // Do not consider it a data table if it has an ARIA role.
    if (hasARIARole())
        return false;

    // This employs a heuristic to determine if this table should appear.
    // Only "data" tables should be exposed as tables.
    // Unfortunately, there is no good way to determine the difference
    // between a "layout" table and a "data" table.
    RenderTable* table = toRenderTable(m_renderer);
    Node* tableNode = table->node();
    if (!tableNode || !tableNode->hasTagName(tableTag))
        return false;

    // If there is a caption element, summary, THEAD, or TFOOT section, it's most certainly a data table.
    HTMLTableElement* tableElement = static_cast<HTMLTableElement*>(tableNode);
    if (!tableElement->summary().isEmpty() || tableElement->tHead() || tableElement->tFoot() || tableElement->caption())
        return true;

    // If someone used the "rules" attribute, then the table should appear.
    if (!tableElement->rules().isEmpty())
        return true;

    // Go through the cells and check for tell-tale signs of "data" table status:
    // cells have borders, or use attributes like headers, abbr, scope or axis.
    RenderTableSection* firstBody = table->firstBody();
    if (!firstBody)
        return false;

    int numCols = firstBody->numColumns();
    int numRows = firstBody->numRows();

    // If there's only one cell, it's not a good AXTable candidate.
    if (numRows == 1 && numCols == 1)
        return false;

    // Store the background color of the table to check against cells' background colors.
    RenderStyle* tableStyle = table->style();
    if (!tableStyle)
        return false;
    Color tableBGColor = tableStyle->visitedDependentColor(CSSPropertyBackgroundColor);

    // Check enough of the cells to find if the table matches our criteria.
    // Criteria:
    //   1) must have at least one valid cell (and)
    //   2) at least half of cells have borders (or)
    //   3) at least half of cells have different bg colors than the table, and there is cell spacing
    unsigned validCellCount = 0;
    unsigned borderedCellCount = 0;
    unsigned backgroundDifferenceCellCount = 0;

    // Row colors for the zebra-striping check; only the first 5 rows are sampled.
    Color alternatingRowColors[5];
    int alternatingRowColorCount = 0;

    int headersInFirstColumnCount = 0;
    for (int row = 0; row < numRows; ++row) {
        int headersInFirstRowCount = 0;
        for (int col = 0; col < numCols; ++col) {
            RenderTableCell* cell = firstBody->primaryCellAt(row, col);
            if (!cell)
                continue;
            Node* cellNode = cell->node();
            if (!cellNode)
                continue;

            // Zero-sized cells contribute no visual evidence; skip them entirely.
            if (cell->width() < 1 || cell->height() < 1)
                continue;

            validCellCount++;
            HTMLTableCellElement* cellElement = static_cast<HTMLTableCellElement*>(cellNode);

            bool isTHCell = cellElement->hasTagName(thTag);
            // If the first row is comprised of all <th> tags, assume it is a data table.
            if (!row && isTHCell)
                headersInFirstRowCount++;
            // If the first column is comprised of all <th> tags, assume it is a data table.
            if (!col && isTHCell)
                headersInFirstColumnCount++;

            // In this case, the developer explicitly assigned a "data" table attribute.
            if (!cellElement->headers().isEmpty() || !cellElement->abbr().isEmpty() || !cellElement->axis().isEmpty() || !cellElement->scope().isEmpty())
                return true;

            RenderStyle* renderStyle = cell->style();
            if (!renderStyle)
                continue;

            // A cell needs to have matching bordered sides before it can be considered a bordered cell.
            if ((cell->borderTop() > 0 && cell->borderBottom() > 0) || (cell->borderLeft() > 0 && cell->borderRight() > 0))
                borderedCellCount++;

            // If the cell has a different color from the table and there is cell spacing,
            // then it is probably a data table cell (spacing and colors take the place of borders).
            // NOTE(review): the `cellColor.alpha() != 1` test compares against 1, not 0 —
            // presumably meant to exclude (nearly) transparent cells; confirm the intended
            // alpha scale before changing it.
            Color cellColor = renderStyle->visitedDependentColor(CSSPropertyBackgroundColor);
            if (table->hBorderSpacing() > 0 && table->vBorderSpacing() > 0 && tableBGColor != cellColor && cellColor.alpha() != 1)
                backgroundDifferenceCellCount++;

            // If we've found 10 "good" cells, we don't need to keep searching.
            if (borderedCellCount >= 10 || backgroundDifferenceCellCount >= 10)
                return true;

            // For the first 5 rows, cache the row background color so we can later check
            // whether this table has zebra-striped rows. Only the first sampled cell of
            // each new row records a color (row == alternatingRowColorCount).
            if (row < 5 && row == alternatingRowColorCount) {
                RenderObject* renderRow = cell->parent();
                if (!renderRow || !renderRow->isBoxModelObject() || !toRenderBoxModelObject(renderRow)->isTableRow())
                    continue;
                RenderStyle* rowRenderStyle = renderRow->style();
                if (!rowRenderStyle)
                    continue;
                Color rowColor = rowRenderStyle->visitedDependentColor(CSSPropertyBackgroundColor);
                alternatingRowColors[alternatingRowColorCount] = rowColor;
                alternatingRowColorCount++;
            }
        }

        // An all-<th> first row (with more than one column) marks a data table.
        if (!row && headersInFirstRowCount == numCols && numCols > 1)
            return true;
    }

    // An all-<th> first column (with more than one row) marks a data table.
    if (headersInFirstColumnCount == numRows && numRows > 1)
        return true;

    // If there are fewer than two valid cells, it's not a data table.
    if (validCellCount <= 1)
        return false;

    // Half of the cells had borders: it's a data table.
    unsigned neededCellCount = validCellCount / 2;
    if (borderedCellCount >= neededCellCount)
        return true;

    // Half had different background colors: it's a data table.
    if (backgroundDifferenceCellCount >= neededCellCount)
        return true;

    // Check if there is an alternating row background color indicating a zebra-striped style pattern.
    if (alternatingRowColorCount > 2) {
        Color firstColor = alternatingRowColors[0];
        for (int k = 1; k < alternatingRowColorCount; k++) {
            // If an odd row was the same color as the first row, it's not alternating.
            if (k % 2 == 1 && alternatingRowColors[k] == firstColor)
                return false;
            // If an even row is not the same as the first row, it's not alternating.
            if (!(k % 2) && alternatingRowColors[k] != firstColor)
                return false;
        }
        return true;
    }

    return false;
}
// True exactly when |node| has a single child: a first child that has no
// following sibling.
static inline bool hasOneChild(ContainerNode* node)
{
    if (Node* onlyCandidate = node->firstChild())
        return !onlyCandidate->nextSibling();
    return false;
}
// FIXME: Shouldn't we omit style info when annotate == DoNotAnnotateForInterchange?
// FIXME: At least, annotation and style info should probably not be included in range.markupString()
//
// Serializes the contents of |range| to an HTML markup string, optionally
// annotating it for interchange (interchange newlines, wrapping style nodes)
// and optionally collecting the serialized nodes into |nodes|.
// Returns "" for null/collapsed/unusable ranges.
String createMarkup(const Range* range, Vector<Node*>* nodes, EAnnotateForInterchange shouldAnnotate, bool convertBlocksToInlines, EAbsoluteURLs shouldResolveURLs)
{
    DEFINE_STATIC_LOCAL(const String, interchangeNewlineString, (ASCIILiteral("<br class=\"" AppleInterchangeNewline "\">")));

    if (!range)
        return "";

    Document* document = range->ownerDocument();
    if (!document)
        return "";

    // Disable the delete button so its elements are not serialized into the markup,
    // but make sure neither endpoint is inside the delete user interface.
    Frame* frame = document->frame();
    DeleteButtonController* deleteButton = frame ? frame->editor()->deleteButtonController() : 0;
    RefPtr<Range> updatedRange = avoidIntersectionWithNode(range, deleteButton ? deleteButton->containerElement() : 0);
    if (!updatedRange)
        return "";

    if (deleteButton)
        deleteButton->disable();

    // NOTE(review): the early returns below exit without re-enabling the delete
    // button that was just disabled — verify whether that is intentional.
    bool collapsed = updatedRange->collapsed(ASSERT_NO_EXCEPTION);
    if (collapsed)
        return "";
    Node* commonAncestor = updatedRange->commonAncestorContainer(ASSERT_NO_EXCEPTION);
    if (!commonAncestor)
        return "";

    document->updateLayoutIgnorePendingStylesheets();

    Node* body = enclosingNodeWithTag(firstPositionInNode(commonAncestor), bodyTag);
    Node* fullySelectedRoot = 0;
    // FIXME: Do this for all fully selected blocks, not just the body.
    if (body && areRangesEqual(VisibleSelection::selectionFromContentsOfNode(body).toNormalizedRange().get(), range))
        fullySelectedRoot = body;
    Node* specialCommonAncestor = highestAncestorToWrapMarkup(updatedRange.get(), shouldAnnotate);
    StyledMarkupAccumulator accumulator(nodes, shouldResolveURLs, shouldAnnotate, updatedRange.get(), specialCommonAncestor);
    Node* pastEnd = updatedRange->pastLastNode();

    Node* startNode = updatedRange->firstNode();
    VisiblePosition visibleStart(updatedRange->startPosition(), VP_DEFAULT_AFFINITY);
    VisiblePosition visibleEnd(updatedRange->endPosition(), VP_DEFAULT_AFFINITY);
    // Leading interchange newline: emitted when the selection starts at a
    // position that needs one; a selection that is *only* a newline returns it
    // directly.
    if (shouldAnnotate == AnnotateForInterchange && needInterchangeNewlineAfter(visibleStart)) {
        if (visibleStart == visibleEnd.previous()) {
            if (deleteButton)
                deleteButton->enable();
            return interchangeNewlineString;
        }

        accumulator.appendString(interchangeNewlineString);
        startNode = visibleStart.next().deepEquivalent().deprecatedNode();

        if (pastEnd && Range::compareBoundaryPoints(startNode, 0, pastEnd, 0, ASSERT_NO_EXCEPTION) >= 0) {
            if (deleteButton)
                deleteButton->enable();
            return interchangeNewlineString;
        }
    }

    Node* lastClosed = accumulator.serializeNodes(startNode, pastEnd);

    if (specialCommonAncestor && lastClosed) {
        // Also include all of the ancestors of lastClosed up to this special ancestor.
        for (ContainerNode* ancestor = lastClosed->parentNode(); ancestor; ancestor = ancestor->parentNode()) {
            if (ancestor == fullySelectedRoot && !convertBlocksToInlines) {
                RefPtr<EditingStyle> fullySelectedRootStyle = styleFromMatchedRulesAndInlineDecl(fullySelectedRoot);

                // Bring the background attribute over, but not as an attribute because a background attribute on a div
                // appears to have no effect.
                // NOTE(review): if fullySelectedRootStyle or its style() is null and the
                // element has a background attribute, the setProperty call below would
                // dereference null — confirm styleFromMatchedRulesAndInlineDecl's guarantees.
                if ((!fullySelectedRootStyle || !fullySelectedRootStyle->style() || !fullySelectedRootStyle->style()->getPropertyCSSValue(CSSPropertyBackgroundImage))
                    && static_cast<Element*>(fullySelectedRoot)->hasAttribute(backgroundAttr))
                    fullySelectedRootStyle->style()->setProperty(CSSPropertyBackgroundImage, "url('" + static_cast<Element*>(fullySelectedRoot)->getAttribute(backgroundAttr) + "')");

                if (fullySelectedRootStyle->style()) {
                    // Reset the CSS properties to avoid an assertion error in addStyleMarkup().
                    // This assertion is caused at least when we select all text of a <body> element whose
                    // 'text-decoration' property is "inherit", and copy it.
                    if (!propertyMissingOrEqualToNone(fullySelectedRootStyle->style(), CSSPropertyTextDecoration))
                        fullySelectedRootStyle->style()->setProperty(CSSPropertyTextDecoration, CSSValueNone);
                    if (!propertyMissingOrEqualToNone(fullySelectedRootStyle->style(), CSSPropertyWebkitTextDecorationsInEffect))
                        fullySelectedRootStyle->style()->setProperty(CSSPropertyWebkitTextDecorationsInEffect, CSSValueNone);
                    accumulator.wrapWithStyleNode(fullySelectedRootStyle->style(), document, true);
                }
            } else {
                // Since this node and all the other ancestors are not in the selection we want to set RangeFullySelectsNode to DoesNotFullySelectNode
                // so that styles that affect the exterior of the node are not included.
                accumulator.wrapWithNode(ancestor, convertBlocksToInlines, StyledMarkupAccumulator::DoesNotFullySelectNode);
            }
            if (nodes)
                nodes->append(ancestor);

            lastClosed = ancestor;
            if (ancestor == specialCommonAncestor)
                break;
        }
    }

    // FIXME: The interchange newline should be placed in the block that it's in, not after all of the content, unconditionally.
    if (shouldAnnotate == AnnotateForInterchange && needInterchangeNewlineAfter(visibleEnd.previous()))
        accumulator.appendString(interchangeNewlineString);

    if (deleteButton)
        deleteButton->enable();

    return accumulator.takeResults();
}
// ---------------------------------------------------------------------------- static void ApplyInitialDataCallback(ik_node_t* ikNode) { Node* node = (Node*)ikNode->user_data; node->SetWorldRotation(QuatIK2Urho(&ikNode->rotation)); node->SetWorldPosition(Vec3IK2Urho(&ikNode->position)); }
// Walks backwards through the DOM from |element|, looking for visible text
// matching one of |labels| (compiled into a single regexp). Used to guess a
// form field's label. If the element sits in a table cell, the row above may
// also be searched. On success, *resultDistance receives the number of
// characters scanned before the match and *resultIsInCellAbove records
// whether the match came from the cell above; both out-params are optional.
String Frame::searchForLabelsBeforeElement(const Vector<String>& labels, Element* element, size_t* resultDistance, bool* resultIsInCellAbove)
{
    OwnPtr<RegularExpression> regExp(createRegExpForLabels(labels));
    // We stop searching after we've seen this many chars.
    const unsigned int charsSearchedThreshold = 500;
    // This is the absolute max we search. We allow a little more slop than
    // charsSearchedThreshold, to make it more likely that we'll search whole nodes.
    const unsigned int maxCharsSearched = 600;
    // If the starting element is within a table, the cell that contains it.
    HTMLTableCellElement* startingTableCell = 0;
    bool searchedCellAbove = false;

    // Initialize out-params to "not found" defaults.
    if (resultDistance)
        *resultDistance = notFound;
    if (resultIsInCellAbove)
        *resultIsInCellAbove = false;

    // Walk backwards in the node tree, until another element, or form, or end of tree.
    int unsigned lengthSearched = 0;
    Node* n;
    for (n = element->traversePreviousNode(); n && lengthSearched < charsSearchedThreshold; n = n->traversePreviousNode()) {
        if (n->hasTagName(formTag) || (n->isHTMLElement() && static_cast<Element*>(n)->isFormControlElement())) {
            // We hit another form element or the start of the form - bail out.
            break;
        } else if (n->hasTagName(tdTag) && !startingTableCell) {
            // Remember the first (innermost) cell we encounter behind the element.
            startingTableCell = static_cast<HTMLTableCellElement*>(n);
        } else if (n->hasTagName(trTag) && startingTableCell) {
            // Crossed a row boundary while inside a table: try the cell above.
            String result = searchForLabelsAboveCell(regExp.get(), startingTableCell, resultDistance);
            if (!result.isEmpty()) {
                if (resultIsInCellAbove)
                    *resultIsInCellAbove = true;
                return result;
            }
            searchedCellAbove = true;
        } else if (n->isTextNode() && n->renderer() && n->renderer()->style()->visibility() == VISIBLE) {
            // For each visible text chunk, run the regexp.
            String nodeString = n->nodeValue();
            // Add 100 for slop, to make it more likely that we'll search whole nodes.
            // Truncate to the rightmost part (the part closest to the element).
            if (lengthSearched + nodeString.length() > maxCharsSearched)
                nodeString = nodeString.right(charsSearchedThreshold - lengthSearched);
            int pos = regExp->searchRev(nodeString);
            if (pos >= 0) {
                if (resultDistance)
                    *resultDistance = lengthSearched;
                return nodeString.substring(pos, regExp->matchedLength());
            }
            lengthSearched += nodeString.length();
        }
    }

    // If we started in a cell, but bailed because we found the start of the form or the
    // previous element, we still might need to search the row above us for a label.
    if (startingTableCell && !searchedCellAbove) {
        String result = searchForLabelsAboveCell(regExp.get(), startingTableCell, resultDistance);
        if (!result.isEmpty()) {
            if (resultIsInCellAbove)
                *resultIsInCellAbove = true;
            return result;
        }
    }
    return String();
}
int PFEMIntegrator::formSensitivityRHS(int passedGradNumber) { sensitivityFlag = 1; // Set a couple of data members gradNumber = passedGradNumber; // Get pointer to the SOE LinearSOE *theSOE = this->getLinearSOE(); // Get the analysis model AnalysisModel *theModel = this->getAnalysisModel(); // Randomness in external load (including randomness in time series) // Get domain Domain *theDomain = theModel->getDomainPtr(); // Loop through nodes to zero the unbalaced load Node *nodePtr; NodeIter &theNodeIter = theDomain->getNodes(); while ((nodePtr = theNodeIter()) != 0) nodePtr->zeroUnbalancedLoad(); // Loop through load patterns to add external load sensitivity LoadPattern *loadPatternPtr; LoadPatternIter &thePatterns = theDomain->getLoadPatterns(); double time; while((loadPatternPtr = thePatterns()) != 0) { time = theDomain->getCurrentTime(); loadPatternPtr->applyLoadSensitivity(time); } // Randomness in element/material contributions // Loop through FE elements FE_Element *elePtr; FE_EleIter &theEles = theModel->getFEs(); while((elePtr = theEles()) != 0) { theSOE->addB( elePtr->getResidual(this), elePtr->getID() ); } // Loop through DOF groups (IT IS IMPORTANT THAT THIS IS DONE LAST!) DOF_Group *dofPtr; DOF_GrpIter &theDOFs = theModel->getDOFs(); while((dofPtr = theDOFs()) != 0) { theSOE->addB( dofPtr->getUnbalance(this), dofPtr->getID() ); } // Reset the sensitivity flag sensitivityFlag = 0; return 0; }
// TESTME // TODO : this is really a pattern // (see score2OSSIA, score_plugin_coppa and friends), try to refactor it. // This could be a try_insert algorithm. void merge(Device::Node& base, const State::Message& message) { using Device::Node; QStringList path = message.address.address.path; path.prepend(message.address.address.device); Node* node = &base; for (int i = 0; i < path.size(); i++) { auto it = std::find_if(node->begin(), node->end(), [&](const auto& cur_node) { return cur_node.displayName() == path[i]; }); if (it == node->end()) { // We have to start adding sub-nodes from here. Node* parentnode{node}; for (int k = i; k < path.size(); k++) { Node* newNode{}; if (k == 0) { // We're adding a device Device::DeviceSettings dev; dev.name = path[k]; newNode = &parentnode->emplace_back(std::move(dev), nullptr); } else { // We're adding an address Device::AddressSettings addr; addr.name = path[k]; if (k == path.size() - 1) { // End of the address addr.value = message.value; // Note : since we don't have this // information in messagelist's, // we assign a default Out value // so that we only send the nodes that actually had messages // via the OSSIA api. addr.ioType = ossia::access_mode::SET; } newNode = &parentnode->emplace_back(std::move(addr), nullptr); } // TODO do similar simplification on other similar algorithms // cf in ossia stuff parentnode = newNode; } break; } else { node = &*it; if (i == path.size() - 1) { // We replace the value by the one in the message if (node->is<Device::AddressSettings>()) { node->get<Device::AddressSettings>().value = message.value; } } } } }
// JS binding dispatcher for the Clipboard prototype functions
// (clearData / getData / setData / setDragImage). Validates the receiver and
// argument counts, forwards to the underlying platform clipboard object, and
// maps results/errors back into JS values. |id| (a member, set at construction
// elsewhere) selects which clipboard operation this function object implements.
JSValue *ClipboardProtoFunc::callAsFunction(ExecState *exec, JSObject *thisObj, const List &args)
{
    // Receiver must actually be a Clipboard wrapper.
    if (!thisObj->inherits(&Clipboard::info))
        return throwError(exec, TypeError);

    Clipboard *cb = static_cast<Clipboard *>(thisObj);
    switch (id) {
        case Clipboard::ClearData:
            // 0 args clears everything; 1 arg clears a single type.
            if (args.size() == 0) {
                cb->clipboard->clearAllData();
                return jsUndefined();
            } else if (args.size() == 1) {
                cb->clipboard->clearData(args[0]->toString(exec));
                return jsUndefined();
            } else
                return throwError(exec, SyntaxError, "clearData: Invalid number of arguments");
        case Clipboard::GetData:
        {
            if (args.size() == 1) {
                bool success;
                String result = cb->clipboard->getData(args[0]->toString(exec), success);
                // A failed lookup yields undefined rather than an exception.
                if (success)
                    return jsString(result);
                else
                    return jsUndefined();
            } else
                return throwError(exec, SyntaxError, "getData: Invalid number of arguments");
        }
        case Clipboard::SetData:
            if (args.size() == 2)
                return jsBoolean(cb->clipboard->setData(args[0]->toString(exec), args[1]->toString(exec)));
            else
                return throwError(exec, SyntaxError, "setData: Invalid number of arguments");
        case Clipboard::SetDragImage:
        {
            // Only meaningful for drag-and-drop clipboards; silently no-op otherwise.
            if (!cb->clipboard->isForDragging())
                return jsUndefined();

            if (args.size() != 3)
                return throwError(exec, SyntaxError, "setDragImage: Invalid number of arguments");

            int x = (int)args[1]->toNumber(exec);
            int y = (int)args[2]->toNumber(exec);

            // See if they passed us a node.
            Node *node = toNode(args[0]);
            if (!node)
                return throwError(exec, TypeError);

            if (!node->isElementNode())
                return throwError(exec, SyntaxError, "setDragImageFromElement: Invalid first argument");

            // A detached <img> uses its cached image directly; anything else
            // (including in-document images) is rendered as an element snapshot.
            if (static_cast<Element*>(node)->hasLocalName(imgTag) && !node->inDocument())
                cb->clipboard->setDragImage(static_cast<HTMLImageElement*>(node)->cachedImage(), IntPoint(x, y));
            else
                cb->clipboard->setDragImageElement(node, IntPoint(x, y));

            return jsUndefined();
        }
    }
    return jsUndefined();
}
/// Cancel is used to rollback the innermost transaction and continue execution /// after the transaction block. It is only used to implement an explicit, /// user-level cancel operation. inline void _ITM_transaction::cancel(void** stack_lower_bound) { rollback(stack_lower_bound); Node* scope = leave(); scope->restore(a_abortTransaction | a_restoreLiveVariables); }
// (Re-)initializes the model infrastructure: lazily creates the three built-in
// node models (subnet, siblingcontainer, proxynode) on first call, rebuilds the
// working model list from the pristine prototypes, allocates per-thread proxy
// nodes and dummy spike sources, and re-registers all synapse prototypes in
// the synapse dictionary.
void ModelManager::initialize()
{
  if ( subnet_model_ == 0 && siblingcontainer_model_ == 0 && proxynode_model_ == 0 )
  {
    // Initialize these models only once, outside of the constructor:
    // the node model asks for the # of threads to set up SLI pools,
    // but during construction of ModelManager the KernelManager is not created yet.
    // Type ids 0-2 are fixed for the built-in models. The bool in each pair
    // controls whether the model is *excluded* from modeldict_ below
    // (false => inserted, true => private).
    subnet_model_ = new GenericModel< Subnet >( "subnet" );
    subnet_model_->set_type_id( 0 );
    pristine_models_.push_back( std::pair< Model*, bool >( subnet_model_, false ) );

    siblingcontainer_model_ = new GenericModel< SiblingContainer >( "siblingcontainer" );
    siblingcontainer_model_->set_type_id( 1 );
    pristine_models_.push_back( std::pair< Model*, bool >( siblingcontainer_model_, true ) );

    proxynode_model_ = new GenericModel< proxynode >( "proxynode" );
    proxynode_model_->set_type_id( 2 );
    pristine_models_.push_back( std::pair< Model*, bool >( proxynode_model_, true ) );
  }

  // Re-create the model list from the clean prototypes.
  for ( index i = 0; i < pristine_models_.size(); ++i )
  {
    if ( pristine_models_[ i ].first != 0 )
    {
      // Set the number of threads for the number of SLI pools.
      pristine_models_[ i ].first->set_threads();
      std::string name = pristine_models_[ i ].first->get_name();
      models_.push_back( pristine_models_[ i ].first->clone( name ) );
      if ( !pristine_models_[ i ].second )
        modeldict_->insert( name, i );
    }
  }

  // Create proxy nodes, one for each thread and model; plus one extra
  // proxynode per thread used as a dummy spike source.
  proxy_nodes_.resize( kernel().vp_manager.get_num_threads() );
  int proxy_model_id = get_model_id( "proxynode" );
  for ( thread t = 0; t < static_cast< thread >( kernel().vp_manager.get_num_threads() ); ++t )
  {
    for ( index i = 0; i < pristine_models_.size(); ++i )
    {
      if ( pristine_models_[ i ].first != 0 )
      {
        // Each proxy reports the model id of the model it stands in for.
        Node* newnode = proxynode_model_->allocate( t );
        newnode->set_model_id( i );
        proxy_nodes_[ t ].push_back( newnode );
      }
    }
    Node* newnode = proxynode_model_->allocate( t );
    newnode->set_model_id( proxy_model_id );
    dummy_spike_sources_.push_back( newnode );
  }

  synapsedict_->clear();

  // One list of prototypes per thread.
  std::vector< std::vector< ConnectorModel* > > tmp_proto( kernel().vp_manager.get_num_threads() );
  prototypes_.swap( tmp_proto );

  // (Re-)append all synapse prototypes; the dictionary maps the synapse name
  // to its index within each per-thread prototype list.
  for ( std::vector< ConnectorModel* >::iterator i = pristine_prototypes_.begin(); i != pristine_prototypes_.end(); ++i )
  {
    if ( *i != 0 )
    {
      std::string name = ( *i )->get_name();
      for ( thread t = 0; t < static_cast< thread >( kernel().vp_manager.get_num_threads() ); ++t )
        prototypes_[ t ].push_back( ( *i )->clone( name ) );
      synapsedict_->insert( name, prototypes_[ 0 ].size() - 1 );
    }
  }
}
// Applies one animation track to its target node at the state's current time,
// blending the animated transform with the node's existing transform by
// |weight|, using the "silent" setters (which, per their naming, avoid the
// usual change notifications — confirm against Node's API). Keyframe lookup
// position is cached in stateTrack.keyFrame_ between calls.
void AnimationState::ApplyTrackBlendedSilent(AnimationStateTrack& stateTrack, float weight)
{
    const AnimationTrack* track = stateTrack.track_;
    Node* node = stateTrack.node_;

    // Nothing to do without keyframes or a target node.
    if (track->keyFrames_.Empty() || !node)
        return;

    unsigned& frame = stateTrack.keyFrame_;
    track->GetKeyFrameIndex(time_, frame);

    // Check if next frame to interpolate to is valid, or if wrapping is needed (looping animation only).
    unsigned nextFrame = frame + 1;
    bool interpolate = true;
    if (nextFrame >= track->keyFrames_.Size())
    {
        if (!looped_)
        {
            // Clamp at the last keyframe for non-looping animations.
            nextFrame = frame;
            interpolate = false;
        }
        else
            nextFrame = 0;
    }

    const AnimationKeyFrame* keyFrame = &track->keyFrames_[frame];
    unsigned char channelMask = track->channelMask_;

    if (!interpolate)
    {
        // No interpolation, blend between old transform & animation.
        if (channelMask & CHANNEL_POSITION)
            node->SetPositionSilent(node->GetPosition().Lerp(keyFrame->position_, weight));
        if (channelMask & CHANNEL_ROTATION)
            node->SetRotationSilent(node->GetRotation().Slerp(keyFrame->rotation_, weight));
        if (channelMask & CHANNEL_SCALE)
            node->SetScaleSilent(node->GetScale().Lerp(keyFrame->scale_, weight));
    }
    else
    {
        const AnimationKeyFrame* nextKeyFrame = &track->keyFrames_[nextFrame];
        // When wrapping (next keyframe's time is earlier), extend the interval
        // by the animation length so t stays in [0, 1].
        float timeInterval = nextKeyFrame->time_ - keyFrame->time_;
        if (timeInterval < 0.0f)
            timeInterval += animation_->GetLength();
        float t = timeInterval > 0.0f ? (time_ - keyFrame->time_) / timeInterval : 1.0f;

        // Interpolation, blend between old transform & animation:
        // first interpolate between the two keyframes, then blend the result
        // with the node's current transform by |weight|.
        if (channelMask & CHANNEL_POSITION)
        {
            node->SetPositionSilent(node->GetPosition().Lerp(
                keyFrame->position_.Lerp(nextKeyFrame->position_, t), weight));
        }
        if (channelMask & CHANNEL_ROTATION)
        {
            node->SetRotationSilent(node->GetRotation().Slerp(
                keyFrame->rotation_.Slerp(nextKeyFrame->rotation_, t), weight));
        }
        if (channelMask & CHANNEL_SCALE)
        {
            node->SetScaleSilent(node->GetScale().Lerp(
                keyFrame->scale_.Lerp(nextKeyFrame->scale_, t), weight));
        }
    }
}
// Convenience overload: creates a child via the id/mode overload and assigns
// the requested name before handing it back.
Node* Node::CreateChild(const String& name, CreateMode mode, unsigned id)
{
    Node* const child = CreateChild(id, mode);
    child->SetName(name);
    return child;
}
// Rotates every node in the current edit selection by the Euler angles in
// |adjust| (degrees per axis). With a single node in local-axis mode the
// rotation is applied in the node's own space; otherwise nodes orbit the
// gizmo origin and the delta is re-expressed in each parent's space.
// Returns true when a rotation was actually applied.
bool Gizmo3D::RotateEditNodes(Vector3 adjust)
{
    /* Snap-to-step handling, currently disabled:
    if (rotateSnap)
    {
        float rotateStepScaled = rotateStep * snapScale;
        adjust.x = Floor(adjust.x / rotateStepScaled + 0.5) * rotateStepScaled;
        adjust.y = Floor(adjust.y / rotateStepScaled + 0.5) * rotateStepScaled;
        adjust.z = Floor(adjust.z / rotateStepScaled + 0.5) * rotateStepScaled;
    }
    */

    // A negligible adjustment rotates nothing.
    if (!(adjust.Length() > M_EPSILON))
        return false;

    for (unsigned i = 0; i < editNodes_->Size(); ++i)
    {
        Node* editNode = editNodes_->At(i);
        Quaternion deltaRot(adjust.x_, adjust.y_, adjust.z_);

        if (axisMode_ == AXIS_LOCAL && editNodes_->Size() == 1)
        {
            // Single node, local axes: rotate in the node's own space.
            editNode->SetRotation(editNode->GetRotation() * deltaRot);
        }
        else
        {
            Vector3 pivotOffset = editNode->GetWorldPosition() - gizmoAxisX_.axisRay_.origin_;

            // Re-express the delta in the parent's frame when the parent is
            // itself rotated away from identity.
            if (editNode->GetParent() && editNode->GetParent()->GetWorldRotation() != Quaternion(1, 0, 0, 0))
                deltaRot = editNode->GetParent()->GetWorldRotation().Inverse() * deltaRot * editNode->GetParent()->GetWorldRotation();

            editNode->SetRotation(deltaRot * editNode->GetRotation());

            // Orbit the node around the gizmo origin by the same delta.
            Vector3 rotatedPosition = gizmoAxisX_.axisRay_.origin_ + deltaRot * pivotOffset;
            if (editNode->GetParent())
                rotatedPosition = editNode->GetParent()->WorldToLocal(rotatedPosition);

            editNode->SetPosition(rotatedPosition);
        }
    }

    return true;
}
// Initializes an element's pile height and momenta from the configured piles.
// For each of the element's 9 nodes (8 corner/edge nodes + center), evaluates
// every pile's height at the node (paraboloid or cylinder profile, selected at
// compile time), keeping the per-node maximum height and height-weighted
// momentum sums; the element value is then a weighted average over the 9 nodes
// and stored via put_height_mom().
void elliptical_pile_height(HashTable* HT_Node_Ptr, Element *EmTemp, MatProps* matprops, PileProps* pileprops)
{
    unsigned nodes[9][2];

    // Get corner and edge nodes (8 keys packed consecutively).
    unsigned *node_key=EmTemp->getNode();
    for(int inode=0; inode<8; inode++)
        for(int ikey=0; ikey<KEYLENGTH; ikey++)
            nodes[inode][ikey]=node_key[inode*KEYLENGTH+ikey];

    // Get center node (the element's own key).
    node_key=EmTemp->pass_key();
    for(int ikey=0; ikey<KEYLENGTH; ikey++)
        nodes[8][ikey]=node_key[ikey];

    double node_pile_height[9];
    double sum_node_pile_height[9];
    double sum_node_xmom[9];
    double sum_node_ymom[9];
    double height;
    double phi=-1;//vfract = 0.;

    for(int inode=0;inode<9;inode++)
    {
        // Get pile height at each node...
        Node* ndtemp = (Node*) HT_Node_Ptr->lookup(&nodes[inode][0]);
        double* ndcoord = ndtemp->get_coord();

        // For multiple piles which may overlap, the highest value is used..
        node_pile_height[inode] = 0.0;
        sum_node_pile_height[inode] = 0.0;
        sum_node_xmom[inode] = 0.0;
        sum_node_ymom[inode] = 0.0;

        // Check each pile to see which has max height at this node.
        for(int ipile=0;ipile<pileprops->numpiles;ipile++)
        {
            //if (/*pileprops->vol_fract[ipile] > vfract*/)
            //    vfract = pileprops->vol_fract[ipile];

            // Get position relative to pile center.
            double major=ndcoord[0]-pileprops->xCen[ipile];
            double minor=ndcoord[1]-pileprops->yCen[ipile];

            /* "undo" elliptical pile rotation ... from (x,y)->(major,minor)
               also make nondimensional (by dividing by major and minor radius) */
            double doubleswap=(major* pileprops->cosrot[ipile]+ minor* pileprops->sinrot[ipile])/ pileprops->majorrad[ipile];
            minor =(-major* pileprops->sinrot[ipile]+ minor* pileprops->cosrot[ipile])/ pileprops->minorrad[ipile];
            major = doubleswap;

            /* calculate pile height based on non dimensional position relative
               to center of pile */
#ifdef PARABALOID
            height = pileprops->pileheight[ipile]*(1.-major*major-minor*minor);
#elif defined CYLINDER
            if (major*major+minor*minor<1.0)
                height = pileprops->pileheight[ipile];
            else
                height =0.0;
#endif
            // Clamp negative profile values (outside the paraboloid) to zero.
            height=(height>=0.0)?height:0.0;

            sum_node_pile_height[inode]+=height;
            sum_node_xmom[inode]+=height*(pileprops->initialVx[ipile]);
            sum_node_ymom[inode]+=height*(pileprops->initialVy[ipile]);

            if(node_pile_height[inode] < height)
                node_pile_height[inode] = height;
        }

        if(sum_node_pile_height[inode] <= .001)//GEOFLOW_TINY)
            sum_node_xmom[inode]=sum_node_ymom[inode]=0.0;
        else{
            // NOTE(review): `height` here is the height contributed by the LAST
            // pile iterated, not the per-node total or maximum — verify whether
            // node_pile_height[inode] was intended for this scaling.
            sum_node_xmom[inode]*=height/sum_node_pile_height[inode];
            sum_node_ymom[inode]*=height/sum_node_pile_height[inode];
            // These are now the averaged momentums at each node.
        }
    }

    /* The pile_height value assigned is an "area" weighted average over the
       element's 9 nodes. The element is divided into 4 squares, and each corner
       of each of the 4 squares counts once. Because the center node is repeated
       4 times, its weight is 4 times as much as the element's corner nodes
       which are not repeated; each edge node is repeated twice. */
    double pileheight=(//corner nodes
        node_pile_height[0]+node_pile_height[1]+
        node_pile_height[2]+node_pile_height[3]+
        //edge nodes
        2.0*(node_pile_height[4]+node_pile_height[5]+
             node_pile_height[6]+node_pile_height[7])+
        //center node
        4.0*node_pile_height[8]
        )/16.0;
    double xmom=(//corner nodes
        sum_node_xmom[0]+sum_node_xmom[1]+
        sum_node_xmom[2]+sum_node_xmom[3]+
        //edge nodes
        2.0*(sum_node_xmom[4]+sum_node_xmom[5]+
             sum_node_xmom[6]+sum_node_xmom[7])+
        //center node
        4.0*sum_node_xmom[8]
        )/16.0;
    double ymom=(//corner nodes
        sum_node_ymom[0]+sum_node_ymom[1]+
        sum_node_ymom[2]+sum_node_ymom[3]+
        //edge nodes
        2.0*(sum_node_ymom[4]+sum_node_ymom[5]+
             sum_node_ymom[6]+sum_node_ymom[7])+
        //center node
        4.0*sum_node_ymom[8]
        )/16.0;

    double TIME_SCALE= sqrt(matprops->LENGTH_SCALE/matprops->GRAVITY_SCALE);
    // phi was initialized to -1 above; empty elements keep the (scaled)
    // negative sentinel, occupied ones get 1/TIME_SCALE.
    if (pileheight>GEOFLOW_TINY)
        phi=1/TIME_SCALE;
    else
        phi/=TIME_SCALE;

    EmTemp->put_height_mom(pileheight,phi,xmom,ymom);
    return;
}
// Handles an incoming HeartbeatMsg from a client or server node: validates the
// node ID, constructs/updates the node in the appropriate store, registers
// meta nodes with the state store and capacity pools, processes root-node
// info, and finally acknowledges the heartbeat. Returns false only for
// sanity-check failures (empty ID, missing numeric ID, bad node type).
bool HeartbeatMsgEx::processIncoming(struct sockaddr_in* fromAddr, Socket* sock, char* respBuf, size_t bufLen, HighResolutionStats* stats)
{
   LogContext log("Heartbeat incoming");

   std::string peer = fromAddr ? Socket::ipaddrToStr(&fromAddr->sin_addr) : sock->getPeername();
   //LOG_DEBUG_CONTEXT(log, Log_DEBUG, std::string("Received a HeartbeatMsg from: ") + peer);

   App* app = Program::getApp();
   NodeCapacityPools* metaCapacityPools = app->getMetaCapacityPools();
   HeartbeatManager* heartbeatMgr = app->getHeartbeatMgr();

   bool isNodeNew;

   // Deserialize the sender's identity and capabilities from the message.
   NodeType nodeType = getNodeType();
   std::string nodeID(getNodeID() );
   NicAddressList nicList;
   parseNicList(&nicList);
   BitStore nodeFeatureFlags;
   parseNodeFeatureFlags(&nodeFeatureFlags);

   // check for empty nodeID; (sanity check, should never fail)
   if(unlikely(nodeID.empty() ) )
   {
      log.log(Log_WARNING, "Rejecting heartbeat of node with empty long ID "
         "from: " + peer + "; "
         "type: " + Node::nodeTypeToStr(nodeType) );
      return false;
   }

   if(nodeType == NODETYPE_Client)
   { // this is a client heartbeat
      NodeStoreClients* clients = app->getClientNodes();

      // construct node
      Node* node = RegisterNodeMsgEx::constructNode(
         nodeID, getNodeNumID(), getPortUDP(), getPortTCP(), nicList);

      node->setNodeType(getNodeType() );
      node->setFhgfsVersion(getFhgfsVersion() );
      node->setFeatureFlags(&nodeFeatureFlags);

      // add node to store (or update it)
      isNodeNew = clients->addOrUpdateNode(&node);
   }
   else
   { // this is a server heartbeat

      /* only accept new servers if nodeNumID is set
         (otherwise RegisterNodeMsg would need to be called first) */

      if(!getNodeNumID() )
      { /* shouldn't happen: this server would need to register first to get a nodeNumID assigned */

         log.log(Log_WARNING, "Rejecting heartbeat of node without numeric ID: " + nodeID + "; "
            "type: " + Node::nodeTypeToStr(nodeType) );

         return false;
      }

      // get the corresponding node store for this node type
      NodeStoreServers* servers = app->getServerStoreFromType(nodeType);
      if(unlikely(!servers) )
      {
         log.logErr(std::string("Invalid node type: ") + StringTk::intToStr(nodeType) );
         return false;
      }

      // check if adding a new server is allowed (in case this is a server)
      if(!RegisterNodeMsgEx::checkNewServerAllowed(servers, getNodeNumID(), nodeType) )
      { // this is a new server and adding was disabled
         log.log(Log_WARNING, std::string("Registration of new servers disabled. Rejecting: ") +
            nodeID + " (Type: " + Node::nodeTypeToStr(nodeType) + ")");
         // note: message is still acknowledged as handled => return true
         return true;
      }

      // construct node
      Node* node = RegisterNodeMsgEx::constructNode(
         nodeID, getNodeNumID(), getPortUDP(), getPortTCP(), nicList);

      node->setNodeType(nodeType);
      node->setFhgfsVersion(getFhgfsVersion() );
      node->setFeatureFlags(&nodeFeatureFlags);

      std::string typedNodeID = node->getTypedNodeID();

      // add node to store (or update it)
      uint16_t confirmationNodeNumID;
      isNodeNew = servers->addOrUpdateNodeEx(&node, &confirmationNodeNumID);

      if(confirmationNodeNumID != getNodeNumID() )
      { // unable to add node to store
         log.log(Log_WARNING, "Node rejected because of ID conflict. "
            "Given numeric ID: " + StringTk::uintToStr(getNodeNumID() ) + "; "
            "string ID: " + getNodeID() + "; "
            "type: " + Node::nodeTypeToStr(nodeType) );

         return true;
      }

      // add to capacity pools

      if(nodeType == NODETYPE_Meta)
      {
         // new meta nodes start as probably-offline until their state is confirmed
         app->getMetaStateStore()->addIfNotExists(getNodeNumID(), CombinedTargetState(
            TargetReachabilityState_POFFLINE, TargetConsistencyState_GOOD) );

         bool isNewMetaTarget = metaCapacityPools->addIfNotExists(
            confirmationNodeNumID, CapacityPool_LOW);

         if(isNewMetaTarget)
            heartbeatMgr->notifyAsyncAddedNode(nodeID, getNodeNumID(), nodeType);

         // (note: storage targets get published through MapTargetMsg)
      }

      // handle root node information (if any is given)
      RegisterNodeMsgEx::processIncomingRoot(getRootNumID(), nodeType);

   } // end of server heartbeat specific handling

   if(isNodeNew)
   { // this node is new
      RegisterNodeMsgEx::processNewNode(nodeID, getNodeNumID(), nodeType, getFhgfsVersion(),
         &nicList, peer);
   }

   // send response (ack the heartbeat if the sender requested one)
   MsgHelperAck::respondToAckRequest(this, fromAddr, sock, respBuf, bufLen,
      app->getDatagramListener() );

   return true;
}
bool SetpointManagerMultiZoneMaximumHumidityAverage_Impl::setSetpointNode(const Node& node) {
  // Bind the given node as the setpoint target by storing its handle in the
  // "Setpoint Node or NodeList Name" field; setPointer reports success.
  return setPointer(OS_SetpointManager_MultiZone_MaximumHumidity_AverageFields::SetpointNodeorNodeListName, node.handle());
}
/**
 * Handle an incoming node registration request.
 *
 * Resolves the node store for the sender's node type, checks whether
 * registration of new servers is currently allowed, constructs the node and
 * adds it to (or updates it in) the store, and finally replies with a
 * RegisterNodeRespMsg carrying the assigned numeric ID (0 signals rejection).
 *
 * @param fromAddr sender address; may be NULL, in which case the socket's
 *    peer name is used for logging.
 * @param stats unused in this handler.
 * @return true if a response was sent (including rejection responses); false
 *    only for the empty-string-ID sanity check, where no response is sent.
 */
bool RegisterNodeMsgEx::processIncoming(struct sockaddr_in* fromAddr, Socket* sock,
   char* respBuf, size_t bufLen, HighResolutionStats* stats)
{
   LogContext log("RegisterNodeMsg incoming");

   std::string peer = fromAddr ? Socket::ipaddrToStr(&fromAddr->sin_addr) : sock->getPeername();
   LOG_DEBUG_CONTEXT(log, Log_DEBUG, std::string("Received a RegisterNodeMsg from: ") + peer);

   App* app = Program::getApp();

   NodeType nodeType = getNodeType();
   std::string nodeID(getNodeID() );
   NicAddressList nicList;
   BitStore nodeFeatureFlags;
   Node* node;
   uint16_t newNodeNumID; // numeric ID assigned by the store; 0 => registration failed
   bool isNodeNew;

   LOG_DEBUG_CONTEXT(log, Log_SPAM,
      "Type: " + Node::nodeTypeToStr(nodeType) + "; "
      "NodeID: " + nodeID + "; "
      "NodeNumID: " + StringTk::uintToStr(getNodeNumID() ) );

   // check for empty nodeID; (sanity check, should never fail)
   if(unlikely(nodeID.empty() ) )
   {
      log.log(Log_WARNING, "Rejecting registration of node with empty string ID "
         "from: " + peer + "; "
         "type: " + Node::nodeTypeToStr(nodeType) );
      return false;
   }

   // get the corresponding node store for this node type
   NodeStoreServers* servers = app->getServerStoreFromType(nodeType);
   if(unlikely(!servers) )
   {
      log.logErr("Invalid node type for registration: " + Node::nodeTypeToStr(nodeType) + "; "
         "from: " + peer);

      newNodeNumID = 0; // 0 tells the requestor that registration was rejected
      goto send_response;
   }

   // check if adding of new servers is allowed
   if(!checkNewServerAllowed(servers, getNodeNumID(), nodeType) )
   { // this is a new server and adding was disabled
      log.log(Log_WARNING, "Registration of new servers disabled. Rejecting node: " + nodeID +
         " (Type: " + Node::nodeTypeToStr(nodeType) + ")");

      newNodeNumID = 0;
      goto send_response;
   }

   // construct node
   parseNicList(&nicList);
   node = constructNode(nodeID, getNodeNumID(), getPortUDP(), getPortTCP(), nicList);

   node->setNodeType(getNodeType() );
   node->setFhgfsVersion(getFhgfsVersion() );

   parseNodeFeatureFlags(&nodeFeatureFlags);
   node->setFeatureFlags(&nodeFeatureFlags);

   // add node to store (or update it)
   // NOTE(review): node is passed by double pointer — the store presumably takes
   // ownership of (or disposes) the object; confirm against NodeStoreServers docs.
   isNodeNew = servers->addOrUpdateNodeEx(&node, &newNodeNumID);
   if(!newNodeNumID)
   { // unable to add node to store
      log.log(Log_WARNING, "Unable to add node with given numeric ID: " +
         StringTk::uintToStr(getNodeNumID() ) + "; "
         "string ID: " + getNodeID() + "; "
         "Type: " + Node::nodeTypeToStr(nodeType) );

      goto send_response;
   }

   /* note on capacity pools: we may not add new nodes to capacity pools here yet, because at
      this point the registered node is not ready to receive messages yet. so we will add it to
      capacity pools later when we receive its first heartbeat msg. */

   // handle root node information (if any is given)
   processIncomingRoot(getRootNumID(), nodeType);

   if(isNodeNew)
   { // this node is new
      processNewNode(nodeID, newNodeNumID, nodeType, getFhgfsVersion(), &nicList, peer);
   }

   // send response
send_response:
   RegisterNodeRespMsg respMsg(newNodeNumID);
   respMsg.serialize(respBuf, bufLen);

   sock->sendto(respBuf, respMsg.getMsgLength(), 0,
      (struct sockaddr*)fromAddr, sizeof(struct sockaddr_in) );

   return true;
}
// Perform n_iterations sweeps of Laplacian smoothing over the mesh: each
// non-boundary vertex is moved to the average position of the vertices it is
// connected to in _graph, ghost-node positions are re-synchronized across
// processors after every sweep, and finally the second-order (mid-edge /
// mid-face) nodes are re-placed between their adjacent vertices.
// Boundary nodes are never moved, so the mesh geometry is preserved.
void LaplaceMeshSmoother::smooth(unsigned int n_iterations)
{
  if (!_initialized)
    this->init();

  // Don't smooth the nodes on the boundary...
  // this would change the mesh geometry which
  // is probably not something we want!
  std::vector<bool> on_boundary;
  MeshTools::find_boundary_nodes(_mesh, on_boundary);

  // Ensure that the find_boundary_nodes() function returned a properly-sized vector
  if (on_boundary.size() != _mesh.max_node_id())
    libmesh_error_msg("MeshTools::find_boundary_nodes() returned incorrect length vector!");

  // We can only update the nodes after all new positions were
  // determined. We store the new positions here
  std::vector<Point> new_positions;

  for (unsigned int n=0; n<n_iterations; n++)
    {
      // Indexed by node id, so sized by the largest id, not n_nodes().
      new_positions.resize(_mesh.max_node_id());

      // Pass 1: compute the averaged position for every local, non-boundary
      // vertex without modifying the mesh yet.
      {
        MeshBase::node_iterator it  = _mesh.local_nodes_begin();
        const MeshBase::node_iterator it_end = _mesh.local_nodes_end();

        for (; it != it_end; ++it)
          {
            Node * node = *it;

            if (node == libmesh_nullptr)
              libmesh_error_msg("[" << _mesh.processor_id() << "]: Node iterator returned NULL pointer.");

            // leave the boundary intact
            // Only relocate the nodes which are vertices of an element
            // All other entries of _graph (the secondary nodes) are empty
            if (!on_boundary[node->id()] && (_graph[node->id()].size() > 0))
              {
                Point avg_position(0.,0.,0.);

                for (unsigned j=0; j<_graph[node->id()].size(); ++j)
                  {
                    // Will these nodal positions always be available
                    // or will they refer to remote nodes? This will
                    // fail an assertion in the latter case, which
                    // shouldn't occur if DistributedMesh is working
                    // correctly.
                    const Point & connected_node = _mesh.point(_graph[node->id()][j]);

                    avg_position.add( connected_node );
                  } // end for(j)

                // Compute the average, store in the new_positions vector
                new_positions[node->id()] = avg_position / static_cast<Real>(_graph[node->id()].size());
              } // end if
          } // end for
      } // end scope

      // Pass 2: now update the node positions (local node positions only)
      {
        MeshBase::node_iterator it  = _mesh.local_nodes_begin();
        const MeshBase::node_iterator it_end = _mesh.local_nodes_end();

        for (; it != it_end; ++it)
          {
            Node * node = *it;

            if (!on_boundary[node->id()] && (_graph[node->id()].size() > 0))
              {
                // Should call Point::op=
                // libMesh::out << "Setting node id " << node->id() << " to position " << new_positions[node->id()];
                _mesh.node_ref(node->id()) = new_positions[node->id()];
              }
          } // end for
      } // end scope

      // Now the nodes which are ghosts on this processor may have been moved on
      // the processors which own them. So we need to synchronize with our neighbors
      // and get the most up-to-date positions for the ghosts.
      SyncNodalPositions sync_object(_mesh);
      Parallel::sync_dofobject_data_by_id
        (_mesh.comm(), _mesh.nodes_begin(), _mesh.nodes_end(), sync_object);

    } // end for n_iterations

  // finally adjust the second order nodes (those located between vertices)
  // these nodes will be located between their adjacent nodes
  // do this element-wise
  MeshBase::element_iterator el  = _mesh.active_elements_begin();
  const MeshBase::element_iterator end = _mesh.active_elements_end();

  for (; el != end; ++el)
    {
      // Constant handle for the element
      const Elem * elem = *el;

      // get the second order nodes (son)
      // their element indices start at n_vertices and go to n_nodes
      const unsigned int son_begin = elem->n_vertices();
      const unsigned int son_end   = elem->n_nodes();

      // loop over all second order nodes (son)
      for (unsigned int son=son_begin; son<son_end; son++)
        {
          // Don't smooth second-order nodes which are on the boundary
          if (!on_boundary[elem->node_id(son)])
            {
              const unsigned int n_adjacent_vertices =
                elem->n_second_order_adjacent_vertices(son);

              // calculate the new position which is the average of the
              // position of the adjacent vertices
              Point avg_position(0,0,0);
              for (unsigned int v=0; v<n_adjacent_vertices; v++)
                avg_position +=
                  _mesh.point( elem->node_id( elem->second_order_adjacent_vertex(son,v) ) );

              _mesh.node_ref(elem->node_id(son)) = avg_position / n_adjacent_vertices;
            }
        }
    }
}
// Resize and reposition the page-covering rectangle on the background layer
// after the page size has changed, then fix up any bitmap fill attributes on
// that rectangle so the fill origin/end points still map onto the new page
// area. Returns TRUE even when no page rectangle is found (nothing to fix is
// not treated as an error); FALSE only on the NULL-layer parameter check.
BOOL OpBackground::FixBackgroundLayer(Layer * pBackgroundLayer, const DocRect& NewPagesRect, const DocRect& OldPagesRect)
{
	ERROR2IF(pBackgroundLayer == NULL,FALSE,"OpBackground::FixBackgroundLayer Bad params!");

	//BOOL ok = TRUE;

	// There should be a rectangle on the page encompassing the old position of the pages
	// so go and try and find it
	NodeRegularShape * pShape = FindPageRectangle(pBackgroundLayer, OldPagesRect);

	// If no rectangle is found then something might be slightly wrong. Should we error?
	if (pShape)
	{
		const INT32 CornerRadius = 0;	// No curvature
		if (pShape->MakeRectangle(NewPagesRect.Width(), NewPagesRect.Height(), CornerRadius))
		{
			// Translate centre from 0,0 to required position relative to page
			INT32 XTrans = NewPagesRect.lo.x + (NewPagesRect.Width()/2);
			INT32 YTrans = NewPagesRect.lo.y + (NewPagesRect.Height()/2);
			Trans2DMatrix Trans(XTrans, YTrans);
			pShape->Transform(Trans);

			// finish off the shape
			pShape->InvalidateBoundingRect();
		}

		// Find any bitmap fills and fix up the fill origin etc.
		Node * pNode = pShape->FindFirstChild();
		while (pNode != NULL)
		{
			// Only check and fix bitmap colour fill attributes of the regular shape nodes
			if (pNode->IS_KIND_OF(AttrBitmapColourFill))
			{
				// Convert the pointer to the correct type
				AttrBitmapColourFill * pBitmapFillAttr = (AttrBitmapColourFill*)pNode;

				// Set the coordinates of the fill end points
				// We want to start the fill at the top left of the regular shape
				// The bounding rect should be roughly what we want
				DocRect Rect = pShape->GetBoundingRect(TRUE, FALSE);

				// NOTE(review): pBitmap is dereferenced without a NULL check — confirm
				// GetBitmap() cannot return NULL for a bitmap colour fill attribute.
				KernelBitmap * pBitmap = ((AttrFillGeometry*)pBitmapFillAttr)->GetBitmap();
				// Get the size of the bitmap taking into account its DPI
				INT32 Width = pBitmap->GetRecommendedWidth();
				INT32 Height = pBitmap->GetRecommendedHeight();

				// You would think that the handles on the fill correspond to where you place them
				// but oh no, the displayed handles are shown at different points. So instead of
				// positioning the start in the centre of the bitmap, you need to position it bottom left
				// The End should be centre and right but needs to be bottom right.
				// The End2 should be centre and top but needs to be top left.

				// We want the start point to be half the height of the bitmap below this
				//DocCoord Start(Rect.lo.x + Width/2, Rect.hi.y - Height/2);
				DocCoord Start(Rect.lo.x, Rect.hi.y - Height);
				// We want the other end point to be same height as the centre point
				// but on the far right of the rectangle i.e the full width across
				//DocCoord End(Rect.lo.x + Width, Rect.hi.y - Height/2);
				DocCoord End(Rect.lo.x + Width, Rect.hi.y - Height);
				// We want the end point to be middle and top of the rectangle
				//DocCoord End2(Rect.lo.x + Width/2, Rect.hi.y);
				DocCoord End2(Rect.lo.x, Rect.hi.y);

				((AttrFillGeometry*)pBitmapFillAttr)->SetStartPoint(&Start);
				((AttrFillGeometry*)pBitmapFillAttr)->SetEndPoint(&End);
				((AttrFillGeometry*)pBitmapFillAttr)->SetEndPoint2(&End2);
			}

			pNode = pNode->FindNext();
		}
	}

	return TRUE;
}
bool SVGTRefElement::childShouldCreateRenderer(const Node& child) const
{
    // Only children living inside the shadow tree may get renderers; any
    // light-tree child of <tref> is suppressed.
    if (child.isInShadowTree())
        return true;
    return false;
}
HTMLElement* HTMLTextAreaElement::innerTextElement() const
{
    // The user-agent shadow root's first child is the editable inner element
    // (a <div>, per the assertion), or null if the shadow tree is empty.
    Node* firstChild = userAgentShadowRoot()->firstChild();
    ASSERT(!firstChild || firstChild->hasTagName(divTag));
    return toHTMLElement(firstChild);
}
// Compute the initial parameters (seed point initialPoint_, orientation
// normals initialNormal1_/initialNormal2_, initialRadius_ and
// stretchingFactor_) for the spinal cord segmentation by running a circular
// Hough transform on a small stack of axial slices around a starting slice,
// then chaining the detected circle centers across slices and keeping the
// best chain.
//
// startFactor selects the starting slice: -1.0 keeps the current startSlice_
// setting, a value below 1.0 is interpreted as a fraction of the image height,
// and any larger value is taken as an absolute slice number.
// Returns true when a usable chain of centers was found, false otherwise.
// NOTE(review): axis 1 of the image is treated as the slice direction
// throughout — confirm the expected input image orientation.
bool Initialisation::computeInitialParameters(float startFactor)
{
    ImageType::SizeType desiredSize = inputImage_->GetLargestPossibleRegion().GetSize();

    // The spinal cord detection is performed on a bunch of axial slices. The choice of which
    // slices will be analyzed depends on the startFactor. Default is the middle axial slice.
    // The parameter startFactor must be the number of the slice, or a number between 0 and 1
    // representing the percentage of the image.
    // For example, startFactor=0.5 means the detection will start in the middle axial slice.
    float startZ;
    if (startFactor != -1.0) startSlice_ = startFactor;
    if (startSlice_ == -1.0) {
        startZ = desiredSize[1]/2;
        startSlice_ = startZ;
    }
    else if (startSlice_ < 1.0) {
        startZ = desiredSize[1]*startSlice_;
        startSlice_ = startZ;
    }
    else startZ = startSlice_;

    // Adapt radius to the image spacing to provide a radius in pixels - use average spacing of axial slice
    ImageType::SpacingType spacing = inputImage_->GetSpacing();
    mean_resolution_ = (spacing[0]+spacing[2])/2;

    // Adapt the gap between detection axial slices to the spacing
    if (round(spacing[1]) != 0 && (int)gap_ % (int)round(spacing[1]) != 0) {
        gap_ = spacing[1];
    }

    // Adapt the number of axial slices used for the detection to the spacing and the image dimensions.
    if (startZ-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) < 0 || startZ+((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1]) >= desiredSize[1])
    {
        numberOfSlices_ = numberOfSlices_-2;
        //gap_ = 1;
        if (verbose_) {
            cout << "WARNING: number of slices and gap between slices are not adapted to the image dimensions for the initilization. Default parameters will be used." << endl;
            cout << "New parameters:" << endl << "Gap inter slices = " << gap_ << endl << "Number of slices = " << numberOfSlices_ << endl;
        }
    }

    // Initalisation of the paremeters for the spinal cord detection
    ImageType::IndexType desiredStart;
    desiredStart[0] = 0;
    desiredStart[1] = startZ;
    desiredStart[2] = 0;
    desiredSize[1] = 0; // collapse the slice axis so the extraction yields a 2D axial image

    // First extraction of the axial slice to check if the image contains information (not null)
    ImageType::RegionType desiredRegionImage(desiredStart, desiredSize);
    typedef itk::ExtractImageFilter< ImageType, ImageType2D > Crop2DFilterType;
    Crop2DFilterType::Pointer cropFilter = Crop2DFilterType::New();
    cropFilter->SetExtractionRegion(desiredRegionImage);
    cropFilter->SetInput(inputImage_);
#if ITK_VERSION_MAJOR >= 4
    cropFilter->SetDirectionCollapseToIdentity(); // This is required.
#endif
    try {
        cropFilter->Update();
    } catch( itk::ExceptionObject & e ) {
        std::cerr << "Exception caught while updating cropFilter " << std::endl;
        std::cerr << e << std::endl;
    }
    ImageType2D::Pointer image_test_minmax = cropFilter->GetOutput();
    MinMaxCalculatorType::Pointer minMaxCalculator = MinMaxCalculatorType::New();
    minMaxCalculator->SetImage(image_test_minmax);
    minMaxCalculator->ComputeMaximum();
    minMaxCalculator->ComputeMinimum();
    ImageType2D::PixelType maxIm = minMaxCalculator->GetMaximum(), minIm = minMaxCalculator->GetMinimum();
    if (maxIm == minIm) {
        cerr << "WARNING: The principal axial slice where the spinal cord detection will be performed (slice " << startZ << ") is full of constant value (" << maxIm << "). You can change it using -init parameter." << endl;
    }

    // Starting the spinal cord detection process
    if (verbose_) cout << "Initialization" << endl;

    // Creation of a matrix of potential spinal cord centers
    // (indexed as centers[slice][stretching-factor step][detected circle])
    vector<vector <vector <Node*> > > centers;

    // Start of the detection of circles and ellipses. For each axial slices, a Hough transform
    // is performed to detect circles. Each axial image is stretched in the antero-posterior
    // direction in order to detect the spinal cord as a ellipse as well as a circle.
    for (int i=round(-((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i<=round(((numberOfSlices_-1.0)/2.0)*(gap_/spacing[1])); i+=round(gap_/spacing[1]))
    {
        // Cropping of the image
        if (verbose_) cout << "Slice num " << i << endl;
        desiredStart[1] = startZ+i;
        ImageType::RegionType desiredRegion(desiredStart, desiredSize);
        FilterType::Pointer filter = FilterType::New();
        filter->SetExtractionRegion(desiredRegion);
        filter->SetInput(inputImage_);
#if ITK_VERSION_MAJOR >= 4
        filter->SetDirectionCollapseToIdentity(); // This is required.
#endif
        try {
            filter->Update();
        } catch( itk::ExceptionObject & e ) {
            std::cerr << "Exception caught while updating cropFilter " << std::endl;
            std::cerr << e << std::endl;
            cout << inputImage_->GetLargestPossibleRegion().GetSize() << endl;
            cout << desiredRegion << endl;
        }

        // The image is duplicated to allow multiple processing on the image.
        ImageType2D::Pointer im = filter->GetOutput();
        DuplicatorType::Pointer duplicator = DuplicatorType::New();
        duplicator->SetInputImage(im);
        duplicator->Update();
        ImageType2D::Pointer clonedImage = duplicator->GetOutput();

        // Carry the in-plane part of the 3D direction matrix over to the extracted 2D slice.
        ImageType::DirectionType imageDirection = inputImage_->GetDirection();
        ImageType2D::DirectionType clonedImageDirection;
        clonedImageDirection[0][0] = imageDirection[0][0];
        clonedImageDirection[0][1] = imageDirection[0][2];
        clonedImageDirection[1][0] = imageDirection[1][0];
        clonedImageDirection[1][1] = imageDirection[1][2];
        clonedImage->SetDirection(clonedImageDirection);

        // Initialization of resulting spinal cord center list.
        vector<vector <Node*> > vecNode;

        // Initialization of stretching parameters
        // A stretchingFactor equals to 1 doesn't change the image
        double stretchingFactor = 1.0, step = 0.25;
        while (stretchingFactor <= 2.0)
        {
            if (verbose_) cout << "Stretching factor " << stretchingFactor << endl;
            // Stretching the image in the antero-posterior direction. This direction is chosen
            // because potential elliptical spinal cord will be transformed to circles and wil be
            // detected by the Hough transform. The resulting circles will then be stretch in the
            // other direction.
            if (stretchingFactor != 1.0)
            {
                ImageType2D::SizeType inputSize = clonedImage->GetLargestPossibleRegion().GetSize(), outputSize;
                outputSize[0] = inputSize[0]*stretchingFactor;
                outputSize[1] = inputSize[1];
                // Shrink the spacing proportionally so the physical extent stays the same.
                ImageType2D::SpacingType outputSpacing;
                outputSpacing[0] = static_cast<double>(clonedImage->GetSpacing()[0] * inputSize[0] / outputSize[0]);
                outputSpacing[1] = static_cast<double>(clonedImage->GetSpacing()[1] * inputSize[1] / outputSize[1]);

                ResampleImageFilterType::Pointer resample = ResampleImageFilterType::New();
                resample->SetInput(clonedImage);
                resample->SetSize(outputSize);
                resample->SetOutputDirection(clonedImage->GetDirection());
                resample->SetOutputOrigin(clonedImage->GetOrigin());
                resample->SetOutputSpacing(outputSpacing);
                resample->SetTransform(TransformType::New());
                resample->Update();

                im = resample->GetOutput();
            }

            // Searching the circles in the image using circular Hough transform, adapted from ITK
            // The list of radii and accumulator values are then extracted for analyses
            vector<CVector3> vecCenter;
            vector<double> vecRadii, vecAccumulator;
            searchCenters(im,vecCenter,vecRadii,vecAccumulator,startZ+i);

            // Reformating of the detected circles in the image. Each detected circle is push in a
            // Node with all its information. The radii are transformed in mm using mean axial resolution
            vector<Node*> vecNodeTemp;
            for (unsigned int k=0; k<vecCenter.size(); k++) {
                if (vecRadii[k] != 0.0) {
                    CVector3 center = vecCenter[k];
                    center[0] /= stretchingFactor; // undo the stretching on the detected center
                    vecNodeTemp.push_back(new Node(center,mean_resolution_*vecRadii[k]/stretchingFactor,vecAccumulator[k],vecCenter[k],mean_resolution_*vecRadii[k],stretchingFactor));
                }
            }
            vecNode.push_back(vecNodeTemp);

            // Preparing next iteration of the spinal cord detection
            stretchingFactor += step;
        }
        // Saving the detected centers
        centers.push_back(vecNode);
    }

    // All centers are ordoned by slice
    // First step -> delete points without neighbour
    double limitDistance = sqrt(2.0*gap_*gap_); // in mm
    list<Node*> listPoints;
    for (unsigned int k=0; k<numberOfSlices_; k++)
    {
        // For every point in a slice, we search on next and previous slice for neighbors
        // Potential neighbours are circles that have a similar radius (less than 20% of difference)
        for (unsigned int i=0; i<centers[k].size(); i++)
        {
            for (unsigned int m=0; m<centers[k][i].size(); m++)
            {
                bool hasNeighbor = false;
                double radius = centers[k][i][m]->getRadius();
                if (k != 0) // search down
                {
                    // All the point are sorted by the distance
                    map<double,Node*> listNeighbors;
                    for (unsigned int j=0; j<centers[k-1][i].size(); j++)
                    {
                        // Compute the distance between two adjacent centers (in mm)
                        // If this distance is less or equal to the limit distance, the two centers are attached to each others
                        double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k-1][i][j]->getPosition()[0],2)+pow(centers[k][i][m]->getPosition()[1]-centers[k-1][i][j]->getPosition()[1],2)+pow(centers[k][i][m]->getPosition()[2]-centers[k-1][i][j]->getPosition()[2],2));
                        if (currentDistance <= limitDistance)
                            listNeighbors[currentDistance] = centers[k-1][i][j];
                    }
                    // Walk neighbors from nearest to farthest; attach the first with a similar radius.
                    while (!listNeighbors.empty())
                    {
                        double radiusCurrent = listNeighbors.begin()->second->getRadius();
                        if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
                        {
                            hasNeighbor = true;
                            centers[k][i][m]->addPrevious(listNeighbors.begin()->second);
                            break;
                        }
                        listNeighbors.erase(listNeighbors.begin());
                    }
                }
                if (k != numberOfSlices_-1) // search up
                {
                    map<double,Node*> listNeighbors;
                    for (unsigned int j=0; j<centers[k+1][i].size(); j++)
                    {
                        double currentDistance = mean_resolution_*sqrt(pow(centers[k][i][m]->getPosition()[0]-centers[k+1][i][j]->getPosition()[0],2)+pow(centers[k][i][m]->getPosition()[1]-centers[k+1][i][j]->getPosition()[1],2)+pow(centers[k][i][m]->getPosition()[2]-centers[k+1][i][j]->getPosition()[2],2));
                        if (currentDistance <= limitDistance)
                            listNeighbors[currentDistance] = centers[k+1][i][j];
                    }
                    while (!listNeighbors.empty())
                    {
                        double radiusCurrent = listNeighbors.begin()->second->getRadius();
                        if (radiusCurrent >= radius*0.8 && radiusCurrent <= radius*1.2)
                        {
                            hasNeighbor = true;
                            centers[k][i][m]->addNext(listNeighbors.begin()->second);
                            break;
                        }
                        listNeighbors.erase(listNeighbors.begin());
                    }
                }
                if (hasNeighbor) // if point has at least one neighbor, we keep it
                    listPoints.push_back(centers[k][i][m]);
            }
        }
    }

    // Second step -> assembling points
    // Each chain follows the "next" links from an unvisited point upward.
    vector<vector <Node*> > chains;
    while (listPoints.size() != 0)
    {
        vector<Node*> temp;
        Node* current = listPoints.front();
        temp.push_back(current);
        listPoints.pop_front();
        while(current->hasNext())
        {
            current = current->getNext();
            temp.push_back(current);
        }
        chains.push_back(temp);
    }

    // And search for the longest and with larger accumulation value and small angle between normals
    unsigned int maxLenght = 0, max = 0;
    double maxAccumulator = 0.0, angleMax = 15.0;
    for (unsigned int j=0; j<chains.size(); j++)
    {
        unsigned int length = chains[j].size();
        double angle = 0.0;
        if (length >= 3)
        {
            // Angle (in degrees) between the lower and upper halves of the chain;
            // a nearly straight chain yields an angle close to zero.
            CVector3 vector1 = chains[j][0]->getPosition()-chains[j][length/2]->getPosition(), vector2 = (chains[j][length/2]->getPosition()-chains[j][length-1]->getPosition());
            angle = 360.0*acos((vector1*vector2)/(vector1.Norm()*vector2.Norm()))/(2.0*M_PI);
        }
        if (length > maxLenght && angle <= angleMax)
        {
            maxLenght = chains[j].size();
            max = j;
            maxAccumulator = 0.0;
            for (unsigned int k=0; k<length; k++)
                maxAccumulator += chains[j][k]->getAccumulator();
        }
        else if (length == maxLenght && angle <= angleMax)
        {
            // Tie on length: prefer the chain with the larger accumulated Hough score.
            double accumulator = 0.0;
            for (unsigned int k=0; k<length; k++)
                accumulator += chains[j][k]->getAccumulator();
            if (accumulator > maxAccumulator)
            {
                maxLenght = chains[j].size();
                max = j;
                maxAccumulator = accumulator;
            }
        }
    }
    if (chains.size() > 1)
    {
        unsigned int sizeMaxChain = chains[max].size();
        //cout << "Results : " << endl;
        points_.clear();
        for (unsigned int j=0; j<sizeMaxChain; j++) {
            points_.push_back(chains[max][j]->getPosition());
            //cout << chains[max][j]->getPosition() << " " << chains[max][j]->getRadius() << endl;
        }
        if (verbose_) cout << "Stretching factor of circle found = " << chains[max][0]->getStretchingFactor() << endl;
        if (sizeMaxChain < numberOfSlices_)
        {
            if (verbose_) cout << "Warning: Number of center found on slices (" << sizeMaxChain << ") doesn't correspond to number of analyzed slices. An error may occur. To improve results, you can increase the number of analyzed slices (option -n must be impair)" << endl;

            // we have to transform pixel points to physical points
            CVector3 finalPoint, initPointT = chains[max][0]->getPosition(), finalPointT = chains[max][sizeMaxChain-1]->getPosition();
            ContinuousIndex initPointIndex, finalPointIndex;
            initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
            finalPointIndex[0] = finalPointT[0]; finalPointIndex[1] = finalPointT[1]; finalPointIndex[2] = finalPointT[2];
            PointType initPoint, finPoint;
            inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex,initPoint);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPointIndex,finPoint);
            initialPoint_ = CVector3(initPoint[0],initPoint[1],initPoint[2]);
            finalPoint = CVector3(finPoint[0],finPoint[1],finPoint[2]);
            // Incomplete chain: only one orientation normal can be derived.
            initialNormal1_ = (finalPoint-initialPoint_).Normalize();
            initialRadius_ = 0.0;
            for (unsigned int j=0; j<sizeMaxChain; j++)
                initialRadius_ += chains[max][j]->getRadiusStretch();
            initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
        }
        else
        {
            // we have to transform pixel points to physical points
            CVector3 finalPoint1, finalPoint2, initPointT = chains[max][(int)(sizeMaxChain/2)]->getPosition(), finalPointT1 = chains[max][0]->getPosition(), finalPointT2 = chains[max][sizeMaxChain-1]->getPosition();
            ContinuousIndex initPointIndex, finalPoint1Index, finalPoint2Index;
            initPointIndex[0] = initPointT[0]; initPointIndex[1] = initPointT[1]; initPointIndex[2] = initPointT[2];
            finalPoint1Index[0] = finalPointT1[0]; finalPoint1Index[1] = finalPointT1[1]; finalPoint1Index[2] = finalPointT1[2];
            finalPoint2Index[0] = finalPointT2[0]; finalPoint2Index[1] = finalPointT2[1]; finalPoint2Index[2] = finalPointT2[2];
            PointType initPoint, finPoint1, finPoint2;
            inputImage_->TransformContinuousIndexToPhysicalPoint(initPointIndex,initPoint);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint1Index,finPoint1);
            inputImage_->TransformContinuousIndexToPhysicalPoint(finalPoint2Index,finPoint2);
            initialPoint_ = CVector3(initPoint[0],initPoint[1],initPoint[2]);
            finalPoint1 = CVector3(finPoint1[0],finPoint1[1],finPoint1[2]);
            finalPoint2 = CVector3(finPoint2[0],finPoint2[1],finPoint2[2]);
            // Full chain: seed from the middle point with normals toward both ends.
            initialNormal1_ = (finalPoint1-initialPoint_).Normalize();
            initialNormal2_ = (finalPoint2-initialPoint_).Normalize();
            initialRadius_ = 0.0;
            for (unsigned int j=0; j<sizeMaxChain; j++)
                initialRadius_ += chains[max][j]->getRadiusStretch();
            initialRadius_ /= sizeMaxChain;
            stretchingFactor_ = chains[max][0]->getStretchingFactor();
        }
        return true;
    }
    else {
        cout << "Error: No point detected..." << endl;
        return false;
    }
}
// ---------------------------------------------------------------------------- static void UpdateInitialPoseCallback(ik_node_t* ikNode) { Node* node = (Node*)ikNode->user_data; ikNode->rotation = QuatUrho2IK(node->GetWorldRotation()); ikNode->position = Vec3Urho2IK(node->GetWorldPosition()); }
// Build the term for the current enumeration state at the given index.
// Indices below d_has_debruijn denote the de Bruijn/self-reference slot
// (represented as an uninterpreted constant by child enumerators); larger
// indices select a datatype constructor, whose arguments are taken from the
// currently selected sub-enumerations. Returns the null Node when the current
// index/size combination is infeasible, or when the built constant is not in
// normal form (so each codatatype value is enumerated exactly once).
Node DatatypesEnumerator::getCurrentTerm( unsigned index ){
  Debug("dt-enum-debug") << "Get current term at " << index << " " << d_type << std::endl;
  Node ret;
  if( index<d_has_debruijn ){
    if( d_child_enum ){
      // child enumerators model the self-reference as an uninterpreted constant
      // parameterized by the current size limit
      ret = NodeManager::currentNM()->mkConst(UninterpretedConstant(d_type.toType(), d_size_limit));
    }else{
      //no top-level variables
      return Node::null();
    }
  }else{
    Debug("dt-enum-debug") << "Look at constructor " << (index - d_has_debruijn) << std::endl;
    DatatypeConstructor ctor = d_datatype[index - d_has_debruijn];
    Debug("dt-enum-debug") << "Check last term..." << std::endl;
    //we first check if the last argument (which is forced to make sum of iterated
    //arguments equal to d_size_limit) is defined
    Node lc;
    if( ctor.getNumArgs()>0 ){
      Assert( index<d_sel_types.size() );
      Assert( ctor.getNumArgs()-1<d_sel_types[index].size() );
      lc = getTermEnum( d_sel_types[index][ctor.getNumArgs()-1], d_size_limit - d_sel_sum[index] );
      if( lc.isNull() ){
        Debug("dt-enum-debug") << "Current infeasible." << std::endl;
        return Node::null();
      }
    }
    Debug("dt-enum-debug") << "Get constructor..." << std::endl;
    NodeBuilder<> b(kind::APPLY_CONSTRUCTOR);
    Type typ;
    if( d_datatype.isParametric() ){
      // parametric datatypes require the constructor to be ascribed with its
      // specialized (instantiated) type
      typ = ctor.getSpecializedConstructorType(d_type.toType());
      b << NodeManager::currentNM()->mkNode(kind::APPLY_TYPE_ASCRIPTION,
                                            NodeManager::currentNM()->mkConst(AscriptionType(typ)),
                                            Node::fromExpr( ctor.getConstructor() ) );
    }else{
      b << ctor.getConstructor();
    }
    Debug("dt-enum-debug") << "Get arguments..." << std::endl;
    if( ctor.getNumArgs()>0 ){
      Assert( index<d_sel_types.size() );
      Assert( index<d_sel_index.size() );
      Assert( d_sel_types[index].size()==ctor.getNumArgs() );
      Assert( d_sel_index[index].size()==ctor.getNumArgs()-1 );
      //all arguments but the last use their currently selected enumeration index
      for( int i=0; i<(int)(ctor.getNumArgs()-1); i++ ){
        Node c = getTermEnum( d_sel_types[index][i], d_sel_index[index][i] );
        Assert( !c.isNull() );
        b << c;
      }
      b << lc;
    }
    Node nnn = Node(b);
    Debug("dt-enum-debug") << "Return... " << nnn << std::endl;
    ret = nnn;
  }
  if( !d_child_enum && d_has_debruijn ){
    // top-level codatatype constants must be normalized; reject non-normal ones
    // so no value is produced twice
    Node nret = DatatypesRewriter::normalizeCodatatypeConstant( ret );
    if( nret!=ret ){
      if( nret.isNull() ){
        Trace("dt-enum-nn") << "Invalid constant : " << ret << std::endl;
      }else{
        Trace("dt-enum-nn") << "Non-normal constant : " << ret << std::endl;
        Trace("dt-enum-nn") << " ...normal form is : " << nret << std::endl;
      }
      return Node::null();
    }
  }
  return ret;
}
// Record everything needed to restore the viewport after a rotation: the
// current scale factors, the visual viewport's offset within the layout
// viewport (normalized by the layout viewport size), an absolute-position
// fallback, and — when possible — an anchor node plus the anchor point
// expressed in normalized node-local coordinates.
void RotationViewportAnchor::setAnchor() {
  RootFrameViewport* rootFrameViewport = m_rootFrameView->getRootFrameViewport();
  DCHECK(rootFrameViewport);

  IntRect outerViewRect =
      layoutViewport().visibleContentRect(IncludeScrollbars);
  IntRect innerViewRect = rootFrameViewport->visibleContentRect();

  m_oldPageScaleFactor = m_visualViewport->scale();
  m_oldMinimumPageScaleFactor =
      m_pageScaleConstraintsSet.finalConstraints().minimumScale;

  // Save the absolute location in case we won't find the anchor node, we'll
  // fall back to that.
  m_visualViewportInDocument =
      FloatPoint(rootFrameViewport->visibleContentRect().location());

  // Reset all anchor state; it is only filled in if an anchor node is found.
  m_anchorNode.clear();
  m_anchorNodeBounds = LayoutRect();
  m_anchorInNodeCoords = FloatSize();
  m_normalizedVisualViewportOffset = FloatSize();

  if (innerViewRect.isEmpty())
    return;

  // Preserve origins at the absolute screen origin
  if (innerViewRect.location() == IntPoint::zero())
    return;

  // Inner rectangle should be within the outer one.
  DCHECK(outerViewRect.contains(innerViewRect));

  // Outer rectangle is used as a scale, we need positive width and height.
  DCHECK(!outerViewRect.isEmpty());

  m_normalizedVisualViewportOffset =
      FloatSize(innerViewRect.location() - outerViewRect.location());

  // Normalize by the size of the outer rect
  m_normalizedVisualViewportOffset.scale(1.0 / outerViewRect.width(),
                                         1.0 / outerViewRect.height());

  // Anchor point inside the visual viewport, as the fraction of the viewport
  // size given by m_anchorInInnerViewCoords.
  FloatPoint anchorOffset(innerViewRect.size());
  anchorOffset.scale(m_anchorInInnerViewCoords.width(),
                     m_anchorInInnerViewCoords.height());

  // Convert the anchor point from viewport space into document contents space.
  const FloatPoint anchorPointInContents = m_rootFrameView->rootFrameToContents(
      m_visualViewport->viewportToRootFrame(anchorOffset));

  Node* node = findNonEmptyAnchorNode(flooredIntPoint(anchorPointInContents),
                                      innerViewRect,
                                      m_rootFrameView->frame().eventHandler());
  if (!node)
    return;

  m_anchorNode = node;
  m_anchorNodeBounds = node->boundingBox();
  // Store the anchor point relative to the node's bounds, normalized by the
  // node size so it remains meaningful if the node's bounds change.
  m_anchorInNodeCoords =
      anchorPointInContents - FloatPoint(m_anchorNodeBounds.location());
  m_anchorInNodeCoords.scale(1.f / m_anchorNodeBounds.width(),
                             1.f / m_anchorNodeBounds.height());
}
void mergeBlocks( BasicBlock* firstBlock, BasicBlock* secondBlock, Vector<BasicBlock*, 1> jettisonedBlocks) { // This will add all of the nodes in secondBlock to firstBlock, but in so doing // it will also ensure that any GetLocals from the second block that refer to // SetLocals in the first block are relinked. If jettisonedBlock is not NoBlock, // then Phantoms are inserted for anything that the jettisonedBlock would have // kept alive. // Remove the terminal of firstBlock since we don't need it anymore. Well, we don't // really remove it; we actually turn it into a check. Node* terminal = firstBlock->terminal(); ASSERT(terminal->isTerminal()); NodeOrigin boundaryNodeOrigin = terminal->origin; terminal->remove(); ASSERT(terminal->refCount() == 1); for (unsigned i = jettisonedBlocks.size(); i--;) { BasicBlock* jettisonedBlock = jettisonedBlocks[i]; // Time to insert ghosties for things that need to be kept alive in case we OSR // exit prior to hitting the firstBlock's terminal, and end up going down a // different path than secondBlock. for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i) keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForArgument(i)); for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i) keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForLocal(i)); } for (size_t i = 0; i < secondBlock->phis.size(); ++i) firstBlock->phis.append(secondBlock->phis[i]); for (size_t i = 0; i < secondBlock->size(); ++i) firstBlock->append(secondBlock->at(i)); ASSERT(firstBlock->terminal()->isTerminal()); // Fix the predecessors of my new successors. This is tricky, since we are going to reset // all predecessors anyway due to reachability analysis. But we need to fix the // predecessors eagerly to ensure that we know what they are in case the next block we // consider in this phase wishes to query the predecessors of one of the blocks we // affected. 
for (unsigned i = firstBlock->numSuccessors(); i--;) { BasicBlock* successor = firstBlock->successor(i); for (unsigned j = 0; j < successor->predecessors.size(); ++j) { if (successor->predecessors[j] == secondBlock) successor->predecessors[j] = firstBlock; } } // Fix the predecessors of my former successors. Again, we'd rather not do this, but it's // an unfortunate necessity. See above comment. for (unsigned i = jettisonedBlocks.size(); i--;) fixJettisonedPredecessors(firstBlock, jettisonedBlocks[i]); firstBlock->valuesAtTail = secondBlock->valuesAtTail; firstBlock->cfaBranchDirection = secondBlock->cfaBranchDirection; m_graph.killBlock(secondBlock); }
// Walks the nodes from startNode up to (but not including) pastEnd in
// pre-order, emitting start/end tags when traversalMode is EmitString, and
// returns the last node for which a closing tag (or wrapping) was produced
// (0 if none). ancestorsToClose tracks opened-but-not-yet-closed containers.
Node* StyledMarkupAccumulator::traverseNodesForSerialization(Node* startNode, Node* pastEnd, NodeTraversalMode traversalMode)
{
    const bool shouldEmit = traversalMode == EmitString;
    Vector<Node*> ancestorsToClose;
    Node* next;
    Node* lastClosed = 0;
    for (Node* n = startNode; n != pastEnd; n = next) {
        // According to <rdar://problem/5730668>, it is possible for n to blow
        // past pastEnd and become null here. This shouldn't be possible.
        // This null check will prevent crashes (but create too much markup)
        // and the ASSERT will hopefully lead us to understanding the problem.
        ASSERT(n);
        if (!n)
            break;

        next = NodeTraversal::next(n);
        bool openedTag = false;

        // Don't write out empty block containers that aren't fully selected.
        if (isBlock(n) && canHaveChildrenForEditing(n) && next == pastEnd)
            continue;

        if (!n->renderer() && !enclosingNodeWithTag(firstPositionInOrBeforeNode(n), selectTag)) {
            // Unrendered node (outside a <select>): skip its whole subtree.
            next = NodeTraversal::nextSkippingChildren(n);
            // Don't skip over pastEnd.
            if (pastEnd && pastEnd->isDescendantOf(n))
                next = pastEnd;
        } else {
            // Add the node to the markup if we're not skipping the descendants
            if (shouldEmit)
                appendStartTag(n);

            // If node has no children, close the tag now.
            if (!n->childNodeCount()) {
                if (shouldEmit)
                    appendEndTag(n);
                lastClosed = n;
            } else {
                // Keep the tag open; it will be closed when we leave its subtree.
                openedTag = true;
                ancestorsToClose.append(n);
            }
        }

        // If we didn't insert open tag and there's no more siblings or we're at the end of the traversal, take care of ancestors.
        // FIXME: What happens if we just inserted open tag and reached the end?
        if (!openedTag && (!n->nextSibling() || next == pastEnd)) {
            // Close up the ancestors.
            while (!ancestorsToClose.isEmpty()) {
                Node* ancestor = ancestorsToClose.last();
                // Not at the end of the range, close ancestors up to sibling of next node.
                if (next != pastEnd && next->isDescendantOf(ancestor))
                    break;
                if (shouldEmit)
                    appendEndTag(ancestor);
                lastClosed = ancestor;
                ancestorsToClose.removeLast();
            }

            // Surround the currently accumulated markup with markup for ancestors we never opened as we leave the subtree(s) rooted at those ancestors.
            ContainerNode* nextParent = next ? next->parentNode() : 0;
            if (next != pastEnd && n != nextParent) {
                Node* lastAncestorClosedOrSelf = n->isDescendantOf(lastClosed) ? lastClosed : n;
                for (ContainerNode* parent = lastAncestorClosedOrSelf->parentNode(); parent && parent != nextParent; parent = parent->parentNode()) {
                    // All ancestors that aren't in the ancestorsToClose list should either be a) unrendered:
                    if (!parent->renderer())
                        continue;
                    // or b) ancestors that we never encountered during a pre-order traversal starting at startNode:
                    ASSERT(startNode->isDescendantOf(parent));
                    if (shouldEmit)
                        wrapWithNode(parent);
                    lastClosed = parent;
                }
            }
        }
    }
    return lastClosed;
}
// CFG simplification pass: repeatedly scans every block's terminal and
// (a) merges a Jump into a sole-predecessor successor, (b) collapses a
// Branch whose direction CFA proved constant, and (c) prunes/collapses
// Switch cases, until a full sweep makes no change. Returns whether
// anything changed at all.
bool run()
{
    // FIXME: We should make this work in SSA. https://bugs.webkit.org/show_bug.cgi?id=148260
    DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);

    const bool extremeLogging = false;

    bool outerChanged = false;  // any change across all sweeps
    bool innerChanged;          // change during the current sweep
    do {
        innerChanged = false;
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            ASSERT(block->isReachable);

            switch (block->terminal()->op()) {
            case Jump: {
                // Successor with one predecessor -> merge.
                if (block->successor(0)->predecessors.size() == 1) {
                    ASSERT(block->successor(0)->predecessors[0] == block);
                    if (extremeLogging)
                        m_graph.dump();
                    m_graph.dethread();
                    mergeBlocks(block, block->successor(0), noBlocks());
                    innerChanged = outerChanged = true;
                    break;
                }

                // FIXME: Block only has a jump -> remove. This is tricky though because of
                // liveness. What we really want is to slam in a phantom at the end of the
                // block, after the terminal. But we can't right now. :-(
                // Idea: what if I slam the ghosties into my successor? Nope, that's
                // suboptimal, because if my successor has multiple predecessors then we'll
                // be keeping alive things on other predecessor edges unnecessarily.
                // What we really need is the notion of end-of-block ghosties!
                // FIXME: Allow putting phantoms after terminals.
                // https://bugs.webkit.org/show_bug.cgi?id=126778
                break;
            }

            case Branch: {
                // Branch on constant -> jettison the not-taken block and merge.
                if (isKnownDirection(block->cfaBranchDirection)) {
                    bool condition = branchCondition(block->cfaBranchDirection);
                    BasicBlock* targetBlock = block->successorForCondition(condition);
                    BasicBlock* jettisonedBlock = block->successorForCondition(!condition);
                    if (targetBlock->predecessors.size() == 1) {
                        // We are the target's only predecessor: fold it into us.
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();
                        mergeBlocks(block, targetBlock, oneBlock(jettisonedBlock));
                    } else {
                        // Target has other predecessors: keep it, but demote our
                        // terminal to an unconditional Jump and ghost the
                        // jettisoned path's live operands.
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();

                        Node* terminal = block->terminal();
                        ASSERT(terminal->isTerminal());
                        NodeOrigin boundaryNodeOrigin = terminal->origin;

                        jettisonBlock(block, jettisonedBlock, boundaryNodeOrigin);

                        block->replaceTerminal(
                            m_graph, SpecNone, Jump, boundaryNodeOrigin,
                            OpInfo(targetBlock));

                        ASSERT(block->terminal());
                    }
                    innerChanged = outerChanged = true;
                    break;
                }

                // Branch to same destination -> jump.
                // FIXME: this will currently not be hit because of the lack of jump-only
                // block simplification.
                if (block->successor(0) == block->successor(1)) {
                    convertToJump(block, block->successor(0));
                    innerChanged = outerChanged = true;
                    break;
                }

                break;
            }

            case Switch: {
                SwitchData* data = block->terminal()->switchData();

                // Prune out cases that end up jumping to default.
                // (swap-with-last removal; i-- revisits the swapped-in case)
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    if (data->cases[i].target.block == data->fallThrough.block) {
                        data->fallThrough.count += data->cases[i].target.count;
                        data->cases[i--] = data->cases.last();
                        data->cases.removeLast();
                    }
                }

                // If there are no cases other than default then this turns
                // into a jump.
                if (data->cases.isEmpty()) {
                    convertToJump(block, data->fallThrough.block);
                    innerChanged = outerChanged = true;
                    break;
                }

                // Switch on constant -> jettison all other targets and merge.
                Node* terminal = block->terminal();
                if (terminal->child1()->hasConstant()) {
                    FrozenValue* value = terminal->child1()->constant();
                    TriState found = FalseTriState;
                    BasicBlock* targetBlock = 0;
                    // Find the unique case that matches the constant, if any.
                    for (unsigned i = data->cases.size(); found == FalseTriState && i--;) {
                        found = data->cases[i].value.strictEqual(value);
                        if (found == TrueTriState)
                            targetBlock = data->cases[i].target.block;
                    }

                    // Mixed means equality could not be decided statically: bail.
                    if (found == MixedTriState)
                        break;
                    if (found == FalseTriState)
                        targetBlock = data->fallThrough.block;
                    ASSERT(targetBlock);

                    // Every other successor gets jettisoned.
                    Vector<BasicBlock*, 1> jettisonedBlocks;
                    for (BasicBlock* successor : terminal->successors()) {
                        if (successor != targetBlock)
                            jettisonedBlocks.append(successor);
                    }

                    if (targetBlock->predecessors.size() == 1) {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();
                        mergeBlocks(block, targetBlock, jettisonedBlocks);
                    } else {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();

                        NodeOrigin boundaryNodeOrigin = terminal->origin;

                        for (unsigned i = jettisonedBlocks.size(); i--;)
                            jettisonBlock(block, jettisonedBlocks[i], boundaryNodeOrigin);

                        block->replaceTerminal(
                            m_graph, SpecNone, Jump, boundaryNodeOrigin, OpInfo(targetBlock));
                    }
                    innerChanged = outerChanged = true;
                    break;
                }
                break;
            }

            default:
                break;
            }
        }

        if (innerChanged) {
            // Here's the reason for this pass:
            // Blocks: A, B, C, D, E, F
            // A -> B, C
            // B -> F
            // C -> D, E
            // D -> F
            // E -> F
            //
            // Assume that A's branch is determined to go to B. Then the rest of this phase
            // is smart enough to simplify down to:
            // A -> B
            // B -> F
            // C -> D, E
            // D -> F
            // E -> F
            //
            // We will also merge A and B. But then we don't have any other mechanism to
            // remove D, E as predecessors for F. Worse, the rest of this phase does not
            // know how to fix the Phi functions of F to ensure that they no longer refer
            // to variables in D, E. In general, we need a way to handle Phi simplification
            // upon:
            // 1) Removal of a predecessor due to branch simplification. The branch
            //    simplifier already does that.
            // 2) Invalidation of a predecessor because said predecessor was rendered
            //    unreachable. We do this here.
            //
            // This implies that when a block is unreachable, we must inspect its
            // successors' Phi functions to remove any references from them into the
            // removed block.

            m_graph.invalidateCFG();
            m_graph.resetReachability();
            m_graph.killUnreachableBlocks();
        }

        if (Options::validateGraphAtEachPhase())
            validate();
    } while (innerChanged);

    return outerChanged;
}
// Builds a DocumentFragment representing |text| as it should be pasted into
// |context|: plain inline text, preformatted text, line-break-separated text
// (inside text form controls), or one paragraph element per line. Returns 0
// when no context/style node is available.
PassRefPtr<DocumentFragment> createFragmentFromText(Range* context, const String& text)
{
    if (!context)
        return 0;

    // Find a node whose style/renderer decides how the text is treated.
    Node* styleNode = context->firstNode();
    if (!styleNode) {
        styleNode = context->startPosition().deprecatedNode();
        if (!styleNode)
            return 0;
    }

    Document* document = styleNode->document();
    RefPtr<DocumentFragment> fragment = document->createDocumentFragment();

    if (text.isEmpty())
        return fragment.release();

    // Normalize line endings to '\n'.
    String string = text;
    string.replace("\r\n", "\n");
    string.replace('\r', '\n');

    // In a whitespace-preserving context, insert the text as a single text
    // node; a trailing newline becomes an Apple interchange <br>.
    RenderObject* renderer = styleNode->renderer();
    if (renderer && renderer->style()->preserveNewline()) {
        fragment->appendChild(document->createTextNode(string), ASSERT_NO_EXCEPTION);
        if (string.endsWith('\n')) {
            RefPtr<Element> element = createBreakElement(document);
            element->setAttribute(classAttr, AppleInterchangeNewline);
            fragment->appendChild(element.release(), ASSERT_NO_EXCEPTION);
        }
        return fragment.release();
    }

    // A string with no newlines gets added inline, rather than being put into a paragraph.
    if (string.find('\n') == notFound) {
        fillContainerFromString(fragment.get(), string);
        return fragment.release();
    }

    // Break string into paragraphs. Extra line breaks turn into empty paragraphs.
    Node* blockNode = enclosingBlock(context->firstNode());
    // Note: the cast happens before the isElementNode() check, but |block| is
    // only dereferenced after the short-circuiting checks below succeed.
    Element* block = static_cast<Element*>(blockNode);
    bool useClonesOfEnclosingBlock = blockNode
        && blockNode->isElementNode()
        && !block->hasTagName(bodyTag)
        && !block->hasTagName(htmlTag)
        && block != editableRootForPosition(context->startPosition());
    // Text form controls use <br> separators instead of paragraph elements.
    bool useLineBreak = enclosingTextFormControl(context->startPosition());

    Vector<String> list;
    string.split('\n', true, list); // true gets us empty strings in the list
    size_t numLines = list.size();
    for (size_t i = 0; i < numLines; ++i) {
        const String& s = list[i];

        RefPtr<Element> element;
        if (s.isEmpty() && i + 1 == numLines) {
            // For last line, use the "magic BR" rather than a P.
            element = createBreakElement(document);
            element->setAttribute(classAttr, AppleInterchangeNewline);
        } else if (useLineBreak) {
            // Line's text goes straight into the fragment, followed by a <br>.
            element = createBreakElement(document);
            fillContainerFromString(fragment.get(), s);
        } else {
            if (useClonesOfEnclosingBlock)
                element = block->cloneElementWithoutChildren();
            else
                element = createDefaultParagraphElement(document);
            fillContainerFromString(element.get(), s);
        }
        fragment->appendChild(element.release(), ASSERT_NO_EXCEPTION);
    }
    return fragment.release();
}
int Beam3d :: giveLocalCoordinateSystem(FloatMatrix &answer)
//
// returns a unit vectors of local coordinate system at element
// stored rowwise (mainly used by some materials with ortho and anisotrophy)
//
{
    FloatArray lx, ly, lz, help(3);

    // Local x axis: unit vector from node 1 towards node 2.
    Node *nodeA = this->giveNode(1);
    Node *nodeB = this->giveNode(2);
    lx.beDifferenceOf(*nodeB->giveCoordinates(), *nodeA->giveCoordinates());
    lx.normalize();

    if ( !this->usingAngle ) {
        // Reference-node mode: local z is perpendicular to the plane spanned
        // by lx and the direction towards the reference node.
        Node *refNode = this->giveDomain()->giveNode(this->referenceNode);
        help.beDifferenceOf(*refNode->giveCoordinates(), *nodeA->giveCoordinates());

        lz.beVectorProductOf(lx, help);
        lz.normalize();
    } else {
        // Reference-angle mode: build a rotation of referenceAngle degrees
        // about lx (Rodrigues' rotation matrix) and apply it to a default
        // in-plane direction.
        FloatMatrix rot(3, 3);
        double theta = referenceAngle * M_PI / 180.0;
        // Hoist the trig evaluations; they were previously recomputed in
        // every matrix entry (and pow(x, 2) replaced by plain products).
        double c = cos(theta);
        double s = sin(theta);

        rot.at(1, 1) = c + lx.at(1) * lx.at(1) * ( 1 - c );
        rot.at(1, 2) = lx.at(1) * lx.at(2) * ( 1 - c ) - lx.at(3) * s;
        rot.at(1, 3) = lx.at(1) * lx.at(3) * ( 1 - c ) + lx.at(2) * s;

        rot.at(2, 1) = lx.at(2) * lx.at(1) * ( 1 - c ) + lx.at(3) * s;
        rot.at(2, 2) = c + lx.at(2) * lx.at(2) * ( 1 - c );
        rot.at(2, 3) = lx.at(2) * lx.at(3) * ( 1 - c ) - lx.at(1) * s;

        rot.at(3, 1) = lx.at(3) * lx.at(1) * ( 1 - c ) - lx.at(2) * s;
        rot.at(3, 2) = lx.at(3) * lx.at(2) * ( 1 - c ) + lx.at(1) * s;
        rot.at(3, 3) = c + lx.at(3) * lx.at(3) * ( 1 - c );

        help.at(3) = 1.0;   // up-vector (global z)
        // here ly is used as a temp var
        // BUGFIX: take fabs of the dot product so a *downward* vertical beam
        // (lx antiparallel to the up-vector, dot = -1, acos = pi) is also
        // detected; previously lx x help degenerated to the zero vector and
        // the resulting frame was invalid.
        if ( acos(fabs(lx.dotProduct(help))) < 0.001 ) { // Check if it is vertical
            ly = {0., 1., 0.};
        } else {
            ly.beVectorProductOf(lx, help);
        }
        lz.beProductOf(rot, ly);
        lz.normalize();
    }

    // Local y completes the right-handed orthonormal triad.
    ly.beVectorProductOf(lz, lx);
    ly.normalize();

    // Store the unit vectors rowwise: row 1 = lx, row 2 = ly, row 3 = lz.
    answer.resize(3, 3);
    answer.zero();
    for ( int i = 1; i <= 3; i++ ) {
        answer.at(1, i) = lx.at(i);
        answer.at(2, i) = ly.at(i);
        answer.at(3, i) = lz.at(i);
    }

    return 1;
}