void CombinatorialEmbedding::reverseEdge(edge e)
{
    // reverse edge in graph
    m_pGraph->reverseEdge(e);

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
}
/**
 * @brief Convert the current object to a PostgreSQL Datum
 *
 * If the current object is Null, we still return <tt>Datum(0)</tt>, i.e., we
 * return a valid Datum. It is the responsibility of the caller to separately
 * call isNull().
 *
 * @param inFCInfo The PostgreSQL FunctionCallInfo that was passed to the UDF.
 *     This is necessary for verifying that the top-level AnyType has the
 *     correct type.
 */
inline Datum
AbstractionLayer::AnyType::getAsDatum(const FunctionCallInfo inFCInfo) {
    consistencyCheck();

    Oid targetTypeID;
    TupleDesc tupleDesc;
    TypeFuncClass funcClass;
    bool exceptionOccurred = false;

    PG_TRY(); {
        // FIXME: get_call_result_type is tagged as expensive in funcapi.c
        // It seems not to be necessary to release the tupleDesc
        // E.g., in plpython.c ReleaseTupleDesc() is not called
        funcClass = get_call_result_type(inFCInfo, &targetTypeID, &tupleDesc);
    } PG_CATCH(); {
        exceptionOccurred = true;
    } PG_END_TRY();

    if (exceptionOccurred)
        throw std::invalid_argument("An exception occurred while "
            "gathering information about the PostgreSQL return type.");

    bool targetIsComposite = (funcClass == TYPEFUNC_COMPOSITE);
    if (targetIsComposite && !isComposite())
        throw std::logic_error("Invalid type conversion requested. "
            "Simple type supplied but PostgreSQL expects composite type.");

    if (!targetIsComposite && isComposite())
        throw std::logic_error("Invalid type conversion requested. "
            "Composite type supplied but PostgreSQL expects simple type.");

    // tupleDesc can be NULL if the return type is not composite
    return getAsDatum(targetTypeID, isComposite(), tupleDesc);
}
edge CombinatorialEmbedding::splitFace(adjEntry adjSrc, adjEntry adjTgt)
{
    OGDF_ASSERT(m_rightFace[adjSrc] == m_rightFace[adjTgt])
    OGDF_ASSERT(adjSrc != adjTgt)

    edge e = m_pGraph->newEdge(adjSrc,adjTgt);

    face f1 = m_rightFace[adjTgt];
    face f2 = createFaceElement(adjSrc);

    adjEntry adj = adjSrc;
    do {
        m_rightFace[adj] = f2;
        f2->m_size++;
        adj = adj->faceCycleSucc();
    } while (adj != adjSrc);

    f1->entries.m_adjFirst = adjTgt;
    f1->m_size += (2 - f2->m_size);
    m_rightFace[e->adjSource()] = f1;

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return e;
}
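// Usage sketch (editor's addition, not part of the OGDF sources above): a
// minimal, hedged example of how splitFace() is typically driven from client
// code. It assumes the usual OGDF headers; the graph is a 4-cycle, whose
// adjacency order is automatically a planar embedding, so no explicit
// embedding step is needed. All local names are illustrative.
#include <ogdf/basic/CombinatorialEmbedding.h>

void splitFaceExample()
{
    ogdf::Graph G;
    ogdf::node u = G.newNode(), v = G.newNode(), w = G.newNode(), x = G.newNode();
    G.newEdge(u, v); G.newEdge(v, w); G.newEdge(w, x); G.newEdge(x, u); // 4-cycle

    ogdf::CombinatorialEmbedding E(G);  // computes the faces of the embedding

    // Pick two adjacency entries on the same face boundary; splitFace() then
    // inserts the chord (u,w) and splits that face into two faces.
    ogdf::adjEntry a1 = u->firstAdj();
    ogdf::adjEntry a2 = a1->faceCycleSucc()->faceCycleSucc();
    if (E.rightFace(a1) == E.rightFace(a2) && a1 != a2) {
        ogdf::edge chord = E.splitFace(a1, a2);
        (void)chord; // new edge lies on the boundary of both resulting faces
    }
}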
node CombinatorialEmbedding::contract(edge e)
{
    // Since we remove edge e, we also remove adjSrc and adjTgt.
    // We make sure that none of them is stored as first adjacency
    // entry of a face.
    adjEntry adjSrc = e->adjSource();
    adjEntry adjTgt = e->adjTarget();

    face fSrc = m_rightFace[adjSrc];
    face fTgt = m_rightFace[adjTgt];

    if (fSrc->entries.m_adjFirst == adjSrc) {
        adjEntry adj = adjSrc->faceCycleSucc();
        fSrc->entries.m_adjFirst = (adj != adjTgt) ? adj : adj->faceCycleSucc();
    }

    if (fTgt->entries.m_adjFirst == adjTgt) {
        adjEntry adj = adjTgt->faceCycleSucc();
        fTgt->entries.m_adjFirst = (adj != adjSrc) ? adj : adj->faceCycleSucc();
    }

    node v = m_pGraph->contract(e);

    --fSrc->m_size;
    --fTgt->m_size;

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return v;
}
void CombinatorialEmbedding::moveBridge(adjEntry adjBridge, adjEntry adjBefore)
{
    OGDF_ASSERT(m_rightFace[adjBridge] == m_rightFace[adjBridge->twin()]);
    OGDF_ASSERT(m_rightFace[adjBridge] != m_rightFace[adjBefore]);

    face fOld = m_rightFace[adjBridge];
    face fNew = m_rightFace[adjBefore];

    adjEntry adjCand = adjBridge->faceCycleSucc();

    int sz = 0;
    adjEntry adj;
    for(adj = adjBridge->twin(); adj != adjCand; adj = adj->faceCycleSucc()) {
        if (fOld->entries.m_adjFirst == adj)
            fOld->entries.m_adjFirst = adjCand;
        m_rightFace[adj] = fNew;
        ++sz;
    }

    fOld->m_size -= sz;
    fNew->m_size += sz;

    edge e = adjBridge->theEdge();
    if(e->source() == adjBridge->twinNode())
        m_pGraph->moveSource(e, adjBefore, after);
    else
        m_pGraph->moveTarget(e, adjBefore, after);

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
}
void ConstCombinatorialEmbedding::computeFaces()
{
    m_externalFace = nullptr; // no longer valid!
    m_faceIdCount = 0;
    faces.clear();

    m_rightFace.fill(nullptr);

    for(node v : m_cpGraph->nodes) {
        for(adjEntry adj : v->adjEdges) {
            if (m_rightFace[adj]) continue;

#ifdef OGDF_DEBUG
            face f = OGDF_NEW FaceElement(this,adj,m_faceIdCount++);
#else
            face f = OGDF_NEW FaceElement(adj,m_faceIdCount++);
#endif

            faces.pushBack(f);

            adjEntry adj2 = adj;
            do {
                m_rightFace[adj2] = f;
                f->m_size++;
                adj2 = adj2->faceCycleSucc();
            } while (adj2 != adj);
        }
    }

    m_faceArrayTableSize = Graph::nextPower2(MIN_FACE_TABLE_SIZE,m_faceIdCount);
    reinitArrays();

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
}
face CombinatorialEmbedding::joinFaces(edge e)
{
    face f = joinFacesPure(e);
    m_pGraph->delEdge(e);

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return f;
}
ClusterGraph &ClusterGraph::operator=(const ClusterGraph &C)
{
    clear();
    shallowCopy(C);
    m_clusterArrayTableSize = C.m_clusterArrayTableSize;
    reinitArrays();

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
    return *this;
}
void
AST_Rewrite::AccumulatedDeclarationsAttribute::popScope()
{
    ROSE_ASSERT (this != NULL);
    ROSE_ASSERT (stackOfLists.size() > 0);
    stackOfLists.pop();

    // error checking
    consistencyCheck();
}
void
AST_Rewrite::AccumulatedDeclarationsAttribute::pushScope()
{
    ROSE_ASSERT (this != NULL);
    list<SgDeclarationStatement*> emptyDeclarationList;
    stackOfLists.push(emptyDeclarationList);
    ROSE_ASSERT (stackOfLists.size() > 0);

    // error checking
    consistencyCheck();
}
Graph &Graph::operator=(const Graph &G)
{
    clear();
    copy(G);

    m_nodeArrayTableSize = nextPower2(MIN_NODE_TABLE_SIZE,m_nodeIdCount);
    m_edgeArrayTableSize = nextPower2(MIN_EDGE_TABLE_SIZE,m_edgeIdCount);
    reinitArrays();

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return *this;
}
void
AST_Rewrite::AccumulatedDeclarationsAttribute::addNewDeclaration( SgDeclarationStatement* declaration )
{
    ROSE_ASSERT (this != NULL);
    ROSE_ASSERT (stackOfLists.size() > 0);
    stackOfLists.top().push_back(declaration);
    ROSE_ASSERT (stackOfLists.top().size() > 0);

    // error checking
    consistencyCheck();
}
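// Usage sketch (editor's addition, not from the ROSE sources above): the three
// AccumulatedDeclarationsAttribute members form a simple scope stack. The
// attribute object and the SgDeclarationStatement pointer are assumed to be
// supplied by the surrounding AST rewrite machinery; `attr` and `decl` are
// illustrative names only.
void accumulateDeclarationsExample(
    AST_Rewrite::AccumulatedDeclarationsAttribute &attr,
    SgDeclarationStatement *decl)
{
    attr.pushScope();               // open a new (innermost) scope
    attr.addNewDeclaration(decl);   // record a declaration in that scope
    // ... descend into nested scopes, accumulating further declarations ...
    attr.popScope();                // discard the innermost scope again
}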
/**
 * @brief Add an element to a composite value, for returning to the backend
 */
inline
AnyType&
AnyType::operator<<(const AnyType &inValue) {
    consistencyCheck();

    madlib_assert(mContent == Null || mContent == ReturnComposite,
        std::logic_error("Internal inconsistency while creating composite "
            "return value."));

    mContent = ReturnComposite;
    mChildren.push_back(inValue);
    return *this;
}
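// Usage sketch (editor's addition): how operator<<() is typically used to build
// a composite return value in code written against this abstraction layer,
// assuming AnyType's templated value constructor is available in scope. The
// function name and field choice are illustrative assumptions.
AnyType buildCompositeResult(double mean, double variance) {
    AnyType tuple;
    tuple << AnyType(mean) << AnyType(variance); // ReturnComposite with two children
    return tuple;                                // later flattened by getAsDatum()
}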
void CombinatorialEmbedding::clear()
{
    m_pGraph->clear();

    faces.clear();

    m_faceIdCount = 0;
    m_faceArrayTableSize = MIN_FACE_TABLE_SIZE;
    m_externalFace = nullptr;

    reinitArrays();

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
}
void CombinatorialEmbedding::removeDeg1(node v)
{
    OGDF_ASSERT(v->degree() == 1);

    adjEntry adj = v->firstAdj();
    face f = m_rightFace[adj];

    if (f->entries.m_adjFirst == adj || f->entries.m_adjFirst == adj->twin())
        f->entries.m_adjFirst = adj->faceCycleSucc();
    f->m_size -= 2;

    m_pGraph->delNode(v);

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());
}
/**
 * @brief Return the n-th element from a composite value
 *
 * To the user, AnyType is a fully recursive type: Each AnyType object can be a
 * composite object and be composed of a number of other AnyType objects.
 * On top of the C++ abstraction layer, functions have a single top-level
 * AnyType object as parameter.
 */
inline
AbstractionLayer::AnyType
AbstractionLayer::AnyType::operator[](uint16_t inID) const {
    consistencyCheck();

    if (isNull())
        throw std::invalid_argument("Unexpected Null value in function "
            "argument.");
    if (!isComposite())
        throw std::invalid_argument("Invalid type conversion requested. "
            "Expected composite type but got simple type.");

    if (mContent == ReturnComposite)
        return mChildren[inID];

    Oid typeID = 0;
    bool isMutable = false;
    Datum datum = 0;
    bool isTuple = false;
    HeapTupleHeader pgTuple = NULL;

    try {
        if (mContent == FunctionComposite) {
            if (inID >= size_t(PG_NARGS()))
                throw std::out_of_range("Access behind end of argument list");

            if (PG_ARGISNULL(inID))
                return AnyType();

            backendGetTypeIDForFunctionArg(inID, typeID, isMutable);
            datum = PG_GETARG_DATUM(inID);
        } else if (mContent == NativeComposite)
            backendGetTypeIDAndDatumForTupleElement(inID, typeID, datum);

        if (typeID == InvalidOid)
            throw std::invalid_argument("Backend returned invalid type ID.");

        backendGetIsCompositeTypeAndHeapTupleHeader(typeID, datum, isTuple,
            pgTuple);
    } catch (PGException &e) {
        throw std::invalid_argument("An exception occurred while "
            "gathering information about PostgreSQL function arguments.");
    }

    return isTuple
        ? AnyType(pgTuple, datum, typeID)
        : AnyType(datum, typeID, isMutable);
}
edge CombinatorialEmbedding::split(edge e)
{
    face f1 = m_rightFace[e->adjSource()];
    face f2 = m_rightFace[e->adjTarget()];

    edge e2 = m_pGraph->split(e);

    m_rightFace[e->adjSource()] = m_rightFace[e2->adjSource()] = f1;
    f1->m_size++;
    m_rightFace[e->adjTarget()] = m_rightFace[e2->adjTarget()] = f2;
    f2->m_size++;

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return e2;
}
string
AST_Rewrite::AccumulatedDeclarationsAttribute::generateDeclarationString() const
{
    string globalString           = generateGlobalDeclarationString();
    string openingNonGlobalString = generateOpeningNonGlobalDeclarationString();
    string closingNonGlobalString = generateClosingNonGlobalDeclarationString();

#if 0
    printf ("globalString           = %s \n",globalString.c_str());
    printf ("openingNonGlobalString = %s \n",openingNonGlobalString.c_str());
    printf ("closingNonGlobalString = %s \n",closingNonGlobalString.c_str());
#endif

    consistencyCheck();

    return globalString + openingNonGlobalString + closingNonGlobalString;
}
/** recurse down randomly
 *  until exhausting storage
 */
void
invocation (uint consumed, void* lastLevel)
{
    MockSizeRequest numbers;
    consumed += numbers.getNrI()+numbers.getNrO();
    if (TABLE_SIZ <= consumed)
        return; // end recursion

    ++counter;
#if false ///////////////////////////////////////////////////////////////////UNIMPLEMENTED :: TICKET #833
    BuffTableChunk thisChunk (numbers, *pStorage);
    CHECK (consistencyCheck (thisChunk, numbers, lastLevel));

    uint nrBranches ( 1 + (rand() % WIDTH_MAX));
    while (nrBranches--)
        invocation (consumed, first_behind (thisChunk,numbers.getNrI()));
#endif    ///////////////////////////////////////////////////////////////////UNIMPLEMENTED :: TICKET #833
}
//--
//-----------------
//incremental stuff

//special version of the above function doing a pushback of the new edge
//on the adjacency list of v making it possible to insert new degree 0
//nodes into a face, end node v
edge CombinatorialEmbedding::splitFace(adjEntry adjSrc, node v)
{
    adjEntry adjTgt = v->lastAdj();
    edge e = nullptr;
    bool degZ = v->degree() == 0;
    if (degZ) {
        e = m_pGraph->newEdge(adjSrc, v);
    }
    else {
        OGDF_ASSERT(m_rightFace[adjSrc] == m_rightFace[adjTgt])
        OGDF_ASSERT(adjSrc != adjTgt)
        e = m_pGraph->newEdge(adjSrc, adjTgt); //could use ne(v,ad) here, too
    }

    face f1 = m_rightFace[adjSrc];

    //if v already had an adjacent edge, we split the face in two faces
    int subSize = 0;
    if (!degZ) {
        face f2 = createFaceElement(adjTgt);

        adjEntry adj = adjTgt;
        do {
            m_rightFace[adj] = f2;
            f2->m_size++;
            adj = adj->faceCycleSucc();
        } while (adj != adjTgt);

        subSize = f2->m_size;
    }//if not zero degree
    else {
        m_rightFace[e->adjSource()] = f1;
    }

    f1->entries.m_adjFirst = adjSrc;
    f1->m_size += (2 - subSize);
    m_rightFace[e->adjTarget()] = f1;

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return e;
}//splitface
node CombinatorialEmbedding::splitNode(adjEntry adjStartLeft, adjEntry adjStartRight)
{
    face fL = leftFace(adjStartLeft);
    face fR = leftFace(adjStartRight);

    node u = m_pGraph->splitNode(adjStartLeft,adjStartRight);

    adjEntry adj = adjStartLeft->cyclicPred();

    m_rightFace[adj] = fL;
    ++fL->m_size;
    m_rightFace[adj->twin()] = fR;
    ++fR->m_size;

    OGDF_ASSERT_IF(dlConsistencyChecks, consistencyCheck());

    return u;
}
template <typename T>
inline
T
AnyType::getAs() const {
    consistencyCheck();

    if (isNull())
        throw std::invalid_argument("Invalid type conversion. "
            "Null where not expected.");

    if (isComposite())
        throw std::invalid_argument("Invalid type conversion. "
            "Composite type where not expected.");

    // Verify type OID
    if (TypeTraits<T>::oid != InvalidOid && mTypeID != TypeTraits<T>::oid) {
        std::stringstream errorMsg;
        errorMsg << "Invalid type conversion. Expected type ID "
            << TypeTraits<T>::oid;
        if (mSysInfo)
            errorMsg << " ('"
                << mSysInfo->typeInformation(TypeTraits<T>::oid)->getName()
                << "')";
        errorMsg << " but got " << mTypeID;
        if (mSysInfo)
            errorMsg << " ('" << mSysInfo->typeInformation(mTypeID)->getName()
                << "')";
        errorMsg << '.';
        throw std::invalid_argument(errorMsg.str());
    }

    // Verify type name
    if (TypeTraits<T>::typeName() &&
        std::strncmp(mTypeName, TypeTraits<T>::typeName(), NAMEDATALEN)) {

        std::stringstream errorMsg;
        errorMsg << "Invalid type conversion. Expected type '"
            << TypeTraits<T>::typeName() << "' but backend type name is '"
            << mTypeName << "' (ID " << mTypeID << ").";
        throw std::invalid_argument(errorMsg.str());
    }

    bool needMutableClone = (TypeTraits<T>::isMutable && !mIsMutable);

    return TypeTraits<T>::toCXXType(mDatum, needMutableClone, mSysInfo);
}
template <typename T>
T
AbstractionLayer::AnyType::getAs() const {
    consistencyCheck();

    if (isNull())
        throw std::invalid_argument("Invalid type conversion requested. Got "
            "Null from backend.");

    if (isComposite())
        throw std::invalid_argument("Invalid type conversion requested. "
            "Expected simple or array type but got composite type from "
            "backend.");

    if (mTypeID != TypeTraits<T>::oid)
        throw std::invalid_argument(
            "Invalid type conversion requested. PostgreSQL type does not match "
            "C++ type.");

    bool needMutableClone = (TypeTraits<T>::isMutable && !mIsMutable);

    return TypeTraits<T>::toCXXType(mDatum, needMutableClone);
}
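// Usage sketch (editor's addition): how getAs<T>() is typically called on the
// argument object handed to a function written against this abstraction layer.
// The function shape and names are illustrative assumptions, not library code.
AnyType squareExample(AnyType &args) {
    double x = args[0].getAs<double>(); // type-checked conversion from Datum
    return AnyType(x * x);              // wrap the scalar result for the backend
}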
/**
 * @brief Return a PostgreSQL Datum representing the current object
 *
 * If the current object is Null, we still return <tt>Datum(0)</tt>, i.e., we
 * return a valid Datum. It is the responsibility of the caller to separately
 * call isNull().
 *
 * The only *conversion* taking place in this function is *combining* Datums
 * into a tuple. At this place, we do not have to worry any more about retaining
 * memory.
 *
 * @param inFnCallInfo The PostgreSQL FunctionCallInfo that was passed to the
 *     UDF. For polymorphic functions or functions that return RECORD, the
 *     function-call information (specifically, the expression parse tree)
 *     is necessary to dynamically resolve type information.
 * @param inTargetTypeID PostgreSQL OID of the target type to convert to. If
 *     omitted the target type is the return type of the function specified by
 *     \c inFnCallInfo.
 *
 * @see getAsDatum(const FunctionCallInfo)
 */
inline
Datum
AnyType::getAsDatum(FunctionCallInfo inFnCallInfo, Oid inTargetTypeID) const {
    consistencyCheck();

    // The default value to return in case of Null is 0. Note, however, that
    // 0 can also be a perfectly valid (non-null) Datum. It is the caller's
    // responsibility to call isNull() separately.
    if (isNull())
        return 0;

    // Note: mSysInfo is NULL if this object was not an argument from the
    // backend.
    SystemInformation* sysInfo = SystemInformation::get(inFnCallInfo);
    FunctionInformation* funcInfo = sysInfo
        ->functionInformation(inFnCallInfo->flinfo->fn_oid);
    TupleDesc targetTupleDesc;

    if (inTargetTypeID == InvalidOid) {
        inTargetTypeID = funcInfo->getReturnType(inFnCallInfo);
        // If inTargetTypeID is \c RECORDOID, the tuple description needs to be
        // derived from the function call
        targetTupleDesc = funcInfo->getReturnTupleDesc(inFnCallInfo);
    } else {
        // If we are here, we should not see inTargetTypeID == RECORDOID because
        // that should only happen for the first non-recursive call of
        // getAsDatum where inTargetTypeID == InvalidOid by default.
        // If it would happen, then the following would return NULL and an
        // exception would be raised a few lines below. So no need to add a
        // check here.
        targetTupleDesc = sysInfo->typeInformation(inTargetTypeID)
            ->getTupleDesc();
    }

    bool targetIsComposite = targetTupleDesc != NULL;
    Datum returnValue = mDatum;

    if (targetIsComposite && !isComposite())
        throw std::runtime_error("Invalid type conversion. "
            "Simple type supplied but backend expects composite type.");

    if (!targetIsComposite && isComposite())
        throw std::runtime_error("Invalid type conversion. "
            "Composite type supplied but backend expects simple type.");

    if (targetIsComposite) {
        if (static_cast<size_t>(targetTupleDesc->natts) < mChildren.size())
            throw std::runtime_error("Invalid type conversion. "
                "Internal composite type has more elements than backend "
                "composite type.");

        std::vector<Datum> values;
        std::vector<char> nulls;

        for (uint16_t pos = 0; pos < mChildren.size(); ++pos) {
            Oid targetTypeID = targetTupleDesc->attrs[pos]->atttypid;

            values.push_back(mChildren[pos].getAsDatum(inFnCallInfo,
                targetTypeID));
            nulls.push_back(mChildren[pos].isNull());
        }
        // All elements that have not been initialized will be set to Null
        for (uint16_t pos = mChildren.size();
            pos < static_cast<size_t>(targetTupleDesc->natts);
            ++pos) {

            values.push_back(Datum(0));
            nulls.push_back(true);
        }

        HeapTuple heapTuple = madlib_heap_form_tuple(targetTupleDesc,
            &values[0], reinterpret_cast<bool*>(&nulls[0]));

        // BACKEND: HeapTupleGetDatum is a macro that will not cause an
        // exception
        returnValue = HeapTupleGetDatum(heapTuple);
    } else { /* if (!targetIsComposite) */
        if (mTypeID != InvalidOid && inTargetTypeID != mTypeID) {
            std::stringstream errorMsg;
            errorMsg << "Invalid type conversion. "
                "Backend expects type ID " << inTargetTypeID << " ('"
                << sysInfo->typeInformation(inTargetTypeID)->getName() << "') "
                "but supplied type ID is " << mTypeID << " ('"
                << sysInfo->typeInformation(mTypeID)->getName() << "').";
            throw std::invalid_argument(errorMsg.str());
        }

        if (mTypeName &&
            std::strncmp(mTypeName,
                sysInfo->typeInformation(inTargetTypeID)->getName(),
                NAMEDATALEN)) {

            std::stringstream errorMsg;
            errorMsg << "Invalid type conversion. Backend expects type '"
                << sysInfo->typeInformation(inTargetTypeID)->getName()
                << "' (ID " << inTargetTypeID << ") but internal type name is '"
                << mTypeName << "'.";
            throw std::invalid_argument(errorMsg.str());
        }
    }

    return returnValue;
}
/**
 * @brief Return the n-th element from a composite value
 *
 * To the user, AnyType is a fully recursive type: Each AnyType object can be a
 * composite object and be composed of a number of other AnyType objects.
 * Functions written using the C++ abstraction layer have a single logical
 * argument of type AnyType.
 */
inline
AnyType
AnyType::operator[](uint16_t inID) const {
    consistencyCheck();

    if (isNull()) {
        // Handle case mContent == Null
        throw std::invalid_argument("Invalid type conversion. "
            "Null where not expected.");
    }
    if (!isComposite()) {
        // Handle case mContent == Scalar
        throw std::invalid_argument("Invalid type conversion. "
            "Composite type where not expected.");
    }

    if (mContent == ReturnComposite)
        return mChildren[inID];

    // It holds now that mContent is either FunctionComposite or NativeComposite.
    // In this case, it is guaranteed that fcinfo != NULL.
    Oid typeID = 0;
    bool isMutable = false;
    Datum datum = 0;

    if (mContent == FunctionComposite) {
        // This AnyType object represents the composite value consisting of all
        // function arguments
        if (inID >= size_t(PG_NARGS()))
            throw std::out_of_range("Invalid type conversion. Access behind "
                "end of argument list.");

        if (PG_ARGISNULL(inID))
            return AnyType();

        typeID = mSysInfo->functionInformation(fcinfo->flinfo->fn_oid)
            ->getArgumentType(inID, fcinfo->flinfo);
        if (inID == 0) {
            // If we are called as an aggregate function, the first argument is
            // the transition state. In that case, we are free to modify the
            // data. In fact, for performance reasons, we *should* even do all
            // modifications in-place. In all other cases, directly modifying
            // memory is dangerous.
            // See warning at:
            // http://www.postgresql.org/docs/current/static/xfunc-c.html#XFUNC-C-BASETYPE
            // BACKEND: AggCheckCallContext currently will never raise an
            // exception
            isMutable = AggCheckCallContext(fcinfo, NULL);
        }
        datum = PG_GETARG_DATUM(inID);
    } else { /* if (mContent == NativeComposite) */
        // This AnyType object represents a tuple that was passed from the
        // backend
        TupleDesc tupdesc = mSysInfo
            ->typeInformation(HeapTupleHeaderGetTypeId(mTupleHeader))
            ->getTupleDesc(HeapTupleHeaderGetTypMod(mTupleHeader));

        if (inID >= tupdesc->natts)
            throw std::out_of_range("Invalid type conversion. Access behind "
                "end of composite object.");

        typeID = tupdesc->attrs[inID]->atttypid;
        bool isNull = false;
        datum = madlib_GetAttributeByNum(mTupleHeader, inID, &isNull);
        if (isNull)
            return AnyType();
    }

    if (typeID == InvalidOid)
        throw std::invalid_argument("Backend returned invalid type ID.");

    return mSysInfo->typeInformation(typeID)->isCompositeType()
        ? AnyType(mSysInfo, madlib_DatumGetHeapTupleHeader(datum), datum,
            typeID)
        : AnyType(mSysInfo, datum, typeID, isMutable);
}
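// Usage sketch (editor's addition): operator[]() drills into composite values,
// so a function receiving a (point, weight) row can unpack it field by field.
// The function shape and field layout are illustrative assumptions only.
AnyType weightedValueExample(AnyType &args) {
    AnyType row = args[0];                    // first argument: a composite value
    double point  = row[0].getAs<double>();   // field 0 of the tuple
    double weight = row[1].getAs<double>();   // field 1 of the tuple
    return AnyType(point * weight);
}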
/* -------------------------------------------------------------------------- *\
isIncKeepLastReachability(): If the last result is unsat, put the inductive
                             invariant into the last frame.
isIncContinueOnLastSolver(): Reset the solver.
\* -------------------------------------------------------------------------- */
void V3VrfMPDR::startVerify(const uint32_t& p) {
vrfRestart:
   // Check Shared Results
   if (_sharedBound && V3NtkUD == _sharedBound->getBound(p)) return;
   // Clear Verification Results
   clearResult(p);
   if (profileON()) _totalStat->start();
   // Consistency Check
   consistencyCheck();
   assert (!_constr.size());
   if (!reportUnsupportedInitialState()) return;
   // Initialize Backup Frames
   for (uint32_t i = 0; i < _pdrBackup.size(); ++i) delete _pdrBackup[i];
   _pdrBackup.clear();
   if (_pdrFrame.size()) {
      if (isIncKeepLastReachability()) {
         // Backup frames in the order: ..., 2, 1, INF
         assert (_pdrFrame.size() > 1);
         _pdrBackup.reserve(_pdrFrame.size() - 1);
         for (uint32_t i = _pdrFrame.size() - 2; i > 0; --i) _pdrBackup.push_back(_pdrFrame[i]);
         _pdrBackup.push_back(_pdrFrame.back());
         delete _pdrFrame[0];
      }
      else {
         for (uint32_t i = 0; i < _pdrFrame.size(); ++i) delete _pdrFrame[i];
      }
      _pdrFrame.clear();
   }
   // Initialize Other Members
   if (!isIncKeepLastReachability()) _pdrPriority.clear();
   _pdrActCount = 0;
   if (_pdrBad) delete _pdrBad;
   _pdrBad = 0;
   if (_pdrGen) delete _pdrGen;
   _pdrGen = 0;
   if (dynamic_cast<V3BvNtk*>(_vrfNtk)) {
      _pdrGen = new V3AlgBvGeneralize(_handler); assert (_pdrGen);
      _pdrSim = dynamic_cast<V3AlgBvSimulate*>(_pdrGen); assert (_pdrSim);
   }
   else {
      _pdrGen = new V3AlgAigGeneralize(_handler); assert (_pdrGen);
      _pdrSim = dynamic_cast<V3AlgAigSimulate*>(_pdrGen); assert (_pdrSim);
   }
   V3NetVec simTargets(1, _vrfNtk->getOutput(p)); _pdrSim->reset(simTargets);
   // Initialize Pattern Input Size
   assert (p < _result.size()); assert (p < _vrfNtk->getOutputSize());
   const V3NetId& pId = _vrfNtk->getOutput(p); assert (V3NetUD != pId);
   _pdrSize = _vrfNtk->getInputSize() + _vrfNtk->getInoutSize();
   // Initialize Parameters
   const string flushSpace = string(100, ' ');
   uint32_t proved = V3NtkUD, fired = V3NtkUD;
   struct timeval inittime, curtime; gettimeofday(&inittime, NULL);
   // Initialize Signal Priority List
   if (_pdrPriority.size() != _vrfNtk->getLatchSize()) _pdrPriority.resize(_vrfNtk->getLatchSize(), 0);
   // Initialize Solver
   if (_pdrSvr && !isIncContinueOnLastSolver()) { delete _pdrSvr; _pdrSvr = 0; }
   initializeSolver();
   // Initialize Bad Cube
   _pdrBad = new V3MPDRCube(0); assert (_pdrBad); _pdrBad->setState(V3NetVec(1, pId));
   // Initialize Frame 0
   if (_vrfNtk->getLatchSize()) _pdrFrame.push_back(new V3MPDRFrame(_pdrSvr->setImplyInit()));  // R0 = I0
   else _pdrFrame.push_back(new V3MPDRFrame(_pdrSvr->reserveFormula()));
   assert (_pdrFrame.back()->getActivator()); assert (_pdrFrame.size() == 1);
   // Initialize Frame INF
   if (_pdrBackup.size()) { _pdrFrame.push_back(_pdrBackup.back()); _pdrBackup.pop_back(); addFrameInfoToSolver(1); }
   else _pdrFrame.push_back(new V3MPDRFrame(_pdrSvr->reserveFormula()));
   assert (_pdrFrame.back()->getActivator()); assert (_pdrFrame.size() == 2);
   // Check Shared Invariants
   if (_sharedInv) {
      V3NetTable sharedInv; _sharedInv->getInv(sharedInv);
      for (uint32_t i = 0; i < sharedInv.size(); ++i) {
         V3MPDRCube* const inv = new V3MPDRCube(0); assert (inv);
         inv->setState(sharedInv[i]);
         addBlockedCube(make_pair(getPDRFrame(), inv));
      }
   }
   // Continue on the Last Depth
   while (_pdrBackup.size() && (getIncLastDepthToKeepGoing() > getPDRFrame())) {
      _pdrFrame.push_back(_pdrFrame.back());  // Keep frame INF the last frame
      _pdrFrame[_pdrFrame.size() - 2] = _pdrBackup.back(); _pdrBackup.pop_back();
      addFrameInfoToSolver(_pdrFrame.size() - 2);
   }
   // Start PDR Based Verification
   V3MPDRCube* badCube = 0;
   while (true) {
      // Check Time Bounds
      gettimeofday(&curtime, NULL);
      if (_maxTime < getTimeUsed(inittime, curtime)) break;
      // Check Shared Results
      if (_sharedBound && (V3NtkUD == _sharedBound->getBound(p))) break;
      // Check Shared Networks
      if (_sharedNtk) {
         V3NtkHandler* const sharedNtk = _sharedNtk->getNtk(_handler);
         if (sharedNtk) {
            setIncKeepLastReachability(true); setIncContinueOnLastSolver(false);
            setIncLastDepthToKeepGoing(getPDRDepth());
            _handler = sharedNtk; _vrfNtk = sharedNtk->getNtk();
            goto vrfRestart;
         }
      }
      // Find a Bad Cube as Initial Proof Obligation
      badCube = getInitialObligation();  // SAT(R ^ T ^ !p)
      if (!badCube) {
         if (!isIncKeepSilent() && intactON()) {
            if (!endLineON()) Msg(MSG_IFO) << "\r" + flushSpace + "\r";
            Msg(MSG_IFO) << setw(3) << left << getPDRDepth() << " :";
            const uint32_t j = (_pdrFrame.size() > 25) ? _pdrFrame.size() - 25 : 0;
            if (j) Msg(MSG_IFO) << " ...";
            for (uint32_t i = j; i < _pdrFrame.size(); ++i)
               Msg(MSG_IFO) << " " << _pdrFrame[i]->getCubeList().size();
            if (svrInfoON()) { Msg(MSG_IFO) << " ("; _pdrSvr->printInfo(); Msg(MSG_IFO) << ")"; }
            Msg(MSG_IFO) << endl;  // Always Endline At the End of Each Frame
         }
         if (_sharedBound) _sharedBound->updateBound(p, getPDRFrame());
         // Push New Frame
         _pdrFrame.push_back(_pdrFrame.back());  // Renders F Infinity to be the last in _pdrFrame
         if (_pdrBackup.size()) {
            _pdrFrame[_pdrFrame.size() - 2] = _pdrBackup.back(); _pdrBackup.pop_back();
            addFrameInfoToSolver(_pdrFrame.size() - 2);
         }
         else _pdrFrame[_pdrFrame.size() - 2] = new V3MPDRFrame(_pdrSvr->reserveFormula());  // New Frame
         if (propagateCubes()) { proved = getPDRDepth(); break; }
         if (_maxDepth <= (getPDRFrame() - 1)) break;
      }
      else {
         badCube = recursiveBlockCube(badCube);
         if (badCube) { fired = getPDRDepth(); break; }
         // Interactively Show the Number of Bad Cubes in Frames
         if (!isIncKeepSilent() && intactON()) {
            if (!endLineON()) Msg(MSG_IFO) << "\r" + flushSpace + "\r";
            Msg(MSG_IFO) << setw(3) << left << getPDRDepth() << " :";
            const uint32_t j = (_pdrFrame.size() > 25) ? _pdrFrame.size() - 25 : 0;
            if (j) Msg(MSG_IFO) << " ...";
            for (uint32_t i = j; i < _pdrFrame.size(); ++i)
               Msg(MSG_IFO) << " " << _pdrFrame[i]->getCubeList().size();
            if (svrInfoON()) { Msg(MSG_IFO) << " ("; _pdrSvr->printInfo(); Msg(MSG_IFO) << ")"; }
            if (endLineON()) Msg(MSG_IFO) << endl;
            else Msg(MSG_IFO) << flush;
         }
      }
   }
   // Report Verification Result
   if (!isIncKeepSilent() && reportON()) {
      if (intactON()) {
         if (endLineON()) Msg(MSG_IFO) << endl;
         else Msg(MSG_IFO) << "\r" << flushSpace << "\r";
      }
      if (V3NtkUD != proved) Msg(MSG_IFO) << "Inductive Invariant found at depth = " << ++proved;
      else if (V3NtkUD != fired) Msg(MSG_IFO) << "Counter-example found at depth = " << ++fired;
      else Msg(MSG_IFO) << "UNDECIDED at depth = " << _maxDepth;
      if (usageON()) {
         gettimeofday(&curtime, NULL);
         Msg(MSG_IFO) << " (time = " << setprecision(5) << getTimeUsed(inittime, curtime) << " sec)" << endl;
      }
      if (profileON()) {
         _totalStat->end();
         Msg(MSG_IFO) << *_initSvrStat << endl;
         Msg(MSG_IFO) << *_solveStat << endl;
         Msg(MSG_IFO) << *_generalStat << endl;
         Msg(MSG_IFO) << *_propagateStat << endl;
         Msg(MSG_IFO) << *_ternaryStat << endl;
         Msg(MSG_IFO) << *_totalStat << endl;
      }
   }
   // Record CounterExample Trace or Invariant
   if (V3NtkUD != fired) {  // Record Counter-Example
      // Compute PatternCount
      const V3MPDRCube* traceCube = badCube; assert (traceCube);
      assert (existInitial(traceCube->getState()));
      uint32_t patternCount = 0;
      while (_pdrBad != traceCube) { traceCube = traceCube->getNextCube(); ++patternCount; }
      V3CexTrace* const cex = new V3CexTrace(patternCount); assert (cex);
      _result[p].setCexTrace(cex); assert (_result[p].isCex());
      // Set Pattern Value
      traceCube = badCube; assert (traceCube); assert (existInitial(traceCube->getState()));
      while (_pdrBad != traceCube) {
         if (_pdrSize) cex->pushData(traceCube->getInputData());
         traceCube = traceCube->getNextCube(); assert (traceCube);
      }
      // Set Initial State Value
      if (_pdrInitValue.size()) {
         V3BitVecX initValue(_pdrInitValue.size());
         for (uint32_t i = 0; i < badCube->getState().size(); ++i) {
            assert (initValue.size() > badCube->getState()[i].id);
            if (badCube->getState()[i].cp) initValue.set0(badCube->getState()[i].id);
            else initValue.set1(badCube->getState()[i].id);
         }
         for (uint32_t i = 0; i < _pdrInitValue.size(); ++i)
            if (_pdrInitConst[i]) {
               if (_pdrInitValue[i]) initValue.set0(i);
               else initValue.set1(i);
            }
         cex->setInit(initValue);
      }
      // Delete Cubes on the Trace
      const V3MPDRCube* lastCube; traceCube = badCube;
      while (_pdrBad != traceCube) { lastCube = traceCube->getNextCube(); delete traceCube; traceCube = lastCube; }
      // Check Common Results
      if (isIncVerifyUsingCurResult()) checkCommonCounterexample(p, *cex);
   }
   else if (V3NtkUD != proved) {  // Record Inductive Invariant
      _result[p].setIndInv(_vrfNtk); assert (_result[p].isInv());
      // Put the Inductive Invariant to Frame INF
      uint32_t f = 1;
      for (; f < getPDRDepth(); ++f) if (!_pdrFrame[f]->getCubeList().size()) break;
      assert (f < getPDRDepth());
      for (uint32_t i = 1 + f; i < getPDRFrame(); ++i) {
         const V3MPDRCubeList& cubeList = _pdrFrame[i]->getCubeList();
         V3MPDRCubeList::const_iterator it;
         for (it = cubeList.begin(); it != cubeList.end(); ++it)
            addBlockedCube(make_pair(getPDRFrame(), *it));
         _pdrFrame[i]->clearCubeList(); delete _pdrFrame[i];
      }
      // Remove Empty Frames
      _pdrFrame.back()->removeSelfSubsumed();
      _pdrFrame[f] = _pdrFrame.back();
      while ((1 + f) != _pdrFrame.size()) _pdrFrame.pop_back();
      // Check Common Results
      if (isIncVerifyUsingCurResult()) {
         const V3MPDRCubeList& invCubeList = _pdrFrame.back()->getCubeList();
         V3NetTable invList; invList.clear(); invList.reserve(invCubeList.size());
         for (V3MPDRCubeList::const_iterator it = invCubeList.begin(); it != invCubeList.end(); ++it)
            invList.push_back((*it)->getState());
         checkCommonProof(p, invList, false);
      }
   }
}
/**
 * @brief Return a PostgreSQL Datum representing the current object
 *
 * The only *conversion* taking place in this function is *combining* Datums
 * into a tuple. At this place, we do not have to worry any more about retaining
 * memory.
 *
 * @param inTargetTypeID PostgreSQL OID of the target type to convert to
 * @param inTargetIsComposite Whether the target type is composite.
 *     \c indeterminate if unknown.
 * @param inTargetTupleDesc If target type is known to be composite, then
 *     (optionally) the PostgreSQL TupleDesc. NULL is always a valid argument.
 *
 * @see getAsDatum(const FunctionCallInfo)
 */
inline
Datum
AbstractionLayer::AnyType::getAsDatum(Oid inTargetTypeID,
    boost::tribool inTargetIsComposite, TupleDesc inTargetTupleDesc) const {

    consistencyCheck();

    // The default value to return in case of Null is 0. Note, however, that
    // 0 can also be a perfectly valid (non-null) Datum. It is the caller's
    // responsibility to call isNull() separately.
    if (isNull())
        return 0;

    try {
        bool exceptionOccurred = false;
        TupleHandle tupleHandle(inTargetTupleDesc);

        if (boost::indeterminate(inTargetIsComposite)) {
            inTargetIsComposite = isRowTypeInCache(inTargetTypeID);
            backendGetIsCompositeTypeAndTupleHandle(inTargetTypeID,
                inTargetIsComposite, tupleHandle);
        }

        if (inTargetIsComposite && !isComposite())
            throw std::runtime_error("Invalid type conversion requested. "
                "Simple type supplied but PostgreSQL expects composite type.");

        if (!inTargetIsComposite && isComposite())
            throw std::runtime_error("Invalid type conversion requested. "
                "Composite type supplied but PostgreSQL expects simple type.");

        madlib_assert(inTargetIsComposite == (tupleHandle.desc != NULL),
            MADLIB_DEFAULT_EXCEPTION);

        if (inTargetIsComposite) {
            if (static_cast<size_t>(tupleHandle.desc->natts) < mChildren.size())
                throw std::runtime_error("Invalid type conversion requested. "
                    "Internal composite type has more elements than PostgreSQL "
                    "composite type.");

            std::vector<Datum> values;
            std::vector<char> nulls;

            for (uint16_t pos = 0; pos < mChildren.size(); ++pos) {
                Oid targetTypeID = tupleHandle.desc->attrs[pos]->atttypid;

                values.push_back(mChildren[pos].getAsDatum(targetTypeID));
                nulls.push_back(mChildren[pos].isNull());
            }
            // All elements that have not been initialized will be set to Null
            for (uint16_t pos = mChildren.size();
                pos < static_cast<size_t>(tupleHandle.desc->natts);
                ++pos) {

                values.push_back(Datum(0));
                nulls.push_back(true);
            }

            Datum returnValue;
            PG_TRY(); {
                HeapTuple heapTuple = heap_form_tuple(tupleHandle.desc,
                    &values[0], reinterpret_cast<bool*>(&nulls[0]));

                returnValue = HeapTupleGetDatum(heapTuple);
            } PG_CATCH(); {
                exceptionOccurred = true;
            } PG_END_TRY();

            if (exceptionOccurred)
                throw PGException();

            return returnValue;
        }
    } catch (PGException &e) {
        throw std::invalid_argument("An exception occurred while "
            "gathering information about the PostgreSQL return type.");
    }

    if (inTargetTypeID != mTypeID)
        throw std::invalid_argument("Invalid type conversion requested. "
            "C++ type and PostgreSQL return type do not match.");

    return mDatum;
}