// Compute a range A-B and add it to the list.
void HexagonBlockRanges::RangeList::addsub(const IndexRange &A,
      const IndexRange &B) {
  // Exclusion of non-overlapping ranges makes some checks simpler
  // later in this function.
  if (!A.overlaps(B)) {
    // A - B = A.
    add(A);
    return;
  }

  IndexType AS = A.start(), AE = A.end();
  IndexType BS = B.start(), BE = B.end();

  // If AE is None, then A is included in B, since A and B overlap.
  // The result of the subtraction is empty, so just return.
  if (AE == IndexType::None)
    return;

  if (AS < BS) {
    // A starts before B.
    // AE cannot be None since A and B overlap.
    assert(AE != IndexType::None);
    // Add the part of A that extends on the "less" side of B.
    add(AS, BS, A.Fixed, false);
  }

  if (BE < AE) {
    // BE cannot be Exit here.
    if (BE == IndexType::None)
      add(BS, AE, A.Fixed, false);
    else
      add(BE, AE, A.Fixed, false);
  }
}
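
The case split above is easier to see on plain integers. Below is a minimal stand-alone sketch of the same A - B subtraction, assuming ordinary half-open [S, E) ranges instead of LLVM's IndexType (SimpleRange and subtract are illustrative names, not part of HexagonBlockRanges):

#include <vector>

// Illustrative stand-in for IndexRange: a half-open interval [S, E).
struct SimpleRange { int S, E; };

// Append A - B to Out, mirroring the case split in addsub() above:
// first the piece of A before B, then the piece of A past B.
void subtract(SimpleRange A, SimpleRange B, std::vector<SimpleRange> &Out) {
  bool Overlap = A.S < B.E && B.S < A.E;
  if (!Overlap) {        // Disjoint ranges: A - B = A.
    Out.push_back(A);
    return;
  }
  if (A.S < B.S)         // Part of A on the "less" side of B.
    Out.push_back({A.S, B.S});
  if (B.E < A.E)         // Part of A past the end of B.
    Out.push_back({B.E, A.E});
}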
bool HexagonBlockRanges::IndexRange::contains(const IndexRange &A) const {
  if (start() <= A.start()) {
    // Treat "None" in the range end as equal to the range start.
    IndexType E = (end() != IndexType::None) ? end() : start();
    IndexType AE = (A.end() != IndexType::None) ? A.end() : A.start();
    if (AE <= E)
      return true;
  }
  return false;
}
bool HexagonBlockRanges::IndexRange::overlaps(const IndexRange &A) const {
  // If A contains start(), or "this" contains A.start(), then overlap.
  IndexType S = start(), E = end(), AS = A.start(), AE = A.end();
  if (AS == S)
    return true;
  bool SbAE = (S < AE) || (S == AE && A.TiedEnd);  // S-before-AE.
  bool ASbE = (AS < E) || (AS == E && TiedEnd);    // AS-before-E.
  if ((AS < S && SbAE) || (S < AS && ASbE))
    return true;
  // Otherwise no overlap.
  return false;
}
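
TiedEnd is the subtle part of this predicate: two ranges that merely share an endpoint only count as overlapping when the range ending there has TiedEnd set. A self-contained model on plain integers (struct R and overlapsModel are illustrative, not LLVM's API, and the IndexType::None case is left out):

#include <cassert>

// Closed ranges [S, E] where a shared endpoint only produces an
// overlap when the range ending there is marked as tied.
struct R { int S, E; bool TiedEnd; };

bool overlapsModel(const R &X, const R &A) {
  if (A.S == X.S)
    return true;
  bool SbAE = (X.S < A.E) || (X.S == A.E && A.TiedEnd); // S-before-AE.
  bool ASbE = (A.S < X.E) || (A.S == X.E && X.TiedEnd); // AS-before-E.
  return (A.S < X.S && SbAE) || (X.S < A.S && ASbE);
}

int main() {
  assert(overlapsModel({0, 5, false}, {3, 8, false}));  // Proper intersection.
  assert(!overlapsModel({0, 5, false}, {5, 8, false})); // Touching, not tied.
  assert(overlapsModel({0, 5, true},  {5, 8, false}));  // Touching, tied end.
}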
Example #4
		bool operator < ( const Structure& o) const
		{
			if (structname < o.structname) return true;
			if (structname > o.structname) return false;
			if (source.end() < o.source.end()) return true;
			if (source.end() > o.source.end()) return false;
			if (sink.end() < o.sink.end()) return true;
			if (sink.end() > o.sink.end()) return false;
			if (source.start() < o.source.start()) return true;
			if (source.start() > o.source.start()) return false;
			return (sink.start() < o.sink.start());
		}
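
The chain of paired comparisons implements a five-key lexicographic order; a single mismatched </> pair would silently break the strict weak ordering. Since C++11 the same ordering is more robustly written with tuples, as in this sketch (std::make_tuple copies the keys, which is fine when they are cheap):

		#include <tuple>

		bool operator<(const Structure& o) const
		{
			// Same five keys, same priority, compared lexicographically.
			return std::make_tuple(structname, source.end(), sink.end(),
			                       source.start(), sink.start())
			     < std::make_tuple(o.structname, o.source.end(), o.sink.end(),
			                       o.source.start(), o.sink.start());
		}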
Example #5
PrimitiveRange::PrimitiveRange(PrimitiveType type,
                               VertexBuffer& vertexBuffer,
                               const IndexRange& indexRange,
                               size_t base):
  m_type(type),
  m_vertexBuffer(&vertexBuffer),
  // Draw using the index buffer and element span described by indexRange.
  m_indexBuffer(indexRange.indexBuffer()),
  m_start(indexRange.start()),
  m_count(indexRange.count()),
  m_base(base)
{
}
Example #6
void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
  // Allow merging adjacent ranges.
  assert(end() == A.start() || overlaps(A));
  IndexType AS = A.start(), AE = A.end();
  if (AS < start() || start() == IndexType::None)
    setStart(AS);
  if (end() < AE || end() == IndexType::None) {
    setEnd(AE);
    TiedEnd = A.TiedEnd;
  } else {
    if (end() == AE)
      TiedEnd |= A.TiedEnd;
  }
  if (A.Fixed)
    Fixed = true;
}
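
merge() widens the receiver to cover both ranges, which is how callers coalesce a sorted range list. A sketch of that pattern on a plain integer model (coalesce and R2 are illustrative, not part of the LLVM code):

#include <algorithm>
#include <vector>

// Plain closed ranges [S, E] for illustration.
struct R2 { int S, E; };

// Coalesce a list sorted by start: widen the last output range, the way
// merge() widens end(), whenever the next range overlaps or touches it.
std::vector<R2> coalesce(std::vector<R2> Sorted) {
  std::vector<R2> Out;
  for (const R2 &X : Sorted) {
    if (!Out.empty() && X.S <= Out.back().E)
      Out.back().E = std::max(Out.back().E, X.E);
    else
      Out.push_back(X);
  }
  return Out;
}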
Example #7
IndexRange reverse(IndexRange arg) {
    if (arg.empty()) {
        return arg;
    } else {
        if (arg.start() < arg.end()) {
            return IndexRange::between(arg.end() - 1, arg.start() - 1);
        } else {
            return IndexRange::between(arg.end() + 1, arg.start() + 1);
        }
    }
}
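
This IndexRange appears to be half-open and may run in either direction, so reverse() swaps the endpoints and shifts both by one toward the new orientation. For example, using IndexRange::between() as in the code above:

// Forward range [2, 5): visits indices 2, 3, 4.
IndexRange fwd = IndexRange::between(2, 5);
// reverse(fwd) == IndexRange::between(4, 1): visits 4, 3, 2.
IndexRange bwd = reverse(fwd);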
Example #8
IndexRange span(IndexRange lhs, IndexRange rhs) {
    if (lhs.start() <= lhs.end()) {
        if (rhs.start() <= rhs.end()) {
            const SINT start = std::min(lhs.start(), rhs.start());
            const SINT end = std::max(lhs.end(), rhs.end());
            DEBUG_ASSERT(start <= end);
            return IndexRange::between(start, end);
        } else {
            DEBUG_ASSERT(!"Cannot span index ranges with contrary orientations");
        }
    } else {
        if (rhs.start() >= rhs.end()) {
            const SINT start = std::max(lhs.start(), rhs.start());
            const SINT end = std::min(lhs.end(), rhs.end());
            DEBUG_ASSERT(start >= end);
            return IndexRange::between(start, end);
        } else {
            DEBUG_ASSERT(!"Cannot span index ranges with contrary orientations");
        }
    }
    return IndexRange();
}
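
span() requires both inputs to have the same orientation and returns the smallest range covering both; mixing orientations trips the DEBUG_ASSERT and falls through to an empty range. For example:

// Two ascending ranges: their span is [1, 9).
IndexRange a = IndexRange::between(1, 4);
IndexRange b = IndexRange::between(6, 9);
IndexRange c = span(a, b); // same as IndexRange::between(1, 9)

Note that the result also covers the gap between the inputs: span() computes a bounding range, not a union.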
Example #9
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    fDrawIndirectBuffer.reset(
        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fDrawIndirectBuffer) {
        return;
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    int glInstancesIdx = 0;

    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    int glDrawCmdsIdx = 0;

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();

    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
            glCmd.fCount = geometry.fCount;
            glCmd.fInstanceCount = instanceCount;
            glCmd.fFirstIndex = geometry.fStart;
            glCmd.fBaseVertex = 0;
            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
#if GR_GL_LOG_INSTANCED_BATCHES
                fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
#endif
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    fDrawIndirectBuffer->unmap();

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}
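
Each loop iteration above fills one GrGLDrawElementsIndirectCommand. Its fields correspond to the standard structure consumed by glMultiDrawElementsIndirect (OpenGL 4.3 / GL_ARB_multi_draw_indirect); a sketch of that layout, using fixed-width types in place of the GL typedefs:

#include <cstdint>

// Standard GL indirect-draw command layout; the fCount/fInstanceCount/...
// members written above fill the same five slots.
struct DrawElementsIndirectCommand {
    uint32_t count;         // fCount: indices per draw (geometry.fCount).
    uint32_t instanceCount; // fInstanceCount: instances sharing this geometry.
    uint32_t firstIndex;    // fFirstIndex: offset into the element buffer.
    int32_t  baseVertex;    // fBaseVertex: added to every fetched index.
    uint32_t baseInstance;  // fBaseInstance: first instance to draw.
};

When the driver lacks base-instance support, the code above writes 0 into fBaseInstance and records the running offset in fEmulatedBaseInstance instead, so the instance data can be rebased at draw time.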
Example #10
void
PredictorMfe2dHeuristic::
predict( const IndexRange & r1
		, const IndexRange & r2
		, const OutputConstraint & outConstraint
		)
{
#if INTARNA_MULITHREADING
	#pragma omp critical(intarna_omp_logOutput)
#endif
	{ VLOG(2) <<"predicting mfe interactions heuristically in O(n^2) space and time..."; }
	// measure timing
	TIMED_FUNC_IF(timerObj,VLOG_IS_ON(9));

#if INTARNA_IN_DEBUG_MODE
	// check indices
	if (!(r1.isAscending() && r2.isAscending()) )
		throw std::runtime_error("PredictorMfe2dHeuristic::predict("+toString(r1)+","+toString(r2)+") is not sane");
#endif


	// set index offset
	energy.setOffset1(r1.from);
	energy.setOffset2(r2.from);

	// resize matrix
	hybridE.resize( std::min( energy.size1()
						, (r1.to==RnaSequence::lastPos?energy.size1()-1:r1.to)-r1.from+1 )
				, std::min( energy.size2()
						, (r2.to==RnaSequence::lastPos?energy.size2()-1:r2.to)-r2.from+1 ) );

	// temp vars
	size_t i1,i2,w1,w2;

	// init matrix
	bool isValidCell = true;
	for (i1=0; i1<hybridE.size1(); i1++) {
	for (i2=0; i2<hybridE.size2(); i2++) {

		// check if positions can form interaction
		if (	energy.isAccessible1(i1)
				&& energy.isAccessible2(i2)
				&& energy.areComplementary(i1,i2) )
		{
			// set to interaction initiation with according boundary
			hybridE(i1,i2) = BestInteraction(energy.getE_init(), i1, i2);
		} else {
			// set to infinity, i.e. not used
			hybridE(i1,i2) = BestInteraction(E_INF, RnaSequence::lastPos, RnaSequence::lastPos);
		}

	} // i2
	} // i1

	// init mfe for later updates
	initOptima( outConstraint );

	// compute table and update mfeInteraction
	fillHybridE();

	// trace back and output handler update
	reportOptima( outConstraint );

}
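
The resize() call clamps each matrix dimension to the requested subrange: for a sequence of length 10 and r1 = [2, 7], the matrix gets min(10, 7 - 2 + 1) = 6 rows, covering indices 2..7. A stand-alone sketch of that bound (dimFor and npos are illustrative names, not IntaRNA's API):

#include <algorithm>
#include <cstddef>

// Rows/columns needed for a range [from, to] over a sequence of length
// `size`, where `to == npos` means "up to the last position".
size_t dimFor(size_t size, size_t from, size_t to, size_t npos) {
	size_t last = (to == npos) ? size - 1 : to;
	return std::min(size, last - from + 1);
}
// dimFor(10, 2, 7, (size_t)-1) == 6: the matrix covers indices 2..7.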