Example #1
  size_t HttpResponseParser::curl_headerfunction( void *ptr, size_t size,
                                                  size_t nmemb, void *data )
  {
    size_t lSize = size*nmemb;
    size_t lResult = lSize;
    HttpResponseParser* lParser = static_cast<HttpResponseParser*>(data);
    if (lParser->theInsideRead) {
      lParser->theHandler.endBody();
      lParser->theInsideRead = false;
    }
    const char* lDataChar = (const char*) ptr;
    // trim trailing CR/LF (LF = 10, CR = 13) from the header line
    while (lSize != 0 && (lDataChar[lSize - 1] == 10
          || lDataChar[lSize - 1] == 13)) {
      lSize--;
    }
    if (lSize == 0) {
      return lResult;
    }
    std::string lData(lDataChar, lSize);

    if (lData.find("HTTP") == 0) {
      lParser->parseStatusAndMessage(lData);
      return lResult;
    }
    // split "Name: value"; the value is taken to start two characters after the colon
    std::string::size_type lPos = lData.find(':');
    if (lPos == std::string::npos) {
      return lResult;
    }
    std::string lName = lData.substr(0, lPos);
    std::string lValue = lData.substr(lPos + 2);
    {
      // strip any remaining trailing CR/LF characters from the header value
      std::string::size_type lPosition = lValue.size() - 1;
      while (true) {
        if (lPosition == std::string::npos) {
          break;
        }
        if (lValue[lPosition] == '\n' || lValue[lPosition] == '\r') {
          --lPosition;
        } else {
          break;
        }
      }
      lValue = lValue.substr(0, lPosition + 1);
    }
    String lNameS = fn::lower_case( lName );
    if (lNameS == "content-type") {
      parse_content_type(
        lValue, &lParser->theCurrentContentType, &lParser->theCurrentCharset,
        &lParser->theIsMultipart, &lParser->theBoundary
      );
    } else if (lNameS == "content-id") {
      lParser->theId = lValue;
    } else if (lNameS == "content-description") {
      lParser->theDescription = lValue;
    }
    lParser->theHeaders.push_back(
      std::pair<std::string, std::string>(lName, lValue));
    return lResult;
  }
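A minimal usage sketch (not part of the example above): a header callback like this is handed to libcurl via CURLOPT_HEADERFUNCTION, and the parser instance is passed back through CURLOPT_HEADERDATA. The helper name registerHeaderCallback and the handle/parser variables are illustrative, and the sketch assumes curl_headerfunction is declared static so it can serve as a plain C callback.

  #include <curl/curl.h>

  void registerHeaderCallback(CURL* aCurl, HttpResponseParser* aParser)
  {
    // libcurl invokes the callback once per received header line; the pointer
    // set via CURLOPT_HEADERDATA arrives as the callback's last argument.
    curl_easy_setopt(aCurl, CURLOPT_HEADERFUNCTION,
                     &HttpResponseParser::curl_headerfunction);
    curl_easy_setopt(aCurl, CURLOPT_HEADERDATA, aParser);
  }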
  void RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  DeterminePartitionPlacement(const Matrix& A, GOVector& decomposition, GO numPartitions) const {
    RCP<const Map> rowMap = A.getRowMap();

    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm()->duplicate();
    int numProcs = comm->getSize();

    RCP<const Teuchos::MpiComm<int> > tmpic = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
    TEUCHOS_TEST_FOR_EXCEPTION(tmpic == Teuchos::null, Exceptions::RuntimeError, "Cannot cast base Teuchos::Comm to Teuchos::MpiComm object.");
    RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawMpiComm = tmpic->getRawMpiComm();

    const Teuchos::ParameterList& pL = GetParameterList();

    // maxLocal is a constant which determines the number of largest edges being exchanged.
    // The idea is that we do not want to construct the full bipartite graph, but simply a subset of
    // it, which requires less communication. By selecting the largest local edges we hope to achieve
    // similar results but at a lower cost.
    const int maxLocal = pL.get<int>("repartition: remap num values");
    const int dataSize = 2*maxLocal;

    ArrayRCP<GO> decompEntries;
    if (decomposition.getLocalLength() > 0)
      decompEntries = decomposition.getDataNonConst(0);

    // Step 1: Sort local edges by weight
    // Each edge of a bipartite graph corresponds to a triplet (i, j, v) where
    //   i: processor id that has some piece of part with part_id = j
    //   j: part id
    //   v: weight of the edge
    // We set edge weights to be the total number of nonzeros in rows on this processor which
    // correspond to this part_id. The idea is that when we redistribute the matrix, this weight
    // is a good approximation of the amount of data to move.
    // We use two maps: the original one maps the partition id of an edge to the corresponding weight,
    // and a reverse one, which is necessary to sort the edges by weight.
    std::map<GO,GO> lEdges;
    for (LO i = 0; i < decompEntries.size(); i++)
      lEdges[decompEntries[i]] += A.getNumEntriesInLocalRow(i);

    // Reverse map, so that edges are sorted by weight.
    // This results in a multimap, as we may have edges with the same weight.
    std::multimap<GO,GO> revlEdges;
    for (typename std::map<GO,GO>::const_iterator it = lEdges.begin(); it != lEdges.end(); it++)
      revlEdges.insert(std::make_pair(it->second, it->first));

    // Both lData and gData are arrays of data which we communicate. The data is stored
    // in pairs, so that data[2*i+0] is the part index, and data[2*i+1] is the corresponding edge weight.
    // We do not store the processor id in the data, as we can compute it from the offset into gData.
    Array<GO> lData(dataSize, -1), gData(numProcs * dataSize);
    int numEdges = 0;
    for (typename std::multimap<GO,GO>::reverse_iterator rit = revlEdges.rbegin(); rit != revlEdges.rend() && numEdges < maxLocal; rit++) {
      lData[2*numEdges+0] = rit->second; // part id
      lData[2*numEdges+1] = rit->first;  // edge weight
      numEdges++;
    }

    // Step 2: Gather the heaviest local edges
    // Each processor contributes its maxLocal heaviest edges as <part id, weight> pairs (dataSize values); unused slots stay at -1
    MPI_Datatype MpiType = MpiTypeTraits<GO>::getType();
    MPI_Allgather(static_cast<void*>(lData.getRawPtr()), dataSize, MpiType, static_cast<void*>(gData.getRawPtr()), dataSize, MpiType, *rawMpiComm);

    // Step 3: Construct mapping

    // Construct the set of triplets
    std::vector<Triplet<int,int> > gEdges(numProcs * maxLocal);
    size_t k = 0;
    for (LO i = 0; i < gData.size(); i += 2) {
      GO part   = gData[i+0];
      GO weight = gData[i+1];
      if (part != -1) {                     // skip nonexistent edges
        gEdges[k].i = i/dataSize;           // determine the processor by its offset (since every processor sends the same amount)
        gEdges[k].j = part;
        gEdges[k].v = weight;
        k++;
      }
    }
    gEdges.resize(k);

    // Sort edges by weight
    // NOTE: compareTriplets is actually a reverse sort, so the edge weights are in decreasing order
    std::sort(gEdges.begin(), gEdges.end(), compareTriplets<int,int>);

    // Do matching
    std::map<int,int> match;
    std::vector<char> matchedRanks(numProcs, 0), matchedParts(numProcs, 0);
    int numMatched = 0;
    for (typename std::vector<Triplet<int,int> >::const_iterator it = gEdges.begin(); it != gEdges.end(); it++) {
      GO rank = it->i;
      GO part = it->j;
      if (matchedRanks[rank] == 0 && matchedParts[part] == 0) {
        matchedRanks[rank] = 1;
        matchedParts[part] = 1;
        match[part] = rank;
        numMatched++;
      }
    }
    GetOStream(Statistics0) << "Number of unassigned partitions before cleanup stage: " << (numPartitions - numMatched) << " / " << numPartitions << std::endl;

    // Step 4: Assign unassigned partitions
    // We do that by matching each remaining partition number, in order, to the first unmatched rank. Not all part numbers are valid, but the valid parts are a subset of [0, numProcs).
    // It is done this way because it requires no extra communication: we don't need to know which parts are actually valid.
    for (int part = 0, matcher = 0; part < numProcs; part++)
      if (match.count(part) == 0) {
        // Find first non-matched rank
        while (matchedRanks[matcher])
          matcher++;

        match[part] = matcher++;
      }

    // Step 5: Permute entries in the decomposition vector
    for (LO i = 0; i < decompEntries.size(); i++)
      decompEntries[i] = match[decompEntries[i]];
  }
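The Triplet struct and the compareTriplets comparator used in Step 3 are not shown in this excerpt. Below is a plausible sketch consistent with how they are used (sorting descending by edge weight); the field names i, j, v follow the usage above, but the exact MueLu definitions may differ.

  template <typename T, typename W>
  struct Triplet {
    T i, j;   // processor id, part id
    W v;      // edge weight
  };

  template <typename T, typename W>
  static bool compareTriplets(const Triplet<T,W>& a, const Triplet<T,W>& b) {
    return a.v > b.v;   // larger weights first, i.e. a descending sort
  }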
Example #3

void
chtRcThermalDiffusivityResistanceFvPatchScalarField::calcThermalDiffusivity
(
    chtRegionCoupleBase& owner,
    const chtRegionCoupleBase& neighbour
) const
{
    const fvPatch& p = owner.patch();
    const fvMesh& mesh = p.boundaryMesh().mesh();
    const magLongDelta& mld = magLongDelta::New(mesh);

    const chtRcTemperatureFvPatchScalarField& TwOwn =
        dynamic_cast<const chtRcTemperatureFvPatchScalarField&>
        (
            p.lookupPatchField<volScalarField, scalar>("T")
        );
    scalarField& k = owner;
    const scalarField& fOwn = owner.originalPatchField();
    const scalarField TcOwn = TwOwn.patchInternalField();

    scalarField fNei(p.size());
    scalarField TcNei(p.size());
    scalarField TwNei(p.size());

    scalarField QrOwn(p.size(), 0.0);
    scalarField fourQroOwn(p.size(), 0.0);
    scalarField fourQroNei(p.size(), 0.0);

    if (TwOwn.radiation())
    {
        QrOwn += p.lookupPatchField<volScalarField, scalar>("Qr");
        fourQroOwn += 4.0*radiation::sigmaSB.value()*pow4(TwOwn);
    }

    scalarField Qr = QrOwn;
    scalarField cond(p.size());

    {
        Field<VectorN<scalar, 5> > lData(neighbour.size());

        const scalarField& lfNei = neighbour.originalPatchField();
        scalarField lTcNei = TwOwn.shadowPatchField().patchInternalField();
        const scalarField& lTwNei = TwOwn.shadowPatchField();

        forAll(lData, facei)
        {
            lData[facei][0] = lTcNei[facei];
            lData[facei][1] = lfNei[facei];
            lData[facei][2] = lTwNei[facei];
        }

        if(TwOwn.shadowPatchField().radiation())
        {
            const scalarField& lQrNei =
                owner.lookupShadowPatchField<volScalarField, scalar>("Qr");

            forAll(lData, facei)
            {
                lData[facei][3] = lQrNei[facei];
            }
        }

        if(isA<chtRcThermalDiffusivitySlaveFvPatchScalarField>(owner))
        {
            forAll(lData, facei)
            {
                lData[facei][4] = conductivity_[facei];
            }
        }

        const Field<VectorN<scalar, 5> > iData =
            owner.regionCouplePatch().interpolate(lData);

        forAll(iData, facei)
        {
            TcNei[facei] = iData[facei][0];
            fNei[facei] = iData[facei][1];
            TwNei[facei] = iData[facei][2];
        }
Example #4
File: main.cpp  Project: Jackieee/qcad
#include <cmath>              // M_PI
#include "dl_dxf.h"           // dxflib main header (DL_Dxf, DL_Codes, DL_* data classes)
#include "dl_writer_ascii.h"  // DL_WriterA

int main() {
    DL_Dxf dxf;
    DL_WriterA* dw = dxf.out("hatch.dxf", DL_Codes::AC1015);
 
    // section header:
    dxf.writeHeader(*dw);
    dw->sectionEnd();
 
    // section tables:
    dw->sectionTables();

    // VPORT:
    dxf.writeVPort(*dw);

    // LTYPE:
    dw->tableLinetypes(1);
    dxf.writeLinetype(*dw, DL_LinetypeData("CONTINUOUS", "Continuous", 0, 0, 0.0));
    dxf.writeLinetype(*dw, DL_LinetypeData("BYLAYER", "", 0, 0, 0.0));
    dxf.writeLinetype(*dw, DL_LinetypeData("BYBLOCK", "", 0, 0, 0.0));
    dw->tableEnd();

    // LAYER:
    dw->tableLayers(1);
    dxf.writeLayer(
        *dw,
        DL_LayerData("0", 0),
        DL_Attributes("", 2, 0, 100, "CONTINUOUS")
    );
    dw->tableEnd();

    // STYLE:
    dw->tableStyle(1);
    DL_StyleData style("Standard", 0, 0.0, 1.0, 0.0, 0, 2.5, "txt", "");
    style.bold = false;
    style.italic = false;
    dxf.writeStyle(*dw, style);
    dw->tableEnd();

    // VIEW:
    dxf.writeView(*dw);

    // UCS:
    dxf.writeUcs(*dw);

    // APPID:
    dw->tableAppid(1);
    dxf.writeAppid(*dw, "ACAD");
    dw->tableEnd();

    // DIMSTYLE:
    dxf.writeDimStyle(*dw, 2.5, 0.625, 0.625, 0.625, 2.5);

    // BLOCK_RECORD:
    dxf.writeBlockRecord(*dw);
    dw->tableEnd();

    dw->sectionEnd();
 
    // BLOCK:
    dw->sectionBlocks();
    dxf.writeBlock(*dw, DL_BlockData("*Model_Space", 0, 0.0, 0.0, 0.0));
    dxf.writeEndBlock(*dw, "*Model_Space");
    dxf.writeBlock(*dw, DL_BlockData("*Paper_Space", 0, 0.0, 0.0, 0.0));
    dxf.writeEndBlock(*dw, "*Paper_Space");
    dxf.writeBlock(*dw, DL_BlockData("*Paper_Space0", 0, 0.0, 0.0, 0.0));
    dxf.writeEndBlock(*dw, "*Paper_Space0");
    dw->sectionEnd();
 
    // ENTITIES:
    dw->sectionEntities();
 
    DL_Attributes attributes("0", 2, 0, -1, "BYLAYER");
 
    // start hatch with one loop:
    DL_HatchData data(1, false, 100.0, 0.0, "ESCHER", 0.0, 0.0);
    dxf.writeHatch1(*dw, data, attributes);
 
    // start loop:
    DL_HatchLoopData lData(1);
    dxf.writeHatchLoop1(*dw, lData);
 
    // write edge:
    DL_HatchEdgeData eData(
        0.0,
        0.0,
        100.0,
        0.0,
        M_PI*2,
        true
    );
    dxf.writeHatchEdge(*dw, eData);
 
    // end loop:
    dxf.writeHatchLoop2(*dw, lData);
 
    // end hatch:
    dxf.writeHatch2(*dw, data, attributes);
 
    // end section ENTITIES:
    dw->sectionEnd();
    dxf.writeObjects(*dw, "MY_OBJECTS");
    dxf.writeObjectsEnd(*dw);
 
    dw->dxfEOF();
    dw->close();
    delete dw;
 
    return 0;
}
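One robustness note on the example above: DL_Dxf::out() returns a null pointer when the output file cannot be opened, so the writer is usually checked before use. A minimal sketch (the error message and return code are illustrative; std::cerr additionally requires <iostream>):

    DL_WriterA* dw = dxf.out("hatch.dxf", DL_Codes::AC1015);
    if (dw == NULL) {
        // bail out early instead of dereferencing a null writer below
        std::cerr << "Cannot open hatch.dxf for writing." << std::endl;
        return 1;
    }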