Code example #1
File: DoubleList.cpp Project: miracle2k/mp3diags
void DoubleList::initAvailable()
{
    m_listPainter.m_vAvailable.clear();

    switch (m_eSelectionMode)
    {
    case MULTIPLE:
        for (int i = 0, n = cSize(m_listPainter.getAll()); i < n; ++i)
        {
            // In a sense, m_listPainter.m_vAvailable shouldn't be used at all in this
            // case, and it isn't in the DoubleList code; however, it makes AvailableModel
            // easier: DoubleList always has to check the value of m_eSelectionMode anyway,
            // but AvailableModel doesn't have to, and an empty m_listPainter.m_vAvailable
            // in the MULTIPLE case would force it to do so.
            m_listPainter.m_vAvailable.push_back(i);
        }
        break;

    case SINGLE_UNSORTABLE:
        {
            int n (cSize(m_listPainter.getAll()));
            m_listPainter.m_vSel.push_back(n);
            for (int i = 0, j = 0; i < n; ++i)
            {
                if (i < m_listPainter.m_vSel[j])
                {
                    m_listPainter.m_vAvailable.push_back(i);
                }
                else
                {
                    ++j;
                }
            }
            m_listPainter.m_vSel.pop_back();
        }
        break;

    case SINGLE_SORTABLE:
        {
            SubList v (m_listPainter.m_vSel.begin(), m_listPainter.m_vSel.end());
            sort(v.begin(), v.end());
            int n (cSize(m_listPainter.getAll()));
            v.push_back(n);
            for (int i = 0, j = 0; i < n; ++i)
            {
                if (i < v[j])
                {
                    m_listPainter.m_vAvailable.push_back(i);
                }
                else
                {
                    ++j;
                }
            }
        }
        break;

    }
}
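The SINGLE_* branches above compute the complement of a sorted selection by appending a sentinel value (the list size) so the inner loop never indexes past the end of the selection vector. A minimal standalone sketch of that technique (not taken from the mp3diags sources; it assumes the selected indices are already sorted):

#include <vector>

// Return the indices in [0, n) that are NOT in the sorted vector 'sel'.
std::vector<int> complementOf(std::vector<int> sel, int n)
{
    sel.push_back(n); // sentinel: sel[j] is always a valid read below
    std::vector<int> avail;
    for (int i = 0, j = 0; i < n; ++i)
    {
        if (i < sel[j])
        {
            avail.push_back(i); // i is not selected
        }
        else
        {
            ++j; // i == sel[j]: skip the selected index
        }
    }
    return avail;
}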
Code example #2
File: blocking.cpp Project: ZeroCM/zcm
bool zcm_blocking_t::deleteFromSubList(SubList& slist, zcm_sub_t *sub)
{
    for (size_t i = 0; i < slist.size(); i++) {
        if (slist[i] == sub) {
            // shrink the array by moving the last element
            size_t last = slist.size()-1;
            slist[i] = slist[last];
            slist.resize(last);
            // delete the element
            return deleteSubEntry(sub, slist.size());
        }
    }
    return false;
}
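The helper above uses the common swap-with-last ("swap and pop") idiom to remove an element from an unordered vector in O(1) once it has been found. A minimal generic sketch of the idiom (a hypothetical helper, not part of the zcm sources):

#include <vector>

// Remove the first occurrence of 'value' from an unordered vector in O(1)
// by overwriting it with the last element; element order is not preserved.
template <typename T>
bool eraseUnordered(std::vector<T>& v, const T& value)
{
    for (size_t i = 0; i < v.size(); i++)
    {
        if (v[i] == value)
        {
            v[i] = v.back();
            v.pop_back();
            return true;
        }
    }
    return false;
}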
Code example #3
File: UPstream.C Project: ADGlassby/OpenFOAM-2.2.x
void Foam::UPstream::waitRequests(const label start)
{
    if (debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size()-start
            << " outstanding requests starting at " << start << endl;
    }

    if (PstreamGlobals::outstandingRequests_.size())
    {
        SubList<MPI_Request> waitRequests
        (
            PstreamGlobals::outstandingRequests_,
            PstreamGlobals::outstandingRequests_.size() - start,
            start
        );

        if
        (
            MPI_Waitall
            (
                waitRequests.size(),
                waitRequests.begin(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorIn
            (
                "UPstream::waitRequests()"
            )   << "MPI_Waitall returned with error" << Foam::endl;
        }

        resetRequests(start);
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}
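waitRequests() blocks on the trailing portion of the global request list by wrapping it in a SubList and handing that range to MPI_Waitall. The same pattern in plain MPI, without the OpenFOAM list types (a minimal sketch, not OpenFOAM code):

#include <mpi.h>
#include <vector>

// Wait for all requests from index 'start' onwards, then drop them.
void waitTail(std::vector<MPI_Request>& requests, int start)
{
    int count = static_cast<int>(requests.size()) - start;
    if (count > 0)
    {
        // MPI_Waitall returns a non-zero error code on failure.
        if (MPI_Waitall(count, requests.data() + start, MPI_STATUSES_IGNORE))
        {
            // handle/report the error here
        }
        requests.resize(start); // completed requests are no longer outstanding
    }
}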
Code example #4
File: BitList.cpp Project: ForTheBetter/pubsub
void build_subIndex_from_subList(SubIndex &subIndex, SubList &subList, vector<pair<int, int> > &attrList)
{
    // Pre-create an index row for every possible word id in every size bucket.
    int maxsz = ORDER_BY_SIZE ? SUB_SIZE_UB : 1;
    for(int sz = 0; sz < maxsz; sz++){
        for(int i = 0; i < (int)attrList.size(); i++){
            for(int j = 0; j <= attrList[i].second; j++){
                int wordId;
                if(attrList[i].second > MAGNITUDE){
                    // Large value ranges are bucketed so each attribute uses at most MAGNITUDE word ids.
                    wordId = attrList[i].first + j / ((int) ceil(1.0 * attrList[i].second / MAGNITUDE));
                }
                else{
                    wordId = attrList[i].first + j;
                }
                subIndex[sz][wordId].wordId = subIndex[sz][wordId].rowId = wordId;
            }
        }
    }

    // For every subscription, add one BitNumber entry per covered attribute value
    // to the row of the word id that value maps to.
    for(SubList::iterator sit = subList.begin(); sit != subList.end(); ++sit){
        int sz = sit->attrCnt;
        if(!ORDER_BY_SIZE){
            sz = 1;
        }
        for(vector<AttrRange>::iterator ait = sit->attrList.begin(); ait != sit->attrList.end(); ++ait){
            int attrId = ait->attrId;
            for(vector<Interval>::iterator iit = ait->intervalList.begin(); iit != ait->intervalList.end(); ++iit){
                for(int val = iit->left; val <= iit->right; val++){
                    int wordId;
                    if(attrList[attrId].second > MAGNITUDE){
                        wordId = attrList[attrId].first + val / ((int) ceil(1.0 * attrList[attrId].second / MAGNITUDE));
                    }
                    else{
                        wordId = attrList[attrId].first + val;
                    }
                    BitNumber newNumber;
                    newNumber.did = sit->subId; newNumber.eid = 1;
                    newNumber.weight = 1; newNumber.colId = sit->subId;
                    subIndex[sz - 1][wordId].bitList.push_back(newNumber);
                }
            }
        }
    }
}
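The word-id computation above buckets an attribute's value range so that each attribute occupies at most MAGNITUDE word ids: when the range is larger than MAGNITUDE, several adjacent values map to the same id. A minimal sketch of just that mapping (toWordId is a hypothetical helper, not part of the pubsub sources):

#include <cmath>

// base: first word id assigned to the attribute (attrList[i].first above)
// card: size of the attribute's value range (attrList[i].second above)
// magnitude: maximum number of word ids per attribute
int toWordId(int base, int card, int val, int magnitude)
{
    if (card > magnitude)
    {
        int bucketWidth = (int) std::ceil(1.0 * card / magnitude);
        return base + val / bucketWidth; // adjacent values share a word id
    }
    return base + val; // one word id per value
}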
Code example #5
File: UPstream.C Project: GameCave/OpenFOAM-2.3.x
void Foam::reduce
(
    scalar& Value,
    const sumOp<scalar>& bop,
    const int tag,
    const label communicator,
    label& requestID
)
{
#ifdef MPIX_COMM_TYPE_SHARED
    // Assume mpich2 with non-blocking collectives extensions. Once mpi3
    // is available this will change.
    MPI_Request request;
    scalar v = Value;
    MPIX_Ireduce
    (
        &v,
        &Value,
        1,
        MPI_SCALAR,
        MPI_SUM,
        0,              //root
        PstreamGlobals::MPICommunicators_[communicator],
        &request
    );

    requestID = PstreamGlobals::outstandingRequests_.size();
    PstreamGlobals::outstandingRequests_.append(request);

    if (debug)
    {
        Pout<< "UPstream::allocateRequest for non-blocking reduce"
            << " : request:" << requestID
            << endl;
    }

#else
    // Non-blocking not yet implemented in mpi
    reduce(Value, bop, tag, communicator);
    requestID = -1;
#endif
}


void Foam::UPstream::allocatePstreamCommunicator
(
    const label parentIndex,
    const label index
)
{
    if (index == PstreamGlobals::MPIGroups_.size())
    {
        // Extend storage with dummy values
        MPI_Group newGroup;
        PstreamGlobals::MPIGroups_.append(newGroup);
        MPI_Comm newComm;
        PstreamGlobals::MPICommunicators_.append(newComm);
    }
    else if (index > PstreamGlobals::MPIGroups_.size())
    {
        FatalErrorIn
        (
            "UPstream::allocatePstreamCommunicator\n"
            "(\n"
            "    const label parentIndex,\n"
            "    const labelList& subRanks\n"
            ")\n"
        )   << "PstreamGlobals out of sync with UPstream data. Problem."
            << Foam::exit(FatalError);
    }


    if (parentIndex == -1)
    {
        // Allocate world communicator

        if (index != UPstream::worldComm)
        {
            FatalErrorIn
            (
                "UPstream::allocateCommunicator\n"
                "(\n"
                "    const label parentIndex,\n"
                "    const labelList& subRanks\n"
                ")\n"
            )   << "world communicator should always be index "
                << UPstream::worldComm << Foam::exit(FatalError);
        }

        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_WORLD;
        MPI_Comm_group(MPI_COMM_WORLD, &PstreamGlobals::MPIGroups_[index]);
        MPI_Comm_rank
        (
            PstreamGlobals::MPICommunicators_[index],
           &myProcNo_[index]
        );

        // Set the number of processes to the actual number
        int numProcs;
        MPI_Comm_size(PstreamGlobals::MPICommunicators_[index], &numProcs);
        procIDs_[index] = identity(numProcs);
    }
    else
    {
        // Create new group
        MPI_Group_incl
        (
            PstreamGlobals::MPIGroups_[parentIndex],
            procIDs_[index].size(),
            procIDs_[index].begin(),
           &PstreamGlobals::MPIGroups_[index]
        );

        // Create new communicator
        MPI_Comm_create
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            PstreamGlobals::MPIGroups_[index],
           &PstreamGlobals::MPICommunicators_[index]
        );

        if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
        {
            myProcNo_[index] = -1;
        }
        else
        {
            if
            (
                MPI_Comm_rank
                (
                    PstreamGlobals::MPICommunicators_[index],
                   &myProcNo_[index]
                )
            )
            {
                FatalErrorIn
                (
                    "UPstream::allocatePstreamCommunicator\n"
                    "(\n"
                    "    const label,\n"
                    "    const labelList&\n"
                    ")\n"
                )   << "Problem :"
                    << " when allocating communicator at " << index
                    << " from ranks " << procIDs_[index]
                    << " of parent " << parentIndex
                    << " cannot find my own rank"
                    << Foam::exit(FatalError);
            }
        }
    }
}


void Foam::UPstream::freePstreamCommunicator(const label communicator)
{
    if (communicator != UPstream::worldComm)
    {
        if (PstreamGlobals::MPICommunicators_[communicator] != MPI_COMM_NULL)
        {
            // Free communicator. Sets communicator to MPI_COMM_NULL
            MPI_Comm_free(&PstreamGlobals::MPICommunicators_[communicator]);
        }
        if (PstreamGlobals::MPIGroups_[communicator] != MPI_GROUP_NULL)
        {
            // Free group. Sets group to MPI_GROUP_NULL
            MPI_Group_free(&PstreamGlobals::MPIGroups_[communicator]);
        }
    }
}


Foam::label Foam::UPstream::nRequests()
{
    return PstreamGlobals::outstandingRequests_.size();
}


void Foam::UPstream::resetRequests(const label i)
{
    if (i < PstreamGlobals::outstandingRequests_.size())
    {
        PstreamGlobals::outstandingRequests_.setSize(i);
    }
}


void Foam::UPstream::waitRequests(const label start)
{
    if (debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size()-start
            << " outstanding requests starting at " << start << endl;
    }

    if (PstreamGlobals::outstandingRequests_.size())
    {
        SubList<MPI_Request> waitRequests
        (
            PstreamGlobals::outstandingRequests_,
            PstreamGlobals::outstandingRequests_.size() - start,
            start
        );

        if
        (
            MPI_Waitall
            (
                waitRequests.size(),
                waitRequests.begin(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorIn
            (
                "UPstream::waitRequests()"
            )   << "MPI_Waitall returned with error" << Foam::endl;
        }

        resetRequests(start);
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}


void Foam::UPstream::waitRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::waitRequest : starting wait for request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorIn
        (
            "UPstream::waitRequest(const label)"
        )   << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << nl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    if
    (
        MPI_Wait
        (
           &PstreamGlobals::outstandingRequests_[i],
            MPI_STATUS_IGNORE
        )
    )
    {
        FatalErrorIn
        (
            "UPstream::waitRequest()"
        )   << "MPI_Wait returned with error" << Foam::endl;
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequest : finished wait for request:" << i
            << endl;
    }
}


bool Foam::UPstream::finishedRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::finishedRequest : checking request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorIn
        (
            "UPstream::finishedRequest(const label)"
        )   << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << nl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    int flag;
    MPI_Test
    (
       &PstreamGlobals::outstandingRequests_[i],
       &flag,
        MPI_STATUS_IGNORE
    );

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : finished request:" << i
            << endl;
    }

    return flag != 0;
}


int Foam::UPstream::allocateTag(const char* s)
{
    int tag;
    if (PstreamGlobals::freedTags_.size())
    {
        tag = PstreamGlobals::freedTags_.remove();
    }
    else
    {
        tag = PstreamGlobals::nTags_++;
    }

    if (debug)
    {
        //if (UPstream::lateBlocking > 0)
        //{
        //    string& poutp = Pout.prefix();
        //    poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
        //    Perr.prefix() = Pout.prefix();
        //}
        Pout<< "UPstream::allocateTag " << s
            << " : tag:" << tag
            << endl;
    }

    return tag;
}
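allocateTag() draws from a pool of previously freed tags before minting a new one, so tag numbers stay small and get reused. A simplified sketch of that pool using standard containers instead of the OpenFOAM list types (not the actual PstreamGlobals implementation):

#include <vector>

struct TagPool
{
    std::vector<int> freedTags; // tags handed back for reuse
    int nTags = 0;              // next never-used tag

    int allocateTag()
    {
        if (!freedTags.empty())
        {
            int tag = freedTags.back();
            freedTags.pop_back();
            return tag;
        }
        return nTags++;
    }

    void freeTag(int tag) { freedTags.push_back(tag); }
};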