void ModeListMirrorBuddyGroups::printGroups(UInt16List& buddyGroupIDs, UInt16List& primaryTargetIDs,
   UInt16List& secondaryTargetIDs)
{
   // nothing to print if no group has been defined
   if(buddyGroupIDs.empty() )
   {
      std::cerr << "No mirror buddy groups defined." << std::endl;
      return;
   }

   // table header
   printf("%17s %17s %17s\n", "BuddyGroupID", "PrimaryTargetID", "SecondaryTargetID");
   printf("%17s %17s %17s\n", "============", "===============", "=================");


   // walk the three parallel lists in lock-step and print one row per buddy group

   UInt16ListConstIter groupIter = buddyGroupIDs.begin();
   UInt16ListConstIter primaryIter = primaryTargetIDs.begin();
   UInt16ListConstIter secondaryIter = secondaryTargetIDs.begin();

   while(groupIter != buddyGroupIDs.end() )
   {
      printf("%17hu %17hu %17hu\n", *groupIter, *primaryIter, *secondaryIter);

      ++groupIter;
      ++primaryIter;
      ++secondaryIter;
   }
}
Esempio n. 2
0
// Reads a UInt16 array from the packet: a UInt32 element count followed by
// that many UInt16 values. Returns a heap-allocated list; the caller takes
// ownership and must delete it.
// NOTE(review): 'count' comes straight off the wire and is not validated
// against the remaining packet length, so a malformed/hostile packet can
// drive an arbitrarily long read loop — confirm whether getUInt16() bounds-
// checks internally or whether validation happens upstream.
UInt16List* MtpDataPacket::getAUInt16() {
    UInt16List* result = new UInt16List;
    int count = getUInt32();
    for (int i = 0; i < count; i++)
        result->push_back(getUInt16());
    return result;
}
Esempio n. 3
0
void InternodeSyncer::updateTargetBuddyCapacityPools()
{
   App* app = Program::getApp();

   MirrorBuddyGroupMapper* buddyMapper = app->getMirrorBuddyGroupMapper();
   TargetCapacityPools* targetPools = app->getStorageCapacityPools();
   NodeCapacityPools* buddyPools = app->getStorageBuddyCapacityPools();

   UInt16Set listNormal; // new "normal" buddy group pool list
   UInt16Set listLow;
   UInt16Set listEmergency;

   UInt16List buddyGroupIDs;
   MirrorBuddyGroupList buddyGroups;

   buddyMapper->getMappingAsLists(buddyGroupIDs, buddyGroups);

   UInt16ListConstIter buddyGroupIDIter = buddyGroupIDs.begin();
   MirrorBuddyGroupListCIter buddyGroupIter = buddyGroups.begin();

   // walk all mapped buddy groups, add them to buddy pools based on worst buddy pool of any buddy
   for( ; buddyGroupIDIter != buddyGroupIDs.end(); buddyGroupIDIter++, buddyGroupIter++)
   {
      CapacityPoolType primaryPoolType;
      bool primaryFound = targetPools->getPoolAssignment(buddyGroupIter->firstTargetID,
         &primaryPoolType);

      if(!primaryFound)
         continue; // can't make a pool decision if a buddy does not exist

      CapacityPoolType secondaryPoolType;
      bool secondaryFound = targetPools->getPoolAssignment(buddyGroupIter->secondTargetID,
         &secondaryPoolType);

      if(!secondaryFound)
         continue; // can't make a pool decision if a buddy does not exist

      CapacityPoolType groupPoolType = BEEGFS_MAX(primaryPoolType, secondaryPoolType);

      if(groupPoolType == CapacityPool_NORMAL)
         listNormal.insert(*buddyGroupIDIter);
      else
      if(groupPoolType == CapacityPool_LOW)
         listLow.insert(*buddyGroupIDIter);
      else
      if(groupPoolType == CapacityPool_EMERGENCY)
         listEmergency.insert(*buddyGroupIDIter);
      else
      { // should never happen
         LogContext(__func__).logErr(
            "Skipping invalid groupPoolType: " + StringTk::intToStr(groupPoolType) );
      }
   }

   // apply new pools

   buddyPools->syncPoolsFromSets(listNormal, listLow, listEmergency);
}
Esempio n. 4
0
bool MapTargetsMsgEx::processIncoming(struct sockaddr_in* fromAddr, Socket* sock,
   char* respBuf, size_t bufLen, HighResolutionStats* stats)
{
   LogContext log("MapTargetsMsg incoming");

   std::string peer = fromAddr ? Socket::ipaddrToStr(&fromAddr->sin_addr) : sock->getPeername();
   LOG_DEBUG_CONTEXT(log, Log_DEBUG, std::string("Received a MapTargetsMsg from: ") + peer);

   App* app = Program::getApp();
   NodeStoreServers* storageNodes = app->getStorageNodes();
   TargetMapper* targetMapper = app->getTargetMapper();

   uint16_t nodeID = getNodeID();
   UInt16List targetIDs;

   parseTargetIDs(&targetIDs);

   for(UInt16ListConstIter iter = targetIDs.begin(); iter != targetIDs.end(); iter++)
   {
      bool wasNewTarget = targetMapper->mapTarget(*iter, nodeID);
      if(wasNewTarget)
      {
         LOG_DEBUG_CONTEXT(log, Log_WARNING, "Mapping "
            "target " + StringTk::uintToStr(*iter) +
            " => " +
            storageNodes->getNodeIDWithTypeStr(nodeID) );

         IGNORE_UNUSED_VARIABLE(storageNodes);
      }
   }


   // send response

   if(!MsgHelperAck::respondToAckRequest(this, fromAddr, sock,
      respBuf, bufLen, app->getDatagramListener() ) )
   {
      MapTargetsRespMsg respMsg(FhgfsOpsErr_SUCCESS);
      respMsg.serialize(respBuf, bufLen);

      if(fromAddr)
      { // datagram => sync via dgramLis send method
         app->getDatagramListener()->sendto(respBuf, respMsg.getMsgLength(), 0,
            (struct sockaddr*)fromAddr, sizeof(*fromAddr) );
      }
      else
         sock->sendto(respBuf, respMsg.getMsgLength(), 0, NULL, 0);
   }

   return true;
}
Esempio n. 5
0
void InternodeSyncer::getTargetMappingAsInfoList(TargetMapper* targetMapper,
   TargetCapacityInfoList& outTargetCapacityInfos)
{
   // fetch the current target=>node mapping as two parallel lists...
   UInt16List mappedTargetIDs;
   UInt16List mappedNodeIDs;

   targetMapper->getMappingAsLists(mappedTargetIDs, mappedNodeIDs);

   // ...and zip them into a single capacity info list
   UInt16ListConstIter targetIter = mappedTargetIDs.begin();
   UInt16ListConstIter nodeIter = mappedNodeIDs.begin();

   while(targetIter != mappedTargetIDs.end() )
   {
      outTargetCapacityInfos.push_back(TargetCapacityInfo(*targetIter, *nodeIter) );

      ++targetIter;
      ++nodeIter;
   }
}
Esempio n. 6
0
// Reads a UInt16 array from the packet: a UInt32 element count followed by
// the elements themselves. Returns a heap-allocated list (caller owns it),
// or NULL if the packet ends before the declared data is fully read.
UInt16List* MtpDataPacket::getAUInt16() {
    uint32_t elementCount;
    if (!getUInt32(elementCount))
        return NULL;

    UInt16List* values = new UInt16List;
    uint32_t remaining = elementCount;
    while (remaining--) {
        uint16_t element;
        if (!getUInt16(element)) {
            // short packet: discard the partial list and report failure
            delete values;
            return NULL;
        }
        values->push(element);
    }
    return values;
}