Example #1
unsigned int TAxiomSet :: absorb ( void )
{
	// absorbed- and unabsorbable GCIs
	AxiomCollection Absorbed, GCIs;

	// we will change Accum (via split rule), so indexing and compare with size
	for ( curAxiom = 0; curAxiom < Accum.size(); ++curAxiom )
	{
#	ifdef RKG_DEBUG_ABSORPTION
		std::cout << "\nProcessing (" << curAxiom << "):";
#	endif
		TAxiom* ax = Accum[curAxiom];
		if ( absorbGCI(ax) )
			Absorbed.push_back(ax);
		else
			GCIs.push_back(ax);
	}

	// clear absorbed and remove them from Accum
	for ( AxiomCollection::iterator p = Absorbed.begin(), p_end = Absorbed.end(); p != p_end; ++p )
		delete *p;
	Accum.swap(GCIs);

#ifdef RKG_DEBUG_ABSORPTION
	std::cout << "\nAbsorption done with " << Accum.size() << " GCIs left\n";
#endif
	PrintStatistics();
	return size();
}
Example #2
File: LabExam1.c  Project: Yuwain/School
int main(void)
{
	int p, q;
	int rows, cols;
	float A[ROWS][COLS];

	printf("Enter # of rows, # of cols: ");
	scanf("%d %d", &rows, &cols);

	for (p = 0; p < rows; p++)
	{
		printf("Enter %d column values for row %d:\n", cols, p + 1);
		
		for (q = 0; q < cols; q++)
		{
			scanf("%f", &A[p][q]);
		}
	}

	Print(A, rows, cols);
	AverageOfColumns(A, rows, cols);
	
	printf("\n");
	
	Print(A, rows + 1, cols);
	PrintStatistics(A, rows, cols);

	return 0;
}
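Print, AverageOfColumns, and PrintStatistics are defined elsewhere in LabExam1.c and are not shown in this snippet. As a rough sketch only, assuming Print simply dumps the matrix row by row (the signature and the ROWS/COLS macros are inferred from the call above, not taken from the original file), it might look like:

/* Hypothetical helper: print the first `rows` x `cols` block of A. */
void Print(float A[ROWS][COLS], int rows, int cols)
{
	int p, q;

	for (p = 0; p < rows; p++)
	{
		for (q = 0; q < cols; q++)
			printf("%8.2f ", A[p][q]);

		printf("\n");
	}
}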
Example #3
Agent::~Agent ()
{
	PrintStatistics();

	for ( ActivList::iterator it = _activities.begin(); it != _activities.end(); ++it )
		delete (*it);
};
Example #4
/*
 *  ======== DSPProcessor_Load ========
 *  Purpose:
 *      Reset a processor and load a new base program image.
 *      This will be an OEM-only function, and not part of the 'Bridge
 *      application developer's API.
 */
DBAPI DSPProcessor_Load(DSP_HPROCESSOR hProcessor, IN CONST INT iArgc,
                        IN CONST CHAR **aArgv, IN CONST CHAR **aEnvp)
{
    DSP_STATUS status = DSP_SOK;
    Trapped_Args tempStruct;
#ifdef DEBUG_BRIDGE_PERF
    struct timeval tv_beg;
    struct timeval tv_end;
    struct timezone tz;
    int timeRetVal = 0;

    timeRetVal = getTimeStamp(&tv_beg);
#endif


    DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("PROC: DSPProcessor_Load\r\n")));

    /* Check the handle */
    if (hProcessor) {
        if (iArgc > 0) {
            if (!DSP_ValidReadPtr(aArgv, iArgc)) {
                tempStruct.ARGS_PROC_LOAD.hProcessor =
                    hProcessor;
                tempStruct.ARGS_PROC_LOAD.iArgc = iArgc;
                tempStruct.ARGS_PROC_LOAD.aArgv =
                    (CHAR **)aArgv;
                tempStruct.ARGS_PROC_LOAD.aEnvp =
                    (CHAR **)aEnvp;
                status = DSPTRAP_Trap(&tempStruct,
                                      CMD_PROC_LOAD_OFFSET);
            } else {
                status = DSP_EPOINTER;
                DEBUGMSG(DSPAPI_ZONE_ERROR,
                         (TEXT("PROC: Null pointer in input \r\n")));
            }
        } else {
            status = DSP_EINVALIDARG;
            DEBUGMSG(DSPAPI_ZONE_ERROR,
                     (TEXT("PROC: iArgc is invalid. \r\n")));
        }
    } else {
        /* Invalid handle */
        status = DSP_EHANDLE;
        DEBUGMSG(DSPAPI_ZONE_ERROR,
                 (TEXT("PROC: Invalid Handle \r\n")));
    }

#ifdef DEBUG_BRIDGE_PERF
    timeRetVal = getTimeStamp(&tv_end);
    PrintStatistics(&tv_beg, &tv_end, "DSPProcessor_Load", 0);

#endif

    return status;
}
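The DEBUG_BRIDGE_PERF block brackets the trap call with getTimeStamp() and passes both timestamps to PrintStatistics(). getTimeStamp() itself is not part of this example; a minimal sketch, assuming it is just a thin wrapper around gettimeofday() (the real bridge implementation may differ), could be:

#include <sys/time.h>

/* Hypothetical helper: store the current time of day in *tv.
   Returns 0 on success, -1 on failure, like gettimeofday(). */
static int getTimeStamp(struct timeval *tv)
{
    struct timezone tz;

    return gettimeofday(tv, &tz);
}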
Example #5
// 
// Function: StatisticsThread
//
// Description:
//    Simple thread to print the statistics out. In this model there isn't
//    a good place to incorporate it into the normal program flow.
//
DWORD WINAPI StatisticsThread(LPVOID lpParam)
{
    while (1)
    {
        SleepEx(5000, TRUE);

        PrintStatistics();
    }
    ExitThread(0);
    return 0;
}
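The header comment notes there is no natural place for this reporting in the normal program flow, so the thread is simply started once and left running. A minimal launch sketch using the Win32 CreateThread() API (hypothetical startup code, not part of the original sample):

/* Hypothetical: spawn the statistics thread once during initialization
   and let it run for the life of the process. */
HANDLE hStatsThread = CreateThread(NULL,             /* default security attributes */
                                   0,                /* default stack size          */
                                   StatisticsThread, /* thread entry point          */
                                   NULL,             /* no parameter                */
                                   0,                /* run immediately             */
                                   NULL);            /* thread id not needed        */
if (hStatsThread == NULL)
    printf("CreateThread failed: %lu\n", GetLastError());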
Example #6
int Agent:: Execute ()
{
	_executions++;
	agentStats.StartAgentTiming();
	_blk.process_messages();

	umsg = _blk.readSignal<UpdateMessage> ("external");
	rmsg = _blk.readSignal<ResetMessage> ("external");

	if(umsg != 0){
		std::vector<std::pair<std::string,std::string> > dataForWrite;
		for(int i=0; i < umsg->updatexml_size(); i++){
			std::pair<std::string,std::string> temp;
			temp.first = umsg->updatexml(i).keyword();
			temp.second = umsg->updatexml(i).value();
			dataForWrite.push_back(temp);
		}
		_xml.burstWrite(dataForWrite);
	}
	if(rmsg != 0){
		for ( ActivList::iterator it = _activities.begin(); it != _activities.end(); it++ )
		{
			std::string activityName = (*it)->GetName();
			for(int i=0; i < rmsg->resetactivities_size(); i++){
				if(activityName.compare(rmsg->resetactivities(i)) == 0){
					(*it)->Reset();
					break;
				}
			}
		}
	}
	for ( ActivList::iterator it = _activities.begin(); it != _activities.end(); it++ )
	{
		agentStats.StartActivityTiming(*it);
		(*it)->Execute();
		agentStats.StopActivityTiming(*it);
	}

	_blk.publish_all();
	agentStats.StopAgentTiming();

	if ( ! (_executions % _statsCycle) )
	{
		PrintStatistics();
	}

	return 0;
};
Example #7
/*
 *  ======== DSPNode_PutMessage ========
 *  Purpose:
 *      Send an event message to a task node.
 */
DBAPI DSPNode_PutMessage(DSP_HNODE hNode, IN CONST struct DSP_MSG *pMessage,
						UINT uTimeout)
{
	DSP_STATUS status = DSP_SOK;
	Trapped_Args tempStruct;
#ifdef DEBUG_BRIDGE_PERF
	struct timeval tv_beg;
	struct timeval tv_end;
	struct timezone tz;
	int timeRetVal = 0;

	timeRetVal = getTimeStamp(&tv_beg);
#endif

	DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("NODE: DSPNode_PutMessage:\r\n")));

	if (hNode) {
		if (pMessage) {
			/* Set up the structure */
			/* Call DSP Trap */
			tempStruct.ARGS_NODE_PUTMESSAGE.hNode = hNode;
			tempStruct.ARGS_NODE_PUTMESSAGE.pMessage =
						(struct DSP_MSG *)pMessage;
			tempStruct.ARGS_NODE_PUTMESSAGE.uTimeout = uTimeout;
			status = DSPTRAP_Trap(&tempStruct,
				CMD_NODE_PUTMESSAGE_OFFSET);
		} else {
			status = DSP_EPOINTER;
			DEBUGMSG(DSPAPI_ZONE_ERROR,
				(TEXT("NODE: DSPNode_PutMessage: "
						"pMessage is Invalid \r\n")));
		}
	} else {
		/* Invalid pointer */
		status = DSP_EHANDLE;
		DEBUGMSG(DSPAPI_ZONE_ERROR,
			(TEXT("NODE: DSPNode_PutMessage: "
					"hNode is Invalid \r\n")));
	}
#ifdef DEBUG_BRIDGE_PERF
	timeRetVal = getTimeStamp(&tv_end);
	PrintStatistics(&tv_beg, &tv_end, "DSPNode_PutMessage", 0);
#endif


	return status;
}
Example #8
SgEmptyBlackWhite DfpnSolver::StartSearch(DfpnHashTable& hashTable, 
                                          PointSequence& pv,
                                          const DfpnBounds& maxBounds)
{
    m_aborted = false;
    m_hashTable = &hashTable;
    m_numTerminal = 0;
    m_numMIDcalls = 0;
    m_generateMoves = 0;
    m_totalWastedWork = 0;
    m_prunedSiblingStats.Clear();
    m_moveOrderingPercent.Clear();
    m_moveOrderingIndex.Clear();
    m_deltaIncrease.Clear();
    m_checkTimerAbortCalls = 0;

    // Skip search if already solved
    DfpnData data;
    if (TTRead(data) && data.m_bounds.IsSolved())
    {
        SgDebug() << "Already solved!\n";
        const SgEmptyBlackWhite toPlay = GetColorToMove();
        SgEmptyBlackWhite w = Winner(data.m_bounds.IsWinning(), toPlay);
        GetPVFromHash(pv);
        SgDebug() << SgEBW(w) << " wins!\n";
        WriteMoveSequence(SgDebug(), pv);
        return w;
    }

    m_timer.Start();
    DfpnHistory history;
    MID(maxBounds, history);
    m_timer.Stop();

    GetPVFromHash(pv);
    SgEmptyBlackWhite winner = SG_EMPTY;
    if (TTRead(data) && data.m_bounds.IsSolved())
    {
        const SgEmptyBlackWhite toPlay = GetColorToMove();
        winner = Winner(data.m_bounds.IsWinning(), toPlay);
    }
    PrintStatistics(winner, pv);

    if (m_aborted)
        SgWarning() << "Search aborted.\n";
    return winner;
}
Example #9
/*
 *  ======== DSPNode_GetMessage ========
 *  Purpose:
 *      Retrieve an event message from a task node.
 */
DBAPI DSPNode_GetMessage(DSP_HNODE hNode, OUT struct DSP_MSG *pMessage,
				UINT uTimeout)
{
	int status = 0;
	Trapped_Args tempStruct;
#ifdef DEBUG_BRIDGE_PERF
	struct timeval tv_beg;
	struct timeval tv_end;
	struct timezone tz;
	int timeRetVal = 0;

	timeRetVal = getTimeStamp(&tv_beg);

#endif

	DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("NODE: DSPNode_GetMessage:\r\n")));

	if (hNode) {
		if (pMessage) {
			/* Set up the structure */
			/* Call DSP Trap */
			tempStruct.ARGS_NODE_GETMESSAGE.hNode = hNode;
			tempStruct.ARGS_NODE_GETMESSAGE.pMessage = pMessage;
			tempStruct.ARGS_NODE_GETMESSAGE.uTimeout = uTimeout;
			status = DSPTRAP_Trap(&tempStruct,
				CMD_NODE_GETMESSAGE_OFFSET);
		} else {
			status = -EFAULT;
			DEBUGMSG(DSPAPI_ZONE_ERROR,
				(TEXT("NODE: DSPNode_GetMessage:"
				"pMessage is Invalid \r\n")));
		}
	} else {
		status = -EFAULT;
		DEBUGMSG(DSPAPI_ZONE_ERROR,
			(TEXT("NODE: DSPNode_GetMessage: "
			"hNode is Invalid \r\n")));
	}
#ifdef DEBUG_BRIDGE_PERF
	timeRetVal = getTimeStamp(&tv_end);
	PrintStatistics(&tv_beg, &tv_end, "DSPNode_GetMessage", 0);
#endif


	return status;
}
Example #10
File: runsvs.cpp  Project: rizar/svs
int App::Run() {
    Load();
    if (ParamsInputPath_.size()) {
        Params_.Load(ParamsInputPath_.c_str());
    }

    Builder_.SetParams(Params_);
    Builder_.SetInputCloud(Input_);
    PrintParameters();

    Builder_.GenerateTrainingSet();
    ExportForLibSVM();

    if (AlphaInputPath_.size()) {
        std::ifstream alphaInput(AlphaInputPath_.c_str());
        std::vector<SVMFloat> alphas;
        std::copy(std::istream_iterator<SVMFloat>(alphaInput), std::istream_iterator<SVMFloat>(),
                std::back_inserter(alphas));

        Builder_.InitSVM(alphas);
    } else {
        Builder_.Learn();
        TrainedModel_ = true;
    }
    if (AlphaOutputPath_.size()) {
        std::ofstream alphaOutput(AlphaOutputPath_.c_str());
        std::copy(Builder_.SVM().Alphas(), Builder_.SVM().Alphas() + Builder_.Objects->size(),
                std::ostream_iterator<SVMFloat>(alphaOutput, "\n"));
    }
    if (ParamsOutputPath_.size()) {
        Params_.Save(ParamsOutputPath_.c_str());
    }
    ExportAlphaMap();

    Builder_.CalcGradients();

    PrintStatistics();
    Visualize();
    return 0;
}
Example #11
File: stats.c  Project: hoangduit/reactos
VOID
Execute(LPTSTR Path)
{
  if (!ExtInfoList)
  {
	  _tprintf (_T("No extensions specified.\n"));
    return;
  }

  if (!ProcessDirectories (Path))
  {
	  _tprintf (_T("Failed to process directories.\n"));
    return;
  }

  if (!ProcessFiles (Path))
  {
	  _tprintf (_T("Failed to process files.\n"));
    return;
  }

  PrintStatistics();
}
Example #12
/*
 *  ======== DSPNode_Create ========
 *  Purpose:
 *      Create a node in a pre-run (i.e., inactive) state on its
 *		DSP processor.
 */
DBAPI DSPNode_Create(DSP_HNODE hNode)
{
	DSP_STATUS status = DSP_SOK;
	Trapped_Args tempStruct;
#ifdef DEBUG_BRIDGE_PERF
	struct timeval tv_beg;
	struct timeval tv_end;
	struct timezone tz;
	int timeRetVal = 0;

	timeRetVal = getTimeStamp(&tv_beg);
#endif


	DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("NODE: DSPNode_Create:\r\n")));

	if (hNode) {
		/* Set up the structure */
		/* Call DSP Trap */
		tempStruct.ARGS_NODE_CREATE.hNode = hNode;
		status = DSPTRAP_Trap(&tempStruct, CMD_NODE_CREATE_OFFSET);
	} else {
		/* Invalid pointer */
		status = DSP_EHANDLE;
		DEBUGMSG(DSPAPI_ZONE_ERROR,
		(TEXT("NODE: DSPNode_Create: hNode is Invalid Handle\r\n")));
	}

#ifdef DEBUG_BRIDGE_PERF
	timeRetVal = getTimeStamp(&tv_end);
	PrintStatistics(&tv_beg, &tv_end, "DSPNode_Create", 0);

#endif

	return status;
}
Example #13
int
SolveSubproblem(int CurrentSubproblem, int Subproblems,
                GainType * GlobalBestCost)
{
    Node *FirstNodeSaved = FirstNode, *N, *Next, *Last = 0;
    GainType OptimumSaved = Optimum, Cost, Improvement, GlobalCost;
    double LastTime, Time, ExcessSaved = Excess;
    int NewDimension = 0, OldDimension = 0, Number, i, InitialTourEdges = 0,
        AscentCandidatesSaved = AscentCandidates,
        InitialPeriodSaved = InitialPeriod, MaxTrialsSaved = MaxTrials;

    BestCost = PLUS_INFINITY;
    FirstNode = 0;
    N = FirstNodeSaved;
    do {
        if (N->Subproblem == CurrentSubproblem) {
            if (SubproblemsCompressed &&
                (((N->SubproblemPred == N->SubBestPred ||
                   FixedOrCommon(N, N->SubproblemPred) ||
                   (N->SubBestPred &&
                    (N->FixedTo1Saved == N->SubBestPred ||
                     N->FixedTo2Saved == N->SubBestPred))) &&
                  (N->SubproblemSuc == N->SubBestSuc ||
                   FixedOrCommon(N, N->SubproblemSuc) ||
                   (N->SubBestSuc &&
                    (N->FixedTo1Saved == N->SubBestSuc ||
                     N->FixedTo2Saved == N->SubBestSuc)))) ||
                 ((N->SubproblemPred == N->SubBestSuc ||
                   FixedOrCommon(N, N->SubproblemPred) ||
                   (N->SubBestSuc &&
                    (N->FixedTo1Saved == N->SubBestSuc ||
                     N->FixedTo2Saved == N->SubBestSuc))) &&
                  (N->SubproblemSuc == N->SubBestPred ||
                   FixedOrCommon(N, N->SubproblemSuc) ||
                   (N->SubBestPred &&
                    (N->FixedTo1Saved == N->SubBestPred ||
                     N->FixedTo2Saved == N->SubBestPred))))))
                N->Subproblem = -CurrentSubproblem;
            else {
                if (!FirstNode)
                    FirstNode = N;
                NewDimension++;
            }
            N->Head = N->Tail = 0;
            if (N->SubBestSuc)
                OldDimension++;
        }
        N->SubBestPred = N->SubBestSuc = 0;
        N->FixedTo1Saved = N->FixedTo1;
        N->FixedTo2Saved = N->FixedTo2;
    } while ((N = N->SubproblemSuc) != FirstNodeSaved);
    if ((Number = CurrentSubproblem % Subproblems) == 0)
        Number = Subproblems;
    if (NewDimension <= 3 || NewDimension == OldDimension) {
        if (TraceLevel >= 1 && NewDimension <= 3)
            printff
                ("\nSubproblem %d of %d: Dimension = %d (too small)\n",
                 Number, Subproblems, NewDimension);
        FirstNode = FirstNodeSaved;
        return 0;
    }
    if (AscentCandidates > NewDimension - 1)
        AscentCandidates = NewDimension - 1;
    if (InitialPeriod < 0) {
        InitialPeriod = NewDimension / 2;
        if (InitialPeriod < 100)
            InitialPeriod = 100;
    }
    if (Excess < 0)
        Excess = 1.0 / NewDimension;
    if (MaxTrials == -1)
        MaxTrials = NewDimension;
    N = FirstNode;
    do {
        Next = N->SubproblemSuc;
        if (N->Subproblem == CurrentSubproblem) {
            N->Pred = N->Suc = N;
            if (N != FirstNode)
                Follow(N, Last);
            Last = N;
        } else if (Next->Subproblem == CurrentSubproblem
                   && !Fixed(Last, Next)) {
            if (!Last->FixedTo1
                || Last->FixedTo1->Subproblem != CurrentSubproblem)
                Last->FixedTo1 = Next;
            else
                Last->FixedTo2 = Next;
            if (!Next->FixedTo1
                || Next->FixedTo1->Subproblem != CurrentSubproblem)
                Next->FixedTo1 = Last;
            else
                Next->FixedTo2 = Last;
            if (C == C_EXPLICIT) {
                if (Last->Id > Next->Id)
                    Last->C[Next->Id] = 0;
                else
                    Next->C[Last->Id] = 0;
            }
        }
    }
    while ((N = Next) != FirstNode);

    Dimension = NewDimension;
    AllocateSegments();
    InitializeStatistics();
    if (CacheSig)
        for (i = 0; i <= CacheMask; i++)
            CacheSig[i] = 0;
    OptimumSaved = Optimum;
    Optimum = 0;
    N = FirstNode;
    do {
        if (N->SubproblemSuc == N->InitialSuc ||
            N->SubproblemPred == N->InitialSuc)
            InitialTourEdges++;
        if (!Fixed(N, N->Suc))
            Optimum += Distance(N, N->Suc);
        if (N->FixedTo1 && N->Subproblem != N->FixedTo1->Subproblem)
            eprintf("Illegal fixed edge (%d,%d)", N->Id, N->FixedTo1->Id);
        if (N->FixedTo2 && N->Subproblem != N->FixedTo2->Subproblem)
            eprintf("Illegal fixed edge (%d,%d)", N->Id, N->FixedTo2->Id);
    }
    while ((N = N->Suc) != FirstNode);
    if (TraceLevel >= 1)
        printff
            ("\nSubproblem %d of %d: Dimension = %d, Upper bound = "
             GainFormat "\n", Number, Subproblems, Dimension, Optimum);
    FreeCandidateSets();
    CreateCandidateSet();

    for (Run = 1; Run <= Runs; Run++) {
        LastTime = GetTime();
        Cost = Norm != 0 ? FindTour() : Optimum;
        /* Merge with subproblem tour */
        Last = 0;
        N = FirstNode;
        do {
            if (N->Subproblem == CurrentSubproblem) {
                if (Last)
                    Last->Next = N;
                Last = N;
            }
        }
        while ((N = N->SubproblemSuc) != FirstNode);
        Last->Next = FirstNode;
        Cost = MergeWithTour();
        if (MaxPopulationSize > 1) {
            /* Genetic algorithm */
            for (i = 0; i < PopulationSize; i++)
                Cost = MergeTourWithIndividual(i);
            if (!HasFitness(Cost)) {
                if (PopulationSize < MaxPopulationSize) {
                    AddToPopulation(Cost);
                    if (TraceLevel >= 1)
                        PrintPopulation();
                } else if (Cost < Fitness[PopulationSize - 1]) {
                    ReplaceIndividualWithTour(PopulationSize - 1, Cost);
                    if (TraceLevel >= 1)
                        PrintPopulation();
                }
            }
        }
        if (Cost < BestCost) {
            N = FirstNode;
            do {
                N->SubBestPred = N->Pred;
                N->SubBestSuc = N->Suc;
            } while ((N = N->Suc) != FirstNode);
            BestCost = Cost;
        }
        if (Cost < Optimum || (Cost != Optimum && OutputTourFileName)) {
            Improvement = Optimum - Cost;
            if (Improvement > 0) {
                BestCost = GlobalCost = *GlobalBestCost -= Improvement;
                Optimum = Cost;
            } else
                GlobalCost = *GlobalBestCost - Improvement;
            N = FirstNode;
            do
                N->Mark = 0;
            while ((N = N->SubproblemSuc) != FirstNode);
            do {
                N->Mark = N;
                if (!N->SubproblemSuc->Mark &&
                    (N->Subproblem != CurrentSubproblem ||
                     N->SubproblemSuc->Subproblem != CurrentSubproblem))
                    N->BestSuc = N->SubproblemSuc;
                else if (!N->SubproblemPred->Mark &&
                         (N->Subproblem != CurrentSubproblem ||
                          N->SubproblemPred->Subproblem !=
                          CurrentSubproblem))
                    N->BestSuc = N->SubproblemPred;
                else if (!N->Suc->Mark)
                    N->BestSuc = N->Suc;
                else if (!N->Pred->Mark)
                    N->BestSuc = N->Pred;
                else
                    N->BestSuc = FirstNode;
            }
            while ((N = N->BestSuc) != FirstNode);
            Dimension = DimensionSaved;
            i = 0;
            do {
                if (ProblemType != ATSP)
                    BetterTour[++i] = N->Id;
                else if (N->Id <= Dimension / 2) {
                    i++;
                    if (N->BestSuc->Id != N->Id + Dimension / 2)
                        BetterTour[i] = N->Id;
                    else
                        BetterTour[Dimension / 2 - i + 1] = N->Id;
                }
            }
            while ((N = N->BestSuc) != FirstNode);
            BetterTour[0] =
                BetterTour[ProblemType !=
                           ATSP ? Dimension : Dimension / 2];
            WriteTour(OutputTourFileName, BetterTour, GlobalCost);
            if (Improvement > 0) {
                do
                    if (N->Subproblem != CurrentSubproblem)
                        break;
                while ((N = N->SubproblemPred) != FirstNode);
                if (N->SubproblemSuc == N->BestSuc) {
                    N = FirstNode;
                    do {
                        N->BestSuc->SubproblemPred = N;
                        N = N->SubproblemSuc = N->BestSuc;
                    }
                    while (N != FirstNode);
                } else {
                    N = FirstNode;
                    do
                        (N->SubproblemPred = N->BestSuc)->SubproblemSuc =
                            N;
                    while ((N = N->BestSuc) != FirstNode);
                }
                RecordBestTour();
                WriteTour(TourFileName, BestTour, GlobalCost);
            }
            Dimension = NewDimension;
            if (TraceLevel >= 1) {
                printff("*** %d: Cost = " GainFormat, Number, GlobalCost);
                if (OptimumSaved != MINUS_INFINITY && OptimumSaved != 0)
                    printff(", Gap = %04f%%",
                            100.0 * (GlobalCost -
                                     OptimumSaved) / OptimumSaved);
                printff(", Time = %0.2f sec. %s\n",
                        fabs(GetTime() - LastTime),
                        GlobalCost < OptimumSaved ? "<" : GlobalCost ==
                        OptimumSaved ? "=" : "");
            }
        }

        Time = fabs(GetTime() - LastTime);
        UpdateStatistics(Cost, Time);
        if (TraceLevel >= 1 && Cost != PLUS_INFINITY)
            printff("Run %d: Cost = " GainFormat ", Time = %0.2f sec.\n\n",
                    Run, Cost, Time);
        if (PopulationSize >= 2 &&
            (PopulationSize == MaxPopulationSize
             || Run >= 2 * MaxPopulationSize) && Run < Runs) {
            Node *N;
            int Parent1, Parent2;
            Parent1 = LinearSelection(PopulationSize, 1.25);
            do
                Parent2 = LinearSelection(PopulationSize, 1.25);
            while (Parent1 == Parent2);
            ApplyCrossover(Parent1, Parent2);
            N = FirstNode;
            do {
                int d = C(N, N->Suc);
                AddCandidate(N, N->Suc, d, INT_MAX);
                AddCandidate(N->Suc, N, d, INT_MAX);
                N = N->InitialSuc = N->Suc;
            }
            while (N != FirstNode);
        }
        SRandom(++Seed);
        if (Norm == 0)
            break;
    }

    if (TraceLevel >= 1)
        PrintStatistics();

    if (C == C_EXPLICIT) {
        N = FirstNode;
        do {
            for (i = 1; i < N->Id; i++) {
                N->C[i] -= N->Pi + NodeSet[i].Pi;
                N->C[i] /= Precision;
            }
            if (N->FixedTo1 && N->FixedTo1 != N->FixedTo1Saved) {
                if (N->Id > N->FixedTo1->Id)
                    N->C[N->FixedTo1->Id] = Distance(N, N->FixedTo1);
                else
                    N->FixedTo1->C[N->Id] = Distance(N, N->FixedTo1);
            }
            if (N->FixedTo2 && N->FixedTo2 != N->FixedTo2Saved) {
                if (N->Id > N->FixedTo2->Id)
                    N->C[N->FixedTo2->Id] = Distance(N, N->FixedTo2);
                else
                    N->FixedTo2->C[N->Id] = Distance(N, N->FixedTo2);
            }
        }
        while ((N = N->Suc) != FirstNode);
    }

    FreeSegments();
    FreeCandidateSets();
    FreePopulation();
    if (InitialTourEdges == Dimension) {
        do
            N->InitialSuc = N->SubproblemSuc;
        while ((N = N->SubproblemSuc) != FirstNode);
    } else {
        do
            N->InitialSuc = 0;
        while ((N = N->SubproblemSuc) != FirstNode);
    }
    Dimension = ProblemType != ATSP ? DimensionSaved : 2 * DimensionSaved;
    N = FirstNode = FirstNodeSaved;
    do {
        N->Suc = N->BestSuc = N->SubproblemSuc;
        N->Suc->Pred = N;
        Next = N->FixedTo1;
        N->FixedTo1 = N->FixedTo1Saved;
        N->FixedTo1Saved = Next;
        Next = N->FixedTo2;
        N->FixedTo2 = N->FixedTo2Saved;
        N->FixedTo2Saved = Next;
    }
    while ((N = N->Suc) != FirstNode);
    Optimum = OptimumSaved;
    Excess = ExcessSaved;
    AscentCandidates = AscentCandidatesSaved;
    InitialPeriod = InitialPeriodSaved;
    MaxTrials = MaxTrialsSaved;
    return 1;
}
Example #14
/*
 *  ======== DSPNode_Delete ========
 *  Purpose:
 *      Delete all DSP-side and GPP-side resources for the node.
 */
DBAPI DSPNode_Delete(DSP_HNODE hNode)
{
	DSP_STATUS status = DSP_SOK;
	Trapped_Args tempStruct;
	BYTE *pVirtBase = NULL;
	struct DSP_BUFFERATTR bufAttr;
	struct CMM_OBJECT *hCmm;		/* shared memory mngr handle */
	struct CMM_INFO pInfo;		/* Used for virtual space allocation */
	DSP_NODETYPE nodeType;
	struct DSP_NODEATTR    nodeAttr;
#ifdef DEBUG_BRIDGE_PERF
	struct timeval tv_beg;
	struct timeval tv_end;
	struct timezone tz;
	int timeRetVal = 0;

	timeRetVal = getTimeStamp(&tv_beg);
#endif

	DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("NODE: DSPNode_Delete:\r\n")));
	if (!hNode) {
		/* Invalid pointer */
		status = DSP_EHANDLE;
		DEBUGMSG(DSPAPI_ZONE_ERROR, (TEXT("NODE: DSPNode_Delete: "
					"hNode is Invalid Handle\r\n")));
		return status;
	}
	/* Get segment size.
	 >0 is SM segment. Get default SM Mgr*/
	tempStruct.ARGS_CMM_GETHANDLE.hProcessor = NULL;
	tempStruct.ARGS_CMM_GETHANDLE.phCmmMgr = &hCmm;
	status = DSPTRAP_Trap(&tempStruct, CMD_CMM_GETHANDLE_OFFSET);
	if (DSP_SUCCEEDED(status)) {
		/* Get SM segment info from CMM */
		tempStruct.ARGS_CMM_GETINFO.hCmmMgr = hCmm;
		tempStruct.ARGS_CMM_GETINFO.pCmmInfo = &pInfo;
		status = DSPTRAP_Trap(&tempStruct, CMD_CMM_GETINFO_OFFSET);
		if (DSP_FAILED(status)) {
			status = DSP_EFAIL;
			DEBUGMSG(DSPAPI_ZONE_ERROR,
				(TEXT("NODE: DSPNode_Delete:"
					" Failed to get SM segment\r\n")));
		} else
			status = DSP_SOK;

	} else {
		status = DSP_EFAIL;
		DEBUGMSG(DSPAPI_ZONE_ERROR, (TEXT("NODE: DSPNode_Delete: "
					"Failed to CMM handle\r\n")));
	}
	if (!DSP_SUCCEEDED(status)) {
		status = DSP_EBADSEGID;	/* no SM segments*/
		return status;
	}
    status = DSPNode_GetAttr(hNode, &nodeAttr, sizeof(nodeAttr));
	GetNodeType(hNode, &nodeType);
	if (nodeType != NODE_DEVICE) {
		/*segInfo index starts at 0.These checks may not be required*/
		if ((pInfo.segInfo[0].dwSegBasePa != 0) &&
		    (pInfo.segInfo[0].ulTotalSegSize) > 0) {
			/* get node translator's virtual address range
			   so we can free it */
			bufAttr.uAlignment = 0;
			bufAttr.uSegment = 1 | MEMRY_GETVIRTUALSEGID;
			DSPNode_AllocMsgBuf(hNode, 1, &bufAttr, &pVirtBase);
			/* Free virtual space */
			if (!pVirtBase)
				goto loop_end;

			if (munmap(pVirtBase,
					pInfo.segInfo[0].ulTotalSegSize)) {
				status = DSP_EFAIL;
			}
		}
	}
loop_end:
	if (DSP_SUCCEEDED(status)) {
		/* Set up the structure Call DSP Trap */
		tempStruct.ARGS_NODE_DELETE.hNode = hNode;
		status = DSPTRAP_Trap(&tempStruct, CMD_NODE_DELETE_OFFSET);
		/* Free any node heap memory */
		if (nodeAttr.inNodeAttrIn.pGPPVirtAddr) {
			DEBUGMSG(DSPAPI_ZONE_FUNCTION, (TEXT("DSPNodeDelete:"
					"Freeing Node heap addr \n")));
			free(nodeAttr.inNodeAttrIn.pGPPVirtAddr);
		}
	}
#ifdef DEBUG_BRIDGE_PERF
	timeRetVal = getTimeStamp(&tv_end);
	PrintStatistics(&tv_beg, &tv_end, "DSPNode_Delete", 0);
#endif

	return status;
}
Example #15
int main(int argc, char *argv[])
{
    GainType Cost;
    double Time, LastTime = GetTime();

    /* Read the specification of the problem */
    if (argc >= 2)
        ParameterFileName = argv[1];
    ReadParameters();
    MaxMatrixDimension = 10000;
    ReadProblem();

    if (SubproblemSize > 0) {
        if (DelaunayPartitioning)
            SolveDelaunaySubproblems();
        else if (KarpPartitioning)
            SolveKarpSubproblems();
        else if (KCenterPartitioning)
            SolveKCenterSubproblems();
        else if (KMeansPartitioning)
            SolveKMeansSubproblems();
        else if (RohePartitioning)
            SolveRoheSubproblems();
        else if (MoorePartitioning || SierpinskiPartitioning)
            SolveSFCSubproblems();
        else
            SolveTourSegmentSubproblems();
        return EXIT_SUCCESS;
    }
    AllocateStructures();
    CreateCandidateSet();
    InitializeStatistics();

    if (Norm != 0)
        BestCost = PLUS_INFINITY;
    else {
        /* The ascent has solved the problem! */
        Optimum = BestCost = (GainType) LowerBound;
        UpdateStatistics(Optimum, GetTime() - LastTime);
        RecordBetterTour();
        RecordBestTour();
        WriteTour(OutputTourFileName, BestTour, BestCost);
        WriteTour(TourFileName, BestTour, BestCost);
        Runs = 0;
    }

    /* Find a specified number (Runs) of local optima */
    for (Run = 1; Run <= Runs; Run++) {
        LastTime = GetTime();
        Cost = FindTour();      /* using the Lin-Kernighan heuristic */
        if (MaxPopulationSize > 1) {
            /* Genetic algorithm */
            int i;
            for (i = 0; i < PopulationSize; i++) {
                GainType OldCost = Cost;
                Cost = MergeTourWithIndividual(i);
                if (TraceLevel >= 1 && Cost < OldCost) {
                    printff("  Merged with %d: Cost = " GainFormat, i + 1,
                            Cost);
                    if (Optimum != MINUS_INFINITY && Optimum != 0)
                        printff(", Gap = %0.4f%%",
                                100.0 * (Cost - Optimum) / Optimum);
                    printff("\n");
                }
            }
            if (!HasFitness(Cost)) {
                if (PopulationSize < MaxPopulationSize) {
                    AddToPopulation(Cost);
                    if (TraceLevel >= 1)
                        PrintPopulation();
                } else if (Cost < Fitness[PopulationSize - 1]) {
                    i = ReplacementIndividual(Cost);
                    ReplaceIndividualWithTour(i, Cost);
                    if (TraceLevel >= 1)
                        PrintPopulation();
                }
            }
        } else if (Run > 1)
            Cost = MergeBetterTourWithBestTour();
        if (Cost < BestCost) {
            BestCost = Cost;
            RecordBetterTour();
            RecordBestTour();
            WriteTour(OutputTourFileName, BestTour, BestCost);
            WriteTour(TourFileName, BestTour, BestCost);
        }
        if (Cost < Optimum) {
            if (FirstNode->InputSuc) {
                Node *N = FirstNode;
                while ((N = N->InputSuc = N->Suc) != FirstNode);
            }
            Optimum = Cost;
            printff("*** New optimum = " GainFormat " ***\n\n", Optimum);
        }
        Time = fabs(GetTime() - LastTime);
        UpdateStatistics(Cost, Time);
        if (TraceLevel >= 1 && Cost != PLUS_INFINITY) {
            printff("Run %d: Cost = " GainFormat, Run, Cost);
            if (Optimum != MINUS_INFINITY && Optimum != 0)
                printff(", Gap = %0.4f%%",
                        100.0 * (Cost - Optimum) / Optimum);
            printff(", Time = %0.2f sec. %s\n\n", Time,
                    Cost < Optimum ? "<" : Cost == Optimum ? "=" : "");
        }
        if (PopulationSize >= 2 &&
                (PopulationSize == MaxPopulationSize ||
                 Run >= 2 * MaxPopulationSize) && Run < Runs) {
            Node *N;
            int Parent1, Parent2;
            Parent1 = LinearSelection(PopulationSize, 1.25);
            do
                Parent2 = LinearSelection(PopulationSize, 1.25);
            while (Parent2 == Parent1);
            ApplyCrossover(Parent1, Parent2);
            N = FirstNode;
            do {
                int d = C(N, N->Suc);
                AddCandidate(N, N->Suc, d, INT_MAX);
                AddCandidate(N->Suc, N, d, INT_MAX);
                N = N->InitialSuc = N->Suc;
            }
            while (N != FirstNode);
        }
        SRandom(++Seed);
    }
    PrintStatistics();
    return EXIT_SUCCESS;
}
Example #16
int main(int argc, char *argv[])
{
    FILE *stardata;
    FILE *planetdata;
    FILE *sectordata;
    FILE *outputtxt = NULL;
    char str[200];
    int c;
    int i;
    int star;
    /*
     * int x;
     */
    /*
     * double att;
     */
    double xspeed[NUMSTARS];
    double yspeed[NUMSTARS];

    /*
     * Empty stars
     */
    int nempty;

    /*
     * How many rows and columns are needed
     */
    int rowcolumns;

    /*
     * Non-empty stars not placed
     */
    int starsleft;

    /*
     * How many planetless systems is in each square
     */
    int emptyrounds;

    /*
     * Size of square
     */
    double displacement;

    /*
     * How many wormholes
     */
    int whcnt;
    int wormholes = -1;
    int wormidx;
    struct w_holes w_holes[NUMSTARS + 1];
    int x;
    int y;
    int z;
    int squaresleft;
    int total;
    int flag = 0;
    struct power power[MAXPLAYERS];
    struct block block[MAXPLAYERS];

    /*
     * Initialize
     */
    /*
     * srandom(getpid());
     */
    Bzero(Sdata);

    /*
     * Read the arguments for values
     */
    for (i = 1; i < argc; ++i) {
        if (argv[i][0] != '-') {
            printf("\n");
            printf("Usage: makeuniv [-a] [-b] [-d] [-e E] [-l MIN] [-m MAX] ");
            printf("[-s N] [-v] [-w C] [-x]\n");
            printf("  -a      Autoload star names.\n");
            printf("  -b      Autoload planet names.\n");
            printf("  -d      Use smashup (asteroid impact routines.\n");
            printf("  -e E    Make E%% of stars have no planets.\n");
            printf("  -l MIN  Other systems will have at least MIN planets.\n");
            printf("  -m MAX  Other systems will have at most MAX planets.\n");
            printf("  -p      Create postscript map file of the univese.\n");
            printf("  -s N    The univers will have N stars.\n");
            printf("  -v      Do no print info and map of planets generated.\n");
            printf("  -w C    The universe will have C wormholes.\n");
            printf("  -x      Do not print info on stars generated.\n");
            printf("\n");

            return 0;
        } else {
            switch (argv[i][1]) {
            case 'a':
                autoname_star = 1;

                break;
            case 'b':
                autoname_plan = 1;

                break;
            case 'd':
                use_smashup = 1;

                break;
            case 'e':
                ++i;
                planetlesschance = atoi(argv[i]);

                break;
            case 'l':
                ++i;
                minplanets = atoi(argv[i]);

                break;
            case 'm':
                ++i;
                maxplanets = atoi(argv[i]);

                break;
            case 'p':
                printpostscript = 1;

                break;
            case 's':
                ++i;
                nstars = atoi(argv[i]);

                break;
            case 'v':
                printplaninfo = 0;

                break;
            case 'x':
                printstarinfo = 0;

                break;
            case 'w':
                ++i;
                wormholes = atoi(argv[i]);

                break;
            default:
                printf("\n");
                printf("Unknown option \"%s\".\n", argv[i]);
                printf("\n");
                printf("Usage: makeuniv [-a] [-b] [-e E] [-l MIN] [-m MAX] ");
                printf("[-s N] [-v] [-w C] [-x]\n");
                printf("  -a      Autoload star names.\n");
                printf("  -b      Autoload planetnames.\n");
                printf("  -d      Use smashup (asteroid impact) routines.\n");
                printf("  -e E    Make E%% of stars have no planets.\n");
                printf("  -l MIN  Other systems will have at least MIN planets.\n");
                printf("  -m MAX  Other systems will have at most MAX planets.\n");
                printf("  -p      Create postscript map file of the universe.\n");
                printf("  -s N    The universe will have N stars.\n");
                printf("  -v      Do not print info and map of planets generated.\n");
                printf("  -w C    The universe will have C wormholes.\n");
                printf("  -x      Do not print info on stars generated.\n");
                printf("\n");

                return 0;
            }
        }
    }

    /*
     * Get values for all the switches that still don't have good values.
     */
    if (autoname_star == -1) {
        printf("\nDo you wish to use the file \"%s\" for star names? [y/n]> ",
               STARLIST);

        c = getchr();

        if (c != '\n') {
            getchr();
        }

        autoname_star = (c == 'y');
    }

    if (autoname_plan == -1) {
        printf("\nDo you wish to use the file \"%s\" for planet names? [y/n]> ",
               PLANETLIST);

        c = getchr();

        if (c != '\n') {
            getchr();
        }

        autoname_plan = (c == 'y');
    }

    if (use_smashup == -1) {
        printf("\nUse the smashup (asteroid impact) routines? [y/n]> ");
        c = getchr();

        if (c != '\n') {
            getchr();
        }

        use_smashup = (c == 'y');
    }

    while ((nstars < 1) || (nstars >= NUMSTARS)) {
        printf("Number of stars [1-%d]:", NUMSTARS - 1);
        scanf("%d", &nstars);
    }

    while ((planetlesschance < 0) || (planetlesschance > 100)) {
        printf("Percentage of empty systems [0-100]:");
        scanf("%d", &planetlesschance);
    }

    while ((minplanets <= 0) || (minplanets > absmaxplan)) {
        printf("Minimum number of planets per system [1-%d]:", absmaxplan);
        scanf("%d", &maxplanets);
    }

    while ((wormholes < 0) || (wormholes > nstars) || ((wormholes % 2) == 1)) {
        printf("Number of wormholes (muse be even number) [0-%d]:", nstars);
        scanf("%d", &wormholes);
    }

    Makeplanet_init();
    Makestar_init();
    Sdata.numstars = nstars;
    sprintf(str, "/bin/mkdir -p %s", DATADIR);
    system(str);
    planetdata = fopen(PLANETDATAFL, "w+");

    if (planetdata == NULL) {
        printf("Unable to open planet data file \"%s\"\n", PLANETDATAFL);

        return -1;
    }

    sectordata = fopen(SECTORDATAFL, "w+");

    if (sectordata == NULL) {
        printf("Unable to open sector data file \"%s\"\n", SECTORDATAFL);

        return -1;
    }

    if (printstarinfo || printplaninfo) {
        outputtxt = fopen(OUTPUTFILE, "w+");

        if (outputtxt == NULL) {
            printf("Unable to open \"%s\" for output.\n", OUTPUTFILE);

            return -1;
        }
    }

    if (!wormholes) {
        whcnt = 0;
    } else {
        whcnt = (int)(nstars / wormholes) - 1;
    }

    wormidx = 0;

    for (star = 0; star < nstars; ++star) {
        Stars[star] = Makestar(planetdata, sectordata, outputtxt);
        xspeed[star] = 0;
        yspeed[star] = 0;
        Stars[star]->wh_has_wormhole = 0;
        Stars[star]->wh_dest_starnum = -1;
        Stars[star]->wh_stability = 0;

        /*
         * See if time to put a wormhole in
         */
        if (!whcnt) {
            /*
             * Make a wormhole here. This adds a wormhole planet to this star.
             */
            if (Stars[star]->numplanets == MAXPLANETS) {
                /*
                 * Skip until a star has < MAXPLANETS
                 */
                whcnt = 0;

                continue;
            } else {
                if (!wormholes) {
                    whcnt = 0;
                } else {
                    whcnt = (int)(nstars / wormholes) - 1;
                }

                make_wormhole(Stars[star], planetdata, sectordata, outputtxt);
                w_holes[wormidx].star = Stars[star];
                w_holes[wormidx].num = star;
                ++wormidx;
            }
        }

        --whcnt;
    }

    /*
     * Set data files to group read/write
     */
    chmod(PLANETDATAFL, 00660);
    fclose(planetdata);
    chmod(SECTORDATAFL, 00660);
    fclose(sectordata);

    /*
     * New Gardan code 21.12.1996
     * Changed 27.8.1997: Displacement wasn't set before
     *
     * Start here
     */
    total = nstars;
    nempty = round_rand(((double)nstars * (double)planetlesschance) / 100);

    /*
     * Amount of non-empty stars
     */
    nstars -= nempty;
    rowcolumns = 0;

    while ((rowcolumns * rowcolumns) < (nstars / 2)) {
        ++rowcolumns;
    }

    /*
     * Unhandled squares
     */
    squaresleft = rowcolumns * rowcolumns;
    starsleft = nstars - squaresleft;
    emptyrounds = 0;

    while (nempty > squaresleft) {
        ++emptyrounds;
        nempty -= squaresleft;
    }

    displacement = UNIVSIZE / rowcolumns;

    /*
     * Size of square
     */
    for (x = 0; x < rowcolumns; ++x) {
        for (y = 0; y < rowcolumns; ++y) {
            /*
             * planetlesschance = 0;
             * Stars[starindex] = Makestar(planetdata, sectordata, outputtxt);
             * xspeed[starindex] = 0;
             * yspeed[starindex] = 0;
             */
            Stars[starindex]->xpos = displacement * (x + (1.0 * double_rand()));
            Stars[starindex]->ypos = displacement * (y + (1.0 * double_rand()));
            ++starindex;
            z = int_rand(1, squaresleft);

            /*
             * If there is system with planet
             */
            if (z <= starsleft) {
                /*
                 * Stars[starindex] =
                 *     Makestar(planetdata, sectordata, outputtxt);
                 * xspeed[starindex] = 0;
                 * yspeec[starindex] = 0;
                 */
                Stars[starindex]->xpos =
                    displacement * (x + (1.0 * double_rand()));

                Stars[starindex]->ypos =
                    displacement * (y + (1.0 * double_rand()));
                --starsleft;
                ++starindex;
            }

            /*
             * If there is planetless system
             */
            if (x <= nempty) {
                /*
                 * planetlesschance = 100;
                 * Stars[starindex] =
                 *     Makestar(planetdata, sectordata, outputtxt);
                 * xspeed[starindex] = 0;
                 * yspeed[starindex] = 0;
                 */
                Stars[starindex]->xpos =
                    displacement * (x + (1.0 * double_rand()));

                Stars[starindex]->ypos =
                    displacement * (y + (1.0 * double_rand()));

                /*
                 * sprintf(Stars[starindex]->name, "E%d_%d", x, y);
                 */

                /*
                 * Added -mfw
                 */
                strcpy(Stars[starindex]->name, NextStarName());
                --nempty;
                ++starindex;
            }

            /*
             * Planetless systems equal to all squares
             */
            for (z = 0; z < emptyrounds; ++z) {
                /*
                 * planetlesschance = 100;
                 * Stars[starindex] =
                 *     Makestar(planetdata, sectordata, outputtxt);
                 * xspeed[starindex] = 0;
                 * yspeed[starindex] = 0;
                 */
                Stars[starindex]->xpos =
                    displacement * (x + (1.0 * double_rand()));

                Stars[starindex]->ypos =
                    displacement * (y + (1.0 * double_rand()));

                /*
                 * sprintf(Stars[starindex]->name, "E%d_%d", x, y);
                 */

                /*
                 * Added -mfw
                 */
                strcpy(Stars[starindex]->name, NextStarName());
                ++starindex;
            }

            --squaresleft;
        }
    }

    /*
     * Checks if two stars are too close
     */
    z = 1;

    while (z) {
        z = 0;

        for (x = 2; x < total; ++x) {
            for (y = x + 1; y < total; ++y) {
                dist = sqrt(Distsq(Stars[x]->xpos,
                                   Stars[x]->ypos,
                                   Stars[y]->xpos,
                                   Stars[y]->ypos));

                if (dist < (4 * SYSTEMSIZE)) {
                    z = 1;

                    if (Stars[x]->ypos > Stars[y]->ypos) {
                        Stars[x]->ypos += (4 * SYSTEMSIZE);
                    } else {
                        Stars[y]->ypos += (4 * SYSTEMSIZE);
                    }
                }
            }
        }
    }

    for (x = 0; x < starindex; ++x) {
        if (Stars[x]->xpos > UNIVSIZE) {
            Stars[x]->xpos -= UNIVSIZE;
        }

        if (Stars[x]->ypos > UNIVSIZE) {
            Stars[x]->ypos -= UNIVSIZE;
        }
    }

    /*
     * End Gardan code
     */

    /*
     * Calculate worm hole destinations
     */
    for (x = 1; x < wormidx; x += 2) {
        w_holes[x].star->wh_dest_starnum = w_holes[x - 1].num;
        w_holes[x - 1].star->wh_dest_starnum = w_holes[x].num;

        if (printstarinfo) {
            fprintf(outputtxt,
                    "Wormhole[%d], Dest: %d, Star: %d %s, Stab: %d\n",
                    x - 1,
                    w_holes[x - 1].star->wh_dest_starnum,
                    w_holes[x - 1].num,
                    w_holes[x - 1].star->name,
                    w_holes[x - 1].star->wh_stability);
        }

        if (printstarinfo) {
            fprintf(outputtxt,
                    "Wormhole[%d], Dest: %d, Star: %d %s, Stab: %d\n",
                    x,
                    w_holes[x].star->wh_dest_starnum,
                    w_holes[x].num,
                    w_holes[x].star->name,
                    w_holes[x].star->wh_stability);
        }
    }

    if (((double)wormidx / 2) != ((int)wormidx / 2)) {
        /*
         * Odd number so last w_hole points to #1 no return
         */
        w_holes[wormidx - 1].star->wh_dest_starnum = w_holes[0].num;

        if (printstarinfo) {
            fprintf(outputtxt,
                    "Wormhole[%d], Dest: %d, Star: %d %s, Stab: %d\n",
                    wormidx - 1,
                    w_holes[wormidx - 1].star->wh_dest_starnum,
                    w_holes[wormidx - 1].num,
                    w_holes[wormidx - 1].star->name,
                    w_holes[wormidx - 1].star->wh_stability);
        }
    }

    if (printstarinfo) {
        fprintf(outputtxt, "Total Wormholes: %d\n", wormidx);
    }

#if 0
    /*
     * Old code starts
     */

    /*
     * Try to more evenly space stars. Essentially this is an inverse-gravity
     * calculation: The nearer two stars are to each other, the more they
     * repulse each other. Several iterations of this will suffice to move all
     * of the stars nicely apart.
     */

    CLUSTER_COUNTER = 6;
    STAR_COUNTER = 1;
    dist = CLUSTER_FROM_CENTER;

    for (its = 1; its <= 6; ++its) {
        /*
         * Circle of stars
         */
        fprintf(outputtxt, "Grouping [%f]", dist);

        for (clusters = 1; clusters <= CLUSTER_COUNTER; ++clusters) {
            /*
             * Number of clusters in circle
             */
            for (starsinclus = 1; starsinclus <= STAR_COUNTER; ++starsinclus) {
                /*
                 * Number of stars in cluster
                 */
                ange = 2.0 * M_PI * ANGLE;
                cluster_delta_x = int_rand(CLUSTER_STAR_MIN, CLUSTER_STAR_MAX);
                cluster_delta_y = int_rand(CLUSTER_STAR_MIN, CLUSTER_STAR_MAX);
                clusterx = dist * sin(angle);
                clustery = dist * cos(angle);

                if (starindex >= Sdatanumstars) {
                    flag = 1;

                    break;
                }

                fprintf(outputtxt, " %s ", Stars[starindex]->name);

                if ((its == 1) || (its == 3) || (its == 6)) {
                    setbit(Stars[starindex]->explored, 1);
                    setbit(Stars[starindex]->inhabited, 1);
                }

                Stars[starindex]->xpos = clusterx + cluster_delta_x;
                Stars[starindex]->ypos = clustery + cluster_delta_y;

                ANGLE = (ANGLE + 0.15) + double_rand();
                fprintf(outputtxt, "ANGLE 1 %f\n", ANGLE);
                ++starindex;
            }
        }

        if (flag) {
            break;
        }

        switch (its + 1) {
        case 2:
            ANGLE = 0.20 + double_rand();
            CLUSTER_COUNTER = 10;
            dist += 25000;

            break;
        case 3:
            ANGLE = 0.35 + double_rand();
            CLUSTER_COUNTER = 13;
            dist += 27000;

            break;
        case 4:
            ANGLE = 0.40 + double_rand();
            CLUSTER_COUNTER = 15;
            dist += 27000;

            break;
        case 5:
            ANGLE = 0.25 + double_rand();
            CLUSTER_COUNTER = 17;
            dist += 32000;

            break;
        case 6:
            ANGLE = 0.12 + double_rand();
            CLUSTER_COUNTER = 17;
            dist += 32000;

            break;
        }

        fprintf(outputtxt, "\n\n");
        fprintf(outputtxt, "ANGLE 2 %f\n", ANGLE);
    }

    Stars[0]->xpos = 0;
    Stars[0]->ypos = 0;
    strcpy(Stars[0]->name, "Bambi");
#endif

    stardata = fopen(STARDATAFL, "w+");

    if (stardata == NULL) {
        printf("Unable to open star data file \"%s\"\n", STARDATAFL);

        return -1;
    }

    fwrite(&Sdata, sizeof(Sdata), 1, stardata);

    for (star = 0; star < Sdata.numstars; ++star) {
        fwrite(Stars[star], sizeof(startype), 1, stardata);
    }

    chmod(STARDATAFL, 00660);
    fclose(stardata);

    EmptyFile(SHIPDATAFL);
    EmptyFile(SHIPFREEDATAFL);
    EmptyFile(COMMODDATAFL);
    EmptyFile(COMMODFREEDATAFL);
    EmptyFile(PLAYERDATAFL);
    EmptyFile(RACEDATAFL);

    memset((char *)power, 0, sizeof(power));
    InitFile(POWFL, power, sizeof(power));

    memset((char *)block, 0, sizeof(block));
    InitFile(BLOCKDATAFL, block, sizeof(block));

    /*
     * Telegram files: directory and a file for each player.
     */
    sprintf(str, "/bin/mkdir -p %s", MSGDIR);
    system(str);
    chmod(MSGDIR, 00770);

#if 0
    /*
     * Why is this not needed anymore?
     */
    for (i = 1; i < MAXPLAYERS; ++i) {
        sprintf(str, "%s.%d", TELEGRAMFL, i);
        Empyfile(str);
    }
#endif

    /*
     * News files: directory and the 4 types of news.
     */
    sprintf(str, "/bin/mkdir -p %s", NEWSDIR);
    system(str);
    chmod(NEWSDIR, 00770);
    EmptyFile(DECLARATIONFL);
    EmptyFile(TRANSFERFL);
    EmptyFile(COMBATFL);
    EmptyFile(ANNOUNCEFL);

    if (printstarinfo) {
        PrintStatistics(outputtxt);
    }

    if (printpostscript) {
        produce_postscript(DEFAULT_POSTSCRIPT_MAP_FILENAME);
    }

    printf("Universe Created!\n");

    if (printstarinfo || printplaninfo) {
        printf("Summary output written to %s\n", OUTPUTFILE);
        fclose(outputtxt);
    }

    return 0;
}
Example #17
File: main.c  Project: ricardoferro390/ADRC
void MenuHandler(){
	short nArgs;
	int selectedNode;
	node * network = NULL;
	statistics * stats = NULL;
	char option, buffer[BUFFER_SIZE], arg1[BUFFER_SIZE], garbageDetector[BUFFER_SIZE];
	
	if(executeAtStart){
		network = ReadNetwork(path);
		if(network != NULL){
			stats = GetStatistics(network);
			PrintStatistics(stats);
			PrintExecutionTime();
			FreeEverything(network, stats);
			return;
		}
		else return;
	}
	
	// printing menu
	PrintMenu();
	printf("Please select an option\n");
	
	while(TRUE){
		// gets user commands and arguments
		printf("-> ");
		fgets (buffer, BUFFER_SIZE, stdin);
		nArgs = sscanf(buffer, "%c %s %s", &option, arg1, garbageDetector);
		
		// selects function based on user option
		////////// load
		if(option == 'f' && nArgs == 2){
			if(network != NULL) free(network);
			network = ReadNetwork(arg1);
			if(network != NULL) printf("Network loaded from file\n");
		}
		////////// route
		else if(option == 'r' && nArgs == 2) {
			if(network != NULL){
				selectedNode = atoi(arg1);
				if(selectedNode <= numberOfNodes && selectedNode > 0){
					results = FindRoutesToNode(network, selectedNode);
					PrintRoutingTable(network);
				}
				else printf("Please select valid node\n");
			}
			else printf("Please load a network first\n");
		}
		///////// statistics
		else if(option == 's' && nArgs == 1){
			if(network != NULL){
				stats = GetStatistics(network);
				PrintStatistics(stats);
				PrintExecutionTime();
			}
			else printf("Please load a network first\n");
		} 
		//////// others
		else if(option == 'h' && nArgs == 1) PrintMenu();
		else if(option == 'q' && nArgs == 1){
			FreeEverything(network, stats);
			printf("Project by: Diogo Salgueiro 72777 and Ricardo Ferro 72870\n");
			printf("Goodbye!\n");
			return;
		}
		else printf("Invalid command\n");
		
	}
};
Example #18
void CMosaikAligner::AlignReadArchiveLowMemory(void) {

	// ==============
	// initialization
	// ==============

	// retrieve the concatenated reference sequence length
	// vector<ReferenceSequence> referenceSequences;

	MosaikReadFormat::CReferenceSequenceReader refseq;
	refseq.Open(mSettings.ReferenceFilename);
	refseq.GetReferenceSequences(referenceSequences);
	mReferenceLength = refseq.GetReferenceSequenceLength();
	const unsigned int numRefSeqs = refseq.GetNumReferenceSequences();
	refseq.Close();

	// retrieve the basespace reference filenames
	//char** pBsRefSeqs = NULL;
	if(mFlags.EnableColorspace) {
		
		MosaikReadFormat::CReferenceSequenceReader bsRefSeq;
		bsRefSeq.Open(mSettings.BasespaceReferenceFilename);

		if(!bsRefSeq.HasSameReferenceSequences(referenceSequences)) {
			printf("ERROR: The basespace and colorspace reference sequence archives do not seem to represent the same FASTA file.\n"); 
			exit(1);
		}

		bsRefSeq.Close();
	}

	// initialize our hash tables
	//InitializeHashTables(CalculateHashTableSize(mReferenceLength, mSettings.HashSize));

	// hash the concatenated reference sequence
	//if(!mFlags.IsUsingJumpDB) {
	//	InitializeHashTables(CalculateHashTableSize(mReferenceLength, mSettings.HashSize), 0, 0, 0);
	//	HashReferenceSequence(refseq);
	//}

	//cout << "- loading reference sequence... ";
	//cout.flush();
	//refseq.LoadConcatenatedSequence(mReference);
	//cout << "finished." << endl;

	// create our reference sequence LUTs
	//unsigned int* pRefBegin = new unsigned int[numRefSeqs];
	//unsigned int* pRefEnd   = new unsigned int[numRefSeqs];
	//
	//for(unsigned int j = 0; j < numRefSeqs; j++) {
	//	pRefBegin[j] = referenceSequences[j].Begin;
	//	pRefEnd[j]   = referenceSequences[j].End;
	//}

	string inputReadArchiveFilename  = mSettings.InputReadArchiveFilename;
	

	if ( !mFlags.UseLowMemory ) {
		
		// prepare BS reference sequence for SOLiD data
		char** pBsRefSeqs = NULL;
		if(mFlags.EnableColorspace) {

			cout << "- loading basespace reference sequences... ";
			cout.flush();

			MosaikReadFormat::CReferenceSequenceReader bsRefSeq;
			bsRefSeq.Open(mSettings.BasespaceReferenceFilename);


			bsRefSeq.CopyReferenceSequences(pBsRefSeqs);
			bsRefSeq.Close();

			cout << "finished." << endl;
		}

		// prepare reference sequence
		refseq.Open(mSettings.ReferenceFilename);
		cout << "- loading reference sequence... ";
		cout.flush();
		refseq.LoadConcatenatedSequence(mReference);
		cout << "finished." << endl;
		refseq.Close();
		
		unsigned int* pRefBegin = new unsigned int[numRefSeqs];
		unsigned int* pRefEnd   = new unsigned int[numRefSeqs];
		for(unsigned int j = 0; j < numRefSeqs; j++) {
			pRefBegin[j] = referenceSequences[j].Begin;
			pRefEnd[j]   = referenceSequences[j].End;
		}
		
		// initialize our hash tables
		if(!mFlags.IsUsingJumpDB) {
			InitializeHashTables(CalculateHashTableSize(mReferenceLength, mSettings.HashSize), 0, 0, 0, mFlags.UseLowMemory, 0);
			HashReferenceSequence(refseq);
		}
		else {
			InitializeHashTables(CalculateHashTableSize(mReferenceLength, mSettings.HashSize), pRefBegin[0], pRefEnd[numRefSeqs - 1], 0, mFlags.UseLowMemory, 0);
			mpDNAHash->LoadKeysNPositions();
		}

		// set the hash positions threshold
		if(mFlags.IsUsingHashPositionThreshold && (mAlgorithm == CAlignmentThread::AlignerAlgorithm_ALL))
			mpDNAHash->RandomizeAndTrimHashPositions(mSettings.HashPositionThreshold);

		// localize the read archive filenames
		string outputReadArchiveFilename = mSettings.OutputReadArchiveFilename;

		// define our read format reader and writer
		MosaikReadFormat::CReadReader in;
		in.Open(inputReadArchiveFilename);
		MosaikReadFormat::ReadGroup readGroup = in.GetReadGroup();
		ReadStatus readStatus          = in.GetStatus();
		mSettings.SequencingTechnology = readGroup.SequencingTechnology;
		mSettings.MedianFragmentLength = readGroup.MedianFragmentLength;

		vector<MosaikReadFormat::ReadGroup> readGroups;
		readGroups.push_back(readGroup);

		// set the alignment status flags
		AlignmentStatus alignmentStatus = AS_UNSORTED_READ | readStatus;
		if(mMode == CAlignmentThread::AlignerMode_ALL) alignmentStatus |= AS_ALL_MODE;
		else alignmentStatus |= AS_UNIQUE_MODE;

		MosaikReadFormat::CAlignmentWriter out;
		out.Open(mSettings.OutputReadArchiveFilename.c_str(), referenceSequences, readGroups, alignmentStatus, ALIGNER_SIGNATURE);

		AlignReadArchive(in, out, pRefBegin, pRefEnd, pBsRefSeqs);

		// close open file streams
		in.Close();

		// SOLiD references should be one base longer after converting back to basespace
		if(mFlags.EnableColorspace) out.AdjustSolidReferenceBases();
		out.Close();

		// free memory
		if(mFlags.IsUsingJumpDB) mpDNAHash->FreeMemory();
		if(pRefBegin)  delete [] pRefBegin;
		if(pRefEnd)    delete [] pRefEnd;
		if(mReference) delete [] mReference;
		if(pBsRefSeqs) {
			for(unsigned int i = 0; i < numRefSeqs; ++i) delete [] pBsRefSeqs[i];
			delete [] pBsRefSeqs;
		}
		pRefBegin  = NULL;
		pRefEnd    = NULL;
		mReference = NULL;
		pBsRefSeqs = NULL;
	}
	else {
		// grouping reference and store information in referenceGroups vector
		// vector< pair <unsigned int, unsigned int> > referenceGroups;
		GroupReferences();
		
		// get hash statistics for adjusting mhp for each reference group and reserve memory
		vector< unsigned int > nHashs;             // the number of hash positions in each reference group
		vector< unsigned int > expectedMemories;   // the number of hashes in each reference group
		uint64_t nTotalHash;
		GetHashStatistics( nHashs, expectedMemories, nTotalHash );
		
		// align reads again per chromosome group
		for ( unsigned int i = 0; i < referenceGroups.size(); i++) {

			unsigned int startRef = referenceGroups[i].first;
			unsigned int endRef   = referenceGroups[i].first + referenceGroups[i].second - 1;
			CConsole::Heading();
			if ( referenceGroups[i].second > 1 )
				cout << endl << "Aligning chromosomes " << startRef + 1 << "-" << endRef + 1 << " (of " << numRefSeqs << "):" << endl;
			else
				cout << endl << "Aligning chromosome " << startRef + 1 << " (of " << numRefSeqs << "):" << endl;
			CConsole::Reset();

			// initialize our hash tables
			// calculate expected memories for jump data
			unsigned int expectedMemory = nHashs[i] + expectedMemories[i];
			// reserve 3% more memory for unexpected usage
			expectedMemory = (unsigned int)( expectedMemory * 1.03 );

			InitializeHashTables(0, referenceSequences[startRef].Begin, referenceSequences[endRef].End, referenceSequences[startRef].Begin, mFlags.UseLowMemory, expectedMemory);

			// set the hash positions threshold
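			// For a rough illustration (hypothetical numbers, not taken from the source):
			// if this group holds 25% of all hash positions (nHashs[i] / nTotalHash == 0.25),
			// the per-group threshold computed below becomes ceil(0.25 * mSettings.HashPositionThreshold),
			// i.e. the global threshold is split across groups in proportion to their hash counts.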
			if(mFlags.IsUsingHashPositionThreshold && (mAlgorithm == CAlignmentThread::AlignerAlgorithm_ALL)) { 
				double ratio = nHashs[i] / (double)nTotalHash;
				unsigned int positionThreshold = ceil(ratio * (double)mSettings.HashPositionThreshold);
				//cout << positionThreshold << endl;
				mpDNAHash->RandomizeAndTrimHashPositions(positionThreshold);
			}

			// load jump data
			mpDNAHash->LoadKeysNPositions();

			// set reference information
			unsigned int* pRefBegin = new unsigned int[referenceGroups[i].second];
			unsigned int* pRefEnd   = new unsigned int[referenceGroups[i].second];
			for ( unsigned int j = 0; j < referenceGroups[i].second; j++ ){
				pRefBegin[j] = referenceSequences[startRef+j].Begin - referenceSequences[startRef].Begin;
				pRefEnd[j]   = referenceSequences[startRef+j].End   - referenceSequences[startRef].Begin;
			}
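			// Note: Begin/End are rebased against the group's first reference, so these
			// offsets are local to the concatenated sub-sequence loaded below rather than
			// to the genome-wide coordinates used in the non-low-memory path.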

			// prepare BS reference sequence for SOLiD data
			char** pBsRefSeqs = NULL;
			if(mFlags.EnableColorspace) {
	
				cout << "- loading basespace reference sequences... ";
				cout.flush();

				MosaikReadFormat::CReferenceSequenceReader bsRefSeq;
				bsRefSeq.Open(mSettings.BasespaceReferenceFilename);


				bsRefSeq.CopyReferenceSequences(pBsRefSeqs, startRef, referenceGroups[i].second);
				bsRefSeq.Close();

				cout << "finished." << endl;
			}

			// prepare reference sequence
			refseq.Open(mSettings.ReferenceFilename);
			cout << "- loading reference sequence... ";
			cout.flush();
			//refseq.LoadConcatenatedSequence(mReference);
			refseq.LoadConcatenatedSequence(mReference, startRef, referenceGroups[i].second);
			refseq.Close();

			// trim reference sequence
			//unsigned int chrLength = referenceSequences[endRef].End - referenceSequences[startRef].Begin + 1;
			//char* chrReference  = new char[ chrLength + 1 ];
			//char* mReferencePtr = mReference + referenceSequences[startRef].Begin;
			//memcpy( chrReference, mReferencePtr, chrLength);
			//chrReference[chrLength] = 0;
			//delete [] mReference;
			//mReference = chrReference;
			cout << "finished." << endl;
			
			
			// localize the read archive filenames
			// get a temporary file name
			string tempFilename;
			CFileUtilities::GetTempFilename(tempFilename);
			outputFilenames.push_back(tempFilename);

			// define our read format reader and writer
			MosaikReadFormat::CReadReader in;
			in.Open(inputReadArchiveFilename);
			MosaikReadFormat::ReadGroup readGroup = in.GetReadGroup();
			ReadStatus readStatus          = in.GetStatus();
			mSettings.SequencingTechnology = readGroup.SequencingTechnology;
			mSettings.MedianFragmentLength = readGroup.MedianFragmentLength;

			vector<MosaikReadFormat::ReadGroup> readGroups;
			readGroups.push_back(readGroup);

			// set the alignment status flags
			AlignmentStatus alignmentStatus = AS_UNSORTED_READ | readStatus;
			if(mMode == CAlignmentThread::AlignerMode_ALL) alignmentStatus |= AS_ALL_MODE;
			else alignmentStatus |= AS_UNIQUE_MODE;

			// prepare a new vector for the current chromosome for opening out archive
			vector<ReferenceSequence> smallReferenceSequences;
			for ( unsigned int j = 0; j < referenceGroups[i].second; j++ ){
				smallReferenceSequences.push_back(referenceSequences[startRef+j]);
			}

			MosaikReadFormat::CAlignmentWriter out;
			out.Open(tempFilename.c_str(), smallReferenceSequences, readGroups, alignmentStatus, ALIGNER_SIGNATURE);
			out.AdjustPartitionSize(20000/referenceGroups.size());


			AlignReadArchive(in, out, pRefBegin, pRefEnd, pBsRefSeqs);

			// close open file streams
			in.Close();

			// SOLiD references should be one base longer after converting back to basespace
			if(mFlags.EnableColorspace) out.AdjustSolidReferenceBases();
			out.Close();

			// free memory
			if(mFlags.IsUsingJumpDB) mpDNAHash->FreeMemory();
			if(pRefBegin)  delete [] pRefBegin;
			if(pRefEnd)    delete [] pRefEnd;
			if(mReference) delete [] mReference;
			if(pBsRefSeqs) {
				for(unsigned int j = 0; j < referenceGroups[i].second; j++)
					delete [] pBsRefSeqs[j];
				
				delete [] pBsRefSeqs;
			}
			pRefBegin  = NULL;
			pRefEnd    = NULL;
			mReference = NULL;
			pBsRefSeqs = NULL;
		}
	}

	if ( mFlags.UseLowMemory )
		MergeArchives();

	PrintStatistics();
}
Example #19
0
int IfaceCheckApp::OnRun()
{
    long startTime = wxGetLocalTime();      // for timing purposes

    wxCmdLineParser parser(g_cmdLineDesc, argc, argv);
    parser.SetLogo(
        wxString::Format("wxWidgets Interface checker utility (built %s against %s)",
                         __DATE__, wxVERSION_STRING));

    // make the output more readable:
    wxLog::SetActiveTarget(new IfaceCheckLog);
    wxLog::DisableTimestamp();

    // parse the command line...
    bool ok = true;
    wxString preprocFile;
    switch (parser.Parse())
    {
        case 0:
            if (parser.Found(VERBOSE_SWITCH))
                g_verbose = true;

            // IMPORTANT: parsing #define values must be done _before_ actually
            //            parsing the GCC/doxygen XML files
            if (parser.Found(USE_PREPROCESSOR_OPTION, &preprocFile))
            {
                if (!ParsePreprocessorOutput(preprocFile))
                    return 1;
            }

            // in any case set basic std preprocessor #defines:
            m_doxyInterface.AddPreprocessorValue("NULL", "0");

            // parse the two XML files which contain the real and the doxygen interfaces
            // for wxWidgets API:
            if (!m_gccInterface.Parse(parser.GetParam(0)) ||
                !m_doxyInterface.Parse(parser.GetParam(1)))
                return 1;

            if (parser.Found(DUMP_SWITCH))
            {
                wxLogMessage("Dumping real API to '%s'...", API_DUMP_FILE);
                m_gccInterface.Dump(API_DUMP_FILE);

                wxLogMessage("Dumping interface API to '%s'...", INTERFACE_DUMP_FILE);
                m_doxyInterface.Dump(INTERFACE_DUMP_FILE);
            }
            else
            {
                if (parser.Found(MODIFY_SWITCH))
                    m_modify = true;

                if (parser.Found(PROCESS_ONLY_OPTION, &m_strToMatch))
                {
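                    // Strip surrounding double quotes, if present: e.g. a pattern passed
                    // as "wxString" (quotes included) is reduced to wxString before matching.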
                    size_t len = m_strToMatch.Len();
                    if (m_strToMatch.StartsWith("\"") &&
                        m_strToMatch.EndsWith("\"") &&
                        len > 2)
                        m_strToMatch = m_strToMatch.Mid(1, len-2);
                }


                ok = Compare();
            }

            PrintStatistics(wxGetLocalTime() - startTime);
            return ok ? 0 : 1;

        default:
            wxPrintf("\nThis utility checks that the interface XML files created by Doxygen are in\n");
            wxPrintf("synch with the real headers (whose contents are extracted by the gcc XML file).\n\n");
            wxPrintf("The 'gccXML' parameter should be the wxapi.xml file created by the 'rungccxml.sh'\n");
            wxPrintf("script which resides in 'utils/ifacecheck'.\n");
            wxPrintf("The 'doxygenXML' parameter should be the index.xml file created by Doxygen\n");
            wxPrintf("for the wxWidgets 'interface' folder.\n\n");
            wxPrintf("Since the gcc XML file does not contain info about #defines, if you use\n");
            wxPrintf("the -%s option, you'll get a smaller number of false warnings.\n",
                     USE_PREPROCESSOR_OPTION);

            // HELP_SWITCH was passed or a syntax error occurred
            return 0;
    }
}
Example #20
0
void GraphCompressor::Compress(const std::unordered_set<NodeID> &barrier_nodes,
                               const std::unordered_set<NodeID> &traffic_lights,
                               RestrictionMap &restriction_map,
                               util::NodeBasedDynamicGraph &graph,
                               CompressedEdgeContainer &geometry_compressor)
{
    const unsigned original_number_of_nodes = graph.GetNumberOfNodes();
    const unsigned original_number_of_edges = graph.GetNumberOfEdges();

    util::Percent progress(original_number_of_nodes);

    for (const NodeID node_v : util::irange(0u, original_number_of_nodes))
    {
        progress.PrintStatus(node_v);

        // only contract degree 2 vertices
        if (2 != graph.GetOutDegree(node_v))
        {
            continue;
        }

        // don't contract barrier node
        if (barrier_nodes.end() != barrier_nodes.find(node_v))
        {
            continue;
        }

        // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
        if (restriction_map.IsViaNode(node_v))
        {
            continue;
        }

        //    reverse_e2   forward_e2
        // u <---------- v -----------> w
        //    ----------> <-----------
        //    forward_e1   reverse_e1
        //
        // Will be compressed to:
        //
        //    reverse_e1
        // u <---------- w
        //    ---------->
        //    forward_e1
        //
        // If the edges are compatible.
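        // A concrete (purely hypothetical) illustration: if forward_e1 (u -> v) has
        // distance 3 and forward_e2 (v -> w) has distance 4, the code below adds e2's
        // distance to e1 (3 + 4 = 7), retargets e1 from v to w, deletes e2, and records
        // the removed node v in the CompressedEdgeContainer so the original geometry
        // can still be reconstructed.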

        const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
        const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
        BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
        BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) && forward_e2 < graph.EndEdges(node_v));
        const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;
        BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
        BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) && reverse_e2 < graph.EndEdges(node_v));

        const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
        const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);

        const NodeID node_w = graph.GetTarget(forward_e2);
        BOOST_ASSERT(SPECIAL_NODEID != node_w);
        BOOST_ASSERT(node_v != node_w);
        const NodeID node_u = graph.GetTarget(reverse_e2);
        BOOST_ASSERT(SPECIAL_NODEID != node_u);
        BOOST_ASSERT(node_u != node_v);

        const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
        BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
        BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
        const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
        BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
        BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));

        const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
        const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);

        if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
        {
            continue;
        }

        // this case can happen if two ways with different names overlap
        if (fwd_edge_data1.name_id != rev_edge_data1.name_id ||
            fwd_edge_data2.name_id != rev_edge_data2.name_id)
        {
            continue;
        }

        if (fwd_edge_data1.IsCompatibleTo(fwd_edge_data2) &&
            rev_edge_data1.IsCompatibleTo(rev_edge_data2))
        {
            BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id ==
                         graph.GetEdgeData(reverse_e1).name_id);
            BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id ==
                         graph.GetEdgeData(reverse_e2).name_id);

            // Do not compress the edge if it crosses a traffic signal.
            // This can't be done in IsCompatibleTo, because we only store the
            // traffic signals in the `traffic_lights` list, which EdgeData
            // doesn't have access to.
            const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end();
            if (has_node_penalty)
            {
                continue;
            }

            // Get distances before graph is modified
            const int forward_weight1 = graph.GetEdgeData(forward_e1).distance;
            const int forward_weight2 = graph.GetEdgeData(forward_e2).distance;

            BOOST_ASSERT(0 != forward_weight1);
            BOOST_ASSERT(0 != forward_weight2);

            const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance;
            const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance;

            BOOST_ASSERT(0 != reverse_weight1);
            BOOST_ASSERT(0 != reverse_weight2);

            // add weight of e2's to e1
            graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
            graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;

            // extend e1's to targets of e2's
            graph.SetTarget(forward_e1, node_w);
            graph.SetTarget(reverse_e1, node_u);

            // remove e2's (if bidir, otherwise only one)
            graph.DeleteEdge(node_v, forward_e2);
            graph.DeleteEdge(node_v, reverse_e2);

            // update any involved turn restrictions
            restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w);
            restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph);

            restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u);
            restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph);

            // store compressed geometry in container
            geometry_compressor.CompressEdge(
                forward_e1, forward_e2, node_v, node_w, forward_weight1, forward_weight2);
            geometry_compressor.CompressEdge(
                reverse_e1, reverse_e2, node_v, node_u, reverse_weight1, reverse_weight2);
        }
    }

    PrintStatistics(original_number_of_nodes, original_number_of_edges, graph);

    // Repeat the loop, but now add all edges as uncompressed values.
    // The function AddUncompressedEdge does nothing if the edge is already
    // in the CompressedEdgeContainer.
    for (const NodeID node_u : util::irange(0u, original_number_of_nodes))
    {
        for (const auto edge_id : util::irange(graph.BeginEdges(node_u), graph.EndEdges(node_u)))
        {
            const EdgeData &data = graph.GetEdgeData(edge_id);
            const NodeID target = graph.GetTarget(edge_id);
            geometry_compressor.AddUncompressedEdge(edge_id, target, data.distance);
        }
    }
}
//
// Function: main
//
// Description:
//      This is the main program. It parses the command line and creates
//      the main socket. For UDP this socket is used to receive datagrams.
//      For TCP the socket is used to accept incoming client connections.
//      Each client TCP connection is handed off to a worker thread which
//      will receive any data on that connection until the connection is
//      closed.
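//      (A rough, hypothetical sketch of such a worker-thread receive loop follows main, below.)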
//
int __cdecl main(int argc, char **argv)
{
    WSADATA          wsd;
    THREAD_OBJ      *thread=NULL;
    SOCKET_OBJ      *sockobj=NULL,
                    *newsock=NULL;
    int              index,
                     rc;
    struct addrinfo *res=NULL,
                    *ptr=NULL;

    // Validate the command line
    ValidateArgs(argc, argv);

    // Load Winsock
    if (WSAStartup(MAKEWORD(2,2), &wsd) != 0)
    {
        fprintf(stderr, "unable to load Winsock!\n");
        return -1;
    }

    printf("Local address: %s; Port: %s; Family: %d\n",
            gBindAddr, gBindPort, gAddressFamily);

    res = ResolveAddress(gBindAddr, gBindPort, gAddressFamily, gSocketType, gProtocol);
    if (res == NULL)
    {
        fprintf(stderr, "ResolveAddress failed to return any addresses!\n");
        return -1;
    }

    thread = GetThreadObj();

    // For each local address returned, create a listening/receiving socket
    ptr = res;
    while (ptr)
    {
        PrintAddress(ptr->ai_addr, ptr->ai_addrlen); printf("\n");

        sockobj = GetSocketObj(INVALID_SOCKET, (gProtocol == IPPROTO_TCP) ? TRUE : FALSE);

        // create the socket
        sockobj->s = socket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol);
        if (sockobj->s == INVALID_SOCKET)
        {
            fprintf(stderr,"socket failed: %d\n", WSAGetLastError());
            return -1;
        }

        InsertSocketObj(thread, sockobj);

        // bind the socket to a local address and port
        rc = bind(sockobj->s, ptr->ai_addr, ptr->ai_addrlen);
        if (rc == SOCKET_ERROR)
        {
            fprintf(stderr, "bind failed: %d\n", WSAGetLastError());
            return -1;
        }

        if (gProtocol == IPPROTO_TCP)
        {
            rc = listen(sockobj->s, 200);
            if (rc == SOCKET_ERROR)
            {
                fprintf(stderr, "listen failed: %d\n", WSAGetLastError());
                return -1;
            }

            // Register events on the socket
            rc = WSAEventSelect(
                    sockobj->s,
                    sockobj->event,
                    FD_ACCEPT | FD_CLOSE
                    );
            if (rc == SOCKET_ERROR)
            {
                fprintf(stderr, "WSAEventSelect failed: %d\n", WSAGetLastError());
                return -1;
            }
        }
        else
        {
            // Register events on the socket
            rc = WSAEventSelect(
                    sockobj->s,
                    sockobj->event,
                    FD_READ | FD_WRITE | FD_CLOSE
                    );
            if (rc == SOCKET_ERROR)
            {
                fprintf(stderr, "WSAEventSelect failed: %d\n", WSAGetLastError());
                return -1;
            }
        }

        ptr = ptr->ai_next;
    }
    // free the addrinfo structure for the 'bind' address
    freeaddrinfo(res);

    gStartTime = gStartTimeLast = GetTickCount();

    while (1)
    {
        rc = WaitForMultipleObjects(
                thread->SocketCount + 1,
                thread->Handles,
                FALSE,
                5000
                );
        if (rc == WAIT_FAILED)
        {
            fprintf(stderr, "WaitForMultipleObjects failed: %d\n", GetLastError());
            break;
        }
        else if (rc == WAIT_TIMEOUT)
        {
            PrintStatistics();
        }
        else
        {
            index = rc - WAIT_OBJECT_0;

            sockobj = FindSocketObj(thread, index-1);

            if (gProtocol == IPPROTO_TCP)
            {
                SOCKADDR_STORAGE sa;
                WSANETWORKEVENTS ne;
                SOCKET           sc;
                int              salen;

                rc = WSAEnumNetworkEvents(
                        sockobj->s,
                        thread->Handles[index],
                       &ne
                        );
                if (rc == SOCKET_ERROR)
                {
                    fprintf(stderr, "WSAEnumNetworkEvents failed: %d\n", WSAGetLastError());
                    break;
                }

                while (1)
                {
                    sc = INVALID_SOCKET;
                    salen = sizeof(sa);

                    //
                    // For TCP, accept the connection and hand off the client socket
                    // to a worker thread
                    //

                    sc = accept(
                            sockobj->s, 
                            (SOCKADDR *)&sa,
                            &salen
                               );
                    if ((sc == INVALID_SOCKET) && (WSAGetLastError() != WSAEWOULDBLOCK))
                    {
                        fprintf(stderr, "accept failed: %d\n", WSAGetLastError());
                        break;
                    }
                    else if (sc != INVALID_SOCKET)
                    {
                        newsock = GetSocketObj(INVALID_SOCKET, FALSE);

                        // Copy address information
                        memcpy(&newsock->addr, &sa, salen);
                        newsock->addrlen = salen;

                        newsock->s = sc;

                        InterlockedIncrement(&gTotalConnections);
                        InterlockedIncrement(&gCurrentConnections);

                        /*
                           printf("Accepted connection from: ");
                           PrintAddress((SOCKADDR *)&newsock->addr, newsock->addrlen);
                           printf("\n");
                         */

                        // Register for read, write and close on the client socket
                        rc = WSAEventSelect(
                                newsock->s,
                                newsock->event,
                                FD_READ | FD_WRITE | FD_CLOSE
                                           );
                        if (rc == SOCKET_ERROR)
                        {
                            fprintf(stderr, "WSAEventSelect failed: %d\n", WSAGetLastError());
                            break;
                        }

                        AssignToFreeThread(newsock);

                    }
                    else
                    {
                        // Failed with WSAEWOULDBLOCK -- just continue
                        break;
                    }
                }


            }
            else
            {
                // For UDP all we have to do is handle events on the main
                // thread's sockets.
                if (HandleIo(thread, sockobj) == SOCKET_ERROR)
                {
                    RenumberThreadArray(thread);
                }
            }
        }
    }

    WSACleanup();
    return 0;
}
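The worker-thread receive loop that main hands accepted sockets to is not part of this excerpt. What follows is only a rough sketch of what such a loop could look like using standard Winsock calls; the function name ClientWorkerThread, the gBytesRead counter, and the way the socket is passed in are illustrative assumptions, not taken from the sample.

#include <winsock2.h>

static volatile LONG gBytesRead = 0;        // illustration-only counter

// Hypothetical worker-thread receive loop (not part of the sample): waits on the
// client socket's event, drains available data on FD_READ, and exits on FD_CLOSE.
static DWORD WINAPI ClientWorkerThread(LPVOID param)
{
    SOCKET   s  = (SOCKET)param;            // assumed: accepted client socket passed as the thread argument
    WSAEVENT ev = WSACreateEvent();
    char     buf[4096];
    int      rc;

    WSAEventSelect(s, ev, FD_READ | FD_CLOSE);

    for (;;)
    {
        WSANETWORKEVENTS ne;

        WaitForSingleObject(ev, INFINITE);
        WSAEnumNetworkEvents(s, ev, &ne);   // also resets the event

        if (ne.lNetworkEvents & FD_READ)
        {
            // read whatever is currently available; WSAEWOULDBLOCK just means "try again later"
            while ((rc = recv(s, buf, sizeof(buf), 0)) > 0)
                InterlockedExchangeAdd(&gBytesRead, rc);
        }
        if (ne.lNetworkEvents & FD_CLOSE)
            break;
    }

    closesocket(s);
    WSACloseEvent(ev);
    return 0;
}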
Example #22
0
void GraphCompressor::Compress(
    const std::unordered_set<NodeID> &barrier_nodes,
    const std::unordered_set<NodeID> &traffic_signals,
    ScriptingEnvironment &scripting_environment,
    std::vector<TurnRestriction> &turn_restrictions,
    std::vector<ConditionalTurnRestriction> &conditional_turn_restrictions,
    std::vector<UnresolvedManeuverOverride> &maneuver_overrides,
    util::NodeBasedDynamicGraph &graph,
    const std::vector<NodeBasedEdgeAnnotation> &node_data_container,
    CompressedEdgeContainer &geometry_compressor)
{
    const unsigned original_number_of_nodes = graph.GetNumberOfNodes();
    const unsigned original_number_of_edges = graph.GetNumberOfEdges();

    RestrictionCompressor restriction_compressor(
        turn_restrictions, conditional_turn_restrictions, maneuver_overrides);

    // we do not compress degree-two nodes that are via nodes of turn restrictions; these
    // nodes are usually used to indicate `directed` barriers
    std::unordered_set<NodeID> restriction_via_nodes;

    const auto remember_via_nodes = [&](const auto &restriction) {
        if (restriction.Type() == RestrictionType::NODE_RESTRICTION)
        {
            const auto &node = restriction.AsNodeRestriction();
            restriction_via_nodes.insert(node.via);
        }
        else
        {
            BOOST_ASSERT(restriction.Type() == RestrictionType::WAY_RESTRICTION);
            const auto &way = restriction.AsWayRestriction();
            restriction_via_nodes.insert(way.in_restriction.via);
            restriction_via_nodes.insert(way.out_restriction.via);
        }
    };
    std::for_each(turn_restrictions.begin(), turn_restrictions.end(), remember_via_nodes);
    std::for_each(conditional_turn_restrictions.begin(),
                  conditional_turn_restrictions.end(),
                  remember_via_nodes);

    {
        const auto weight_multiplier =
            scripting_environment.GetProfileProperties().GetWeightMultiplier();
        util::UnbufferedLog log;
        util::Percent progress(log, original_number_of_nodes);

        for (const NodeID node_v : util::irange(0u, original_number_of_nodes))
        {
            progress.PrintStatus(node_v);

            // only contract degree 2 vertices
            if (2 != graph.GetOutDegree(node_v))
            {
                continue;
            }

            // don't contract barrier node
            if (barrier_nodes.end() != barrier_nodes.find(node_v))
            {
                continue;
            }

            // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
            if (restriction_via_nodes.count(node_v))
            {
                continue;
            }

            //    reverse_e2   forward_e2
            // u <---------- v -----------> w
            //    ----------> <-----------
            //    forward_e1   reverse_e1
            //
            // Will be compressed to:
            //
            //    reverse_e1
            // u <---------- w
            //    ---------->
            //    forward_e1
            //
            // If the edges are compatible.
            const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
            const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
            BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
            BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) &&
                         forward_e2 < graph.EndEdges(node_v));
            const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;

            BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
            BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) &&
                         reverse_e2 < graph.EndEdges(node_v));

            const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
            const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);

            const NodeID node_w = graph.GetTarget(forward_e2);
            BOOST_ASSERT(SPECIAL_NODEID != node_w);
            BOOST_ASSERT(node_v != node_w);
            const NodeID node_u = graph.GetTarget(reverse_e2);
            BOOST_ASSERT(SPECIAL_NODEID != node_u);
            BOOST_ASSERT(node_u != node_v);

            const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
            BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
            BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
            const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
            BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
            BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));

            const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
            const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);
            const auto fwd_annotation_data1 = node_data_container[fwd_edge_data1.annotation_data];
            const auto fwd_annotation_data2 = node_data_container[fwd_edge_data2.annotation_data];
            const auto rev_annotation_data1 = node_data_container[rev_edge_data1.annotation_data];
            const auto rev_annotation_data2 = node_data_container[rev_edge_data2.annotation_data];

            if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
            {
                continue;
            }

            // this case can happen if two ways with different names overlap
            if ((fwd_annotation_data1.name_id != rev_annotation_data1.name_id) ||
                (fwd_annotation_data2.name_id != rev_annotation_data2.name_id))
            {
                continue;
            }

            if ((fwd_edge_data1.flags == fwd_edge_data2.flags) &&
                (rev_edge_data1.flags == rev_edge_data2.flags) &&
                (fwd_edge_data1.reversed == fwd_edge_data2.reversed) &&
                (rev_edge_data1.reversed == rev_edge_data2.reversed) &&
                // annotations need to match, except for the lane-id which can differ
                fwd_annotation_data1.CanCombineWith(fwd_annotation_data2) &&
                rev_annotation_data1.CanCombineWith(rev_annotation_data2))
            {
                BOOST_ASSERT(!(graph.GetEdgeData(forward_e1).reversed &&
                               graph.GetEdgeData(reverse_e1).reversed));
                /*
                 * Remember lane data for compressed parts. This handles scenarios where
                 * lane data is only kept up until a traffic light.
                 *
                 *                |    |
                 * ----------------    |
                 *         -^ |        |
                 * -----------         |
                 *         -v |        |
                 * ---------------     |
                 *                |    |
                 *
                 *  u ------- v ---- w
                 *
                 * Since the edge is compressible, we can transfer "left|right" (uv) and
                 * "" (vw) into "left|right" (uw) for the compressed edge. Doing so, we
                 * might mess up the point from which the lanes are shown. That should be
                 * reasonable, since the announcements have to come early anyhow. So there
                 * is a potential danger here, but it saves us from adding a lot of
                 * additional edges for turn lanes. Without this, we would have to treat
                 * any turn-lane beginning/ending just like a barrier.
                 */
                const auto selectAnnotation = [&node_data_container](
                    const AnnotationID front_annotation, const AnnotationID back_annotation) {
                    // A lane has tags: u - (front) - v - (back) - w
                    // During contraction, we keep only one of the tags. Usually the one closer
                    // to the intersection is preferred. If its empty, however, we keep the
                    // non-empty one
                    if (node_data_container[back_annotation].lane_description_id ==
                        INVALID_LANE_DESCRIPTIONID)
                        return front_annotation;
                    return back_annotation;
                };

                graph.GetEdgeData(forward_e1).annotation_data = selectAnnotation(
                    fwd_edge_data1.annotation_data, fwd_edge_data2.annotation_data);
                graph.GetEdgeData(reverse_e1).annotation_data = selectAnnotation(
                    rev_edge_data1.annotation_data, rev_edge_data2.annotation_data);
                graph.GetEdgeData(forward_e2).annotation_data = selectAnnotation(
                    fwd_edge_data2.annotation_data, fwd_edge_data1.annotation_data);
                graph.GetEdgeData(reverse_e2).annotation_data = selectAnnotation(
                    rev_edge_data2.annotation_data, rev_edge_data1.annotation_data);

                // Do not compress the edge if it crosses a traffic signal.
                // This can't be done in CanCombineWith, because we only store the
                // traffic signals in the `traffic_signals` set, which EdgeData
                // doesn't have access to.
                const bool has_node_penalty = traffic_signals.find(node_v) != traffic_signals.end();
                EdgeDuration node_duration_penalty = MAXIMAL_EDGE_DURATION;
                EdgeWeight node_weight_penalty = INVALID_EDGE_WEIGHT;
                if (has_node_penalty)
                {
                    // we cannot handle this as node penalty, if it depends on turn direction
                    if (fwd_edge_data1.flags.restricted != fwd_edge_data2.flags.restricted)
                        continue;

                    // generate an artificial turn for the turn penalty generation
                    std::vector<ExtractionTurnLeg> roads_on_the_right;
                    std::vector<ExtractionTurnLeg> roads_on_the_left;
                    ExtractionTurn extraction_turn(0,
                                                   2,
                                                   false,
                                                   true,
                                                   false,
                                                   false,
                                                   TRAVEL_MODE_DRIVING,
                                                   false,
                                                   false,
                                                   1,
                                                   0,
                                                   0,
                                                   0,
                                                   0,
                                                   false,
                                                   TRAVEL_MODE_DRIVING,
                                                   false,
                                                   false,
                                                   1,
                                                   0,
                                                   0,
                                                   0,
                                                   0,
                                                   roads_on_the_right,
                                                   roads_on_the_left);
                    scripting_environment.ProcessTurn(extraction_turn);
                    node_duration_penalty = extraction_turn.duration * 10;
                    node_weight_penalty = extraction_turn.weight * weight_multiplier;
                }
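                // Illustrative only (assuming ProcessTurn reports the duration in seconds):
                // a 2-second signal delay at v would give node_duration_penalty = 20 and a
                // weight penalty scaled by weight_multiplier. Both penalties are added to the
                // compressed edge's weight and duration below, and are also passed on to
                // CompressEdge together with the individual per-edge weights and durations.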

                // Get weights before graph is modified
                const auto forward_weight1 = fwd_edge_data1.weight;
                const auto forward_weight2 = fwd_edge_data2.weight;
                const auto forward_duration1 = fwd_edge_data1.duration;
                const auto forward_duration2 = fwd_edge_data2.duration;
                const auto forward_distance2 = fwd_edge_data2.distance;

                BOOST_ASSERT(0 != forward_weight1);
                BOOST_ASSERT(0 != forward_weight2);

                const auto reverse_weight1 = rev_edge_data1.weight;
                const auto reverse_weight2 = rev_edge_data2.weight;
                const auto reverse_duration1 = rev_edge_data1.duration;
                const auto reverse_duration2 = rev_edge_data2.duration;
                const auto reverse_distance2 = rev_edge_data2.distance;

#ifndef NDEBUG
                // Because distances are symmetrical, we only need one
                // per edge - here we double-check that they match
                // their mirrors.
                const auto reverse_distance1 = rev_edge_data1.distance;
                const auto forward_distance1 = fwd_edge_data1.distance;
                BOOST_ASSERT(forward_distance1 == reverse_distance2);
                BOOST_ASSERT(forward_distance2 == reverse_distance1);
#endif

                BOOST_ASSERT(0 != reverse_weight1);
                BOOST_ASSERT(0 != reverse_weight2);

                // add weight of e2's to e1
                graph.GetEdgeData(forward_e1).weight += forward_weight2;
                graph.GetEdgeData(reverse_e1).weight += reverse_weight2;

                // add duration of e2's to e1
                graph.GetEdgeData(forward_e1).duration += forward_duration2;
                graph.GetEdgeData(reverse_e1).duration += reverse_duration2;

                // add distance of e2's to e1
                graph.GetEdgeData(forward_e1).distance += forward_distance2;
                graph.GetEdgeData(reverse_e1).distance += reverse_distance2;

                if (node_weight_penalty != INVALID_EDGE_WEIGHT &&
                    node_duration_penalty != MAXIMAL_EDGE_DURATION)
                {
                    graph.GetEdgeData(forward_e1).weight += node_weight_penalty;
                    graph.GetEdgeData(reverse_e1).weight += node_weight_penalty;
                    graph.GetEdgeData(forward_e1).duration += node_duration_penalty;
                    graph.GetEdgeData(reverse_e1).duration += node_duration_penalty;
                    // Note: no penalties for distances
                }

                // extend e1's to targets of e2's
                graph.SetTarget(forward_e1, node_w);
                graph.SetTarget(reverse_e1, node_u);

                // remove e2's (if bidir, otherwise only one)
                graph.DeleteEdge(node_v, forward_e2);
                graph.DeleteEdge(node_v, reverse_e2);

                // update any involved turn restrictions
                restriction_compressor.Compress(node_u, node_v, node_w);

                // store compressed geometry in container
                geometry_compressor.CompressEdge(forward_e1,
                                                 forward_e2,
                                                 node_v,
                                                 node_w,
                                                 forward_weight1,
                                                 forward_weight2,
                                                 forward_duration1,
                                                 forward_duration2,
                                                 node_weight_penalty,
                                                 node_duration_penalty);
                geometry_compressor.CompressEdge(reverse_e1,
                                                 reverse_e2,
                                                 node_v,
                                                 node_u,
                                                 reverse_weight1,
                                                 reverse_weight2,
                                                 reverse_duration1,
                                                 reverse_duration2,
                                                 node_weight_penalty,
                                                 node_duration_penalty);
            }
        }
    }

    PrintStatistics(original_number_of_nodes, original_number_of_edges, graph);

    // Repeat the loop, but now add all edges as uncompressed values.
    // The function AddUncompressedEdge does nothing if the edge is already
    // in the CompressedEdgeContainer.
    for (const NodeID node_u : util::irange(0u, original_number_of_nodes))
    {
        for (const auto edge_id : util::irange(graph.BeginEdges(node_u), graph.EndEdges(node_u)))
        {
            const EdgeData &data = graph.GetEdgeData(edge_id);
            const NodeID target = graph.GetTarget(edge_id);
            geometry_compressor.AddUncompressedEdge(edge_id, target, data.weight, data.duration);
        }
    }
}
Example #23
0
File: main.C Project: elau/graphite_pep
int	main(int argc, CHAR *argv[])
	{
	INT	i;
	UINT	begin;
	UINT	end;
	UINT	lapsed;
	MATRIX	vtrans, Vinv;		/*  View transformation and inverse. */


	/*
	 *	First, process command line arguments.
	 */
	i = 1;
	while ((i < argc) && (argv[i][0] == '-')) {
		switch (argv[i][1]) {
			case '?':
			case 'h':
			case 'H':
				Usage();
				exit(1);

			case 'a':
			case 'A':
				AntiAlias = TRUE;
				if (argv[i][2] != '\0') {
					NumSubRays = atoi(&argv[i][2]);
				} else {
					NumSubRays = atoi(&argv[++i][0]);
				}
				break;

			case 'm':
				if (argv[i][2] != '\0') {
					MaxGlobMem = atoi(&argv[i][2]);
				} else {
					MaxGlobMem = atoi(&argv[++i][0]);
				}
				break;

			case 'p':
				if (argv[i][2] != '\0') {
					nprocs = atoi(&argv[i][2]);
				} else {
					nprocs = atoi(&argv[++i][0]);
				}
				break;

			case 's':
			case 'S':
				dostats = TRUE;
				break;

			default:
				fprintf(stderr, "%s: Invalid option \'%c\'.\n", ProgName, argv[i][0]);
				exit(1);
		}
		i++;
	}

	if (i == argc) {
		Usage();
		exit(1);
	}


	/*
	 *	Make sure nprocs is within valid range.
	 */

	if (nprocs < 1 || nprocs > MAX_PROCS)
		{
		fprintf(stderr, "%s: Valid range for #processors is [1, %d].\n", ProgName, MAX_PROCS);
		exit(1);
		}


	/*
	 *	Print command line parameters.
	 */

	printf("\n");
	printf("Number of processors:     \t%ld\n", nprocs);
	printf("Global shared memory size:\t%ld MB\n", MaxGlobMem);
	printf("Samples per pixel:        \t%ld\n", NumSubRays);
	printf("\n");


	/*
	 *	Initialize the shared memory environment and request the total
	 *	amount of shared memory we might need.  This includes memory
	 *	for the database, grid, and framebuffer.
	 */

	MaxGlobMem <<= 20;			/* Convert MB to bytes.      */
	MAIN_INITENV(,MaxGlobMem + 512*1024)
	THREAD_INIT_FREE();
	gm = (GMEM *)G_MALLOC(sizeof(GMEM));


	/*
	 *	Perform shared environment initializations.
	 */

	gm->nprocs = nprocs;
	gm->pid    = 0;
	gm->rid    = 1;

	BARINIT(gm->start, nprocs)
	LOCKINIT(gm->pidlock)
	LOCKINIT(gm->ridlock)
	LOCKINIT(gm->memlock)
	ALOCKINIT(gm->wplock, nprocs)

/* POSSIBLE ENHANCEMENT:  Here is where one might distribute the
   raystruct data structure across physically distributed memories as
   desired.  */

	if (!GlobalHeapInit(MaxGlobMem))
		{
		fprintf(stderr, "%s: Cannot initialize global heap.\n", ProgName);
		exit(1);
		}


	/*
	 *	Initialize HUG parameters, read environment and geometry files.
	 */

	Huniform_defaults();
	ReadEnvFile(/* *argv*/argv[i]);
	ReadGeoFile(GeoFileName);
	OpenFrameBuffer();


	/*
	 *	Compute view transform and its inverse.
	 */

	CreateViewMatrix();
	MatrixCopy(vtrans, View.vtrans);
	MatrixInverse(Vinv, vtrans);
	MatrixCopy(View.vtransInv, Vinv);


	/*
	 *	Print out what we have so far.
	 */

	printf("Number of primitive objects: \t%ld\n", prim_obj_cnt);
	printf("Number of primitive elements:\t%ld\n", prim_elem_cnt);

	/*
	 *	Preprocess database into hierarchical uniform grid.
	 */

	if (TraversalType == TT_HUG)
		BuildHierarchy_Uniform();



	/*
	 *	Now create slave processes.
	 */

	CLOCK(begin)
	CREATE(StartRayTrace, gm->nprocs);
	WAIT_FOR_END(gm->nprocs);
	CLOCK(end)



	/*
	 *	We are finished.  Clean up, print statistics and run time.
	 */

	CloseFrameBuffer(PicFileName);
	PrintStatistics();

	lapsed = (end - begin) & 0x7FFFFFFF;



	printf("TIMING STATISTICS MEASURED BY MAIN PROCESS:\n");
	printf("        Overall start time     %20lu\n", begin);
	printf("        Overall end time   %20lu\n", end);
	printf("        Total time with initialization  %20lu\n", lapsed);
	printf("        Total time without initialization  %20lu\n", end - gm->par_start_time);

    if (dostats) {
        unsigned totalproctime, maxproctime, minproctime;

        printf("\n\n\nPER-PROCESS STATISTICS:\n");

        printf("%20s%20s\n","Proc","Time");
        printf("%20s%20s\n\n","","Tracing Rays");
        for (i = 0; i < gm->nprocs; i++)
            printf("%20ld%20ld\n",i,gm->partime[i]);

        totalproctime = gm->partime[0];
        minproctime = gm->partime[0];
        maxproctime = gm->partime[0];

        for (i = 1; i < gm->nprocs; i++) {
            totalproctime += gm->partime[i];
            if (gm->partime[i] > maxproctime)
                maxproctime = gm->partime[i];
            if (gm->partime[i] < minproctime)
                minproctime = gm->partime[i];
        }
        printf("\n\n%20s%20d\n","Max = ",maxproctime);
        printf("%20s%20d\n","Min = ",minproctime);
        printf("%20s%20d\n","Avg = ",(int) (((double) totalproctime) / ((double) (1.0 * gm->nprocs))));
    }

	MAIN_END
	}
int main()
{
	unsigned int userInputChoice = 0;
	bool isSchedulingAlgorithmCompleted[6] = { false, false, false, false, false, false };

	FileUtility small( smallTaskName, smallInputFileName, smallOutputFileName, smallFileNumberCount, timeQuantum );
	FileUtility medium( mediumTaskName, mediumInputFileName, mediumOutputFileName, mediumFileNumberCount, timeQuantum );
	FileUtility large( largeTaskName, largeInputFileName, largeOutputFileName, largeFileNumberCount, timeQuantum );

	halfTime = ( smallFileNumberCount + mediumFileNumberCount + largeFileNumberCount ) / 2;
	GenerateInputFiles( small, medium, large );

	while( userInputChoice != INPUT_CHOICE_EXIT)
	{
		userInputChoice = 0;
		timeStamp = 0;
		small.InitialiseMemberVariables();
		medium.InitialiseMemberVariables();
		large.InitialiseMemberVariables();
		cout << endl;
		cout << "Enter input choice based on the Scheduling algorithm you wish to perform." << endl;
		cout << "1. First Come First Serve : Small -> Medium -> Large" << endl;
		cout << "2. First Come First Serve : Large -> Medium -> Small" << endl;
		cout << "3. Priority Scheduling : Equal priority" << endl;
		cout << "4. Priority Scheduling : Inverse of the file size" << endl;
		cout << "5. Priority Scheduling : Proportional to the file size" << endl;
		cout << "6. Print statistics generated so far" << endl;
		cout << "7. Exit program." << endl;
		cout << "Please enter your choice :";
		cin >> userInputChoice;
		// remove any stale output files before generating new ones
		if ( userInputChoice != PRINT_STATISTICS && userInputChoice != INPUT_CHOICE_EXIT )
			RemoveOutputFiles( small, medium, large );
		switch( userInputChoice )
		{
			case FCFS_SMALL_TO_LARGE:
				isSchedulingAlgorithmCompleted[ FCFS_SMALL_TO_LARGE ] = true;
				FirstComeFirstServe( userInputChoice, small, medium, large );
				cout << "Completed generating output files for First Come First Serve : Small -> Medium -> Large." << endl;
				break;
			case FCFS_LARGE_TO_SMALL:
				isSchedulingAlgorithmCompleted[ FCFS_LARGE_TO_SMALL ] = true;
				FirstComeFirstServe( userInputChoice, large, medium, small );
				cout << "Completed generating output files for First Come First Serve : Large -> Medium -> Small." << endl;
				break;
			case PS_EQUAL_PRIORITY:
				isSchedulingAlgorithmCompleted[ PS_EQUAL_PRIORITY ] = true;
				PrioritySchedulingWithEqualPriority( small, medium, large );
				cout << "Completed generating output files for Priority Scheduling : Equal priority." << endl;
				break;
			case PS_INVERSE_FILE_SIZE:
				isSchedulingAlgorithmCompleted[ PS_INVERSE_FILE_SIZE ] = true;
				PriorityScheduling( userInputChoice, small, medium, large );
				cout << "Completed generating output files for Priority Scheduling : Inverse of the file size." << endl;
				break;
			case PS_PROPORTIONAL_FILE_SIZE:
				isSchedulingAlgorithmCompleted[ PS_PROPORTIONAL_FILE_SIZE ] = true;
				PriorityScheduling( userInputChoice, large, medium, small );
				cout << "Completed generating output files for Priority Scheduling : Proportional to the file size." << endl;
				break;
			case PRINT_STATISTICS:
				PrintStatistics( isSchedulingAlgorithmCompleted, small, medium, large );
				break;
			case INPUT_CHOICE_EXIT:
				break;
			default:
				cout << "Incorrect choice entered. Enter correct choice." << endl;
				break;
		}
	}
	cout << endl;
	cout << "Exiting program..!" << endl;

	return 0;
}