void printMenu(){
	int choice;
	do{
		choice = 0;
		printf("Performance assessment:\n");
		printf("-----------------------\n");
		printf("1) Enter parameters\n");
		printf("2) Print table of parameters\n");
		printf("3) Print table of performance\n");
		printf("4) Quit\n");
		printf("\nEnter selection: ");
		scanf("%d", &choice);
		switch(choice){
			case 1:
				enterParameters();
				break;
			case 2:
				printParameters();
				break;
			case 3:
				printPerformance();
				break;
			case 4:
				exit(0);
				break;
			default:
				printf("Invalid selection\n\n");
				//Clear the input stream in case of erroneous inputs
				while ((choice = getchar()) != '\n' && choice != EOF);
				if (choice == EOF)
					exit(0);	//No more input: stop looping
				break;
		}//Switch
	}while(choice != 4);
}//printMenu
Example #2
File: com.c Project: 8l/insieme
void
printReportHeader ( void )
{
  char *now;
  int nameSize;
  char myProcName[MPI_MAX_PROCESSOR_NAME];

  if ( rank == 0 )
  {
    now = getTimeStr (  );
    MPI_Get_processor_name ( myProcName, &nameSize );

    printSeparator (  );
    printf ( "\n  com Point-to-Point MPI Bandwidth and Latency Benchmark\n" );
    printf ( "  Version %d.%d.%d\n", majorVersion, minorVersion,
             patchVersion );
    printf ( "  Run at %s, with rank 0 on %s\n\n", now, myProcName );
    free ( now );
    printParameters (  );

    if ( presta_check_data == 1 )
    {
      printf
        ( "  *** Verifying operation message buffers.  Benchmark results not valid! ***\n" );
    }

    if ( strlen ( procSrcTitle ) > 0 )
      printf ( "  Using Task Pair file %s\n", procSrcTitle );

    printf ( "\n" );

    if ( strcmp ( argStruct.testList, "Latency" ) != 0 )
    {
      if ( argStruct.sumLocalBW == 1 )
        printf ( "  Bandwidth calculated as sum of process bandwidths.\n" );
      else if ( argStruct.sumLocalBW == 0 )
        printf
          ( "  Bandwidth calculated as total volume / longest task communication.\n" );

      /*  TODO : Not necessary unless providing info about sample time.
         printTimingInfo ( );   
       */
    }
  }


  if ( argStruct.printHostInfo )
    printCommTargets ( argStruct.procsPerNode, argStruct.useNearestRank );

  printSeparator (  );

}
Example #3
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);


  if ( rank == 0 )
  {
    //sleep(5);
    //while (!__sync_bool_compare_and_swap(&r_sync, 59, 0)){ sleep(2); printf("sync value %d \n", r_sync); };
    int k = 0;
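    /* Count the OpenMP team size: each thread atomically increments k once. */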
    #pragma omp parallel
    #pragma omp atomic
    k++;
    printf ("Done Sync. Number of Threads counted = %i\n", k);

    int N = 1048576;
    int i = 0;
    int j = 0;
    double start, diff;
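    /* Assumption: a and b are file-scope double arrays with at least N
       elements, declared elsewhere in this project.  The loop below times
       an N-element array copy 50 times to gauge on-node memory bandwidth. */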
    for ( j = 0; j < 50; j++ )
    {
      #pragma omp parallel for
      for ( i = 0; i < N; i++ )
      {
        a[i] = 1.0;
        b[i] = 2.0;
      }

      start = MPI_Wtime();
      #pragma omp parallel for
      for ( i = 0; i < N; i++ )
      {
        a[i] = b[i];
      }
      diff = MPI_Wtime() - start;
      printf("ELAPSED time to copy : %11.6f N: %d \n", diff, N);
    }
  }
  else
  {
    //__sync_fetch_and_add(&r_sync, 1);
    printf("OK other ranks  \n");
    //sleep(100);
  }
  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Finalize();

  return 0;
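
  /* NOTE: the early return above makes the original benchmark driver
     below unreachable; Example #6 shows essentially the same code
     without the debug block. */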
  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();


  //Set up intra-node shared memory structures.
  if ( rank == 0 )
  {
    //One rank per node allocates shared send request lists.
    req_array = malloc(wsize*sizeof(ReqInfo));
    /*if(!IS_SM_BUF((void*)req_array)){
        printf("Failed to allocate from SM region :%p \n", req_array);
        exit(-1);
    }*/
  }
  //Broadcast the pointer value so that every rank can reach the shared
  //region (assumes sizeof(long) == sizeof(void*), as on LP64 systems)
  MPI_Bcast(&req_array , 1, MPI_LONG, 0, MPI_COMM_WORLD);
  //printf("Broadcasting of shared mem region complete! rank : %d  region_addr : %lu region_ptr : %lu \n", rank, &req_array, req_array);
  //printf("Access shm region ! rank : %d  region for sync : %lu  \n", rank, req_array[rank].sync);
 /* 
  if(rank == 0){
	char* comBuf  = (char*)malloc(10);
	memset(comBuf, 1, 10);
	req_array[30].buffer = comBuf ;
	comBuf[0] = 'n';
        req_array[30].sync = malloc(sizeof(int));
        req_array[30].sync[0] = 12;
	printf("rank : %d done sending buff ptr : %p  sync ptr : %p \n",rank, comBuf, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
	sleep(40000);
  }else if(rank == 30){
	while(req_array[30].sync == NULL){
	
	}

	while(req_array[30].sync[0] != 12){
	
	}
	printf("rank : %d  buffer value: %c sync value : %d  buff ptr : %p  sync ptr : %p  \n",
		rank, req_array[30].buffer[0], req_array[30].sync[0], req_array[30].buffer, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
		sleep(40000);
  } 
  MPI_Barrier(MPI_COMM_WORLD);
  return 0;*/

  printPairs = 1;
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
//    messStart = 8388608;
//    messStop = 4096 ;
//    messStop = 8388608 ;
    for ( messSize = messStart; messSize <= messStop;
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
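        /* Bandwidth in MB/s: procs/2 sending ranks each move iters
           messages of messSize bytes during the timed interval. */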
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    // Create Communicator of all active processes
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank,
                              wsize, procsPerNode, allocPattern,
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
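        /* Bidirectional MB/s: all procs active ranks send and receive
           iters messages of messSize bytes concurrently. */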
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }


  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
    free(req_array);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}
Example #4
File: laten.c Project: uswick/presta_HMPI
int main(int argc, char** argv)
{
  int rank, wsize, iters, i, procs, currtarg, dummy;
  double diff = 0.0;
  double start, max, mintime = 9999;
  MPI_Status stat;
  char comBuf;
  MPI_Comm activeComm;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    messStart, messStop, messFactor;
  int    procsPerNode, procIdx, useBarrier, printPairs, useNearestRank;
  char   allocPattern;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, 
                    &dummy, &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  currtarg = getTargetRank(rank, wsize, procsPerNode, useNearestRank);

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
    

    if ( isActiveProc(rank, wsize, procsPerNode, procs, 
                      allocPattern, useNearestRank) )
    {
      if ( rank < currtarg )
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
      }
      else 
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
      }
    
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);

      if ( rank < currtarg )
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        }
      }
      else 
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        }
      }

      if ( useBarrier )
        MPI_Barrier(activeComm);
        //generic_barrier(activeComm);

      diff = MPI_Wtime() - start;
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    /*  Get maximum sample length  */
    MPI_Reduce(&diff, &max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    if ( rank == 0 )
    {
      if ( max < mintime )
        mintime = max;

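      /* One-way latency in microseconds: half the average round-trip
         time over iters ping-pong exchanges. */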
      printf(outputFormat, procs, max/iters/2*1000000);
    }
  }
  
  if ( rank == 0 )
  {
    printParameters(iters, procFile, procsPerNode, 
                    allocPattern, useBarrier);
  }

  printReportFooter(mintime, rank, wsize, procsPerNode, useNearestRank);

  MPI_Finalize();

  exit(0);
}
Example #5
void DllEnumerator::ReceivePid( DWORD dwPid )
{
//	printf("********************\nPID: %d\n", dwPid);
	char szPid[16] = {0};
	sprintf(szPid, "%lu", dwPid);	// DWORD is unsigned long, so use %lu


	TiXmlElement* process = new TiXmlElement("process");

	HANDLE hProcess = OpenProcess(
		PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
		FALSE,
		dwPid);

	if (NULL == hProcess)	// OpenProcess returns NULL, not INVALID_HANDLE_VALUE, on failure
	{
		delete process;
		return;
	}

	HMODULE hNtdll = LoadLibrary(L"Ntdll.dll");
	if (NULL == hNtdll)	// LoadLibrary also returns NULL on failure
	{
		CloseHandle(hProcess);
		delete process;
		return;
	}

	typedef NTSTATUS (WINAPI * PFNNtQueryInformationProcess)(
		__in          HANDLE ProcessHandle,
		__in          PROCESSINFOCLASS ProcessInformationClass,
		__out         PVOID ProcessInformation,
		__in          ULONG ProcessInformationLength,
		__out_opt     PULONG ReturnLength
		);

	PFNNtQueryInformationProcess pfnNtQueryInformationProcess
		= (PFNNtQueryInformationProcess)GetProcAddress(
		hNtdll,
		"NtQueryInformationProcess");
	if (NULL == pfnNtQueryInformationProcess)	// Never call a NULL function pointer
	{
		FreeLibrary(hNtdll);
		CloseHandle(hProcess);
		delete process;
		return;
	}

	PROCESS_BASIC_INFORMATION pbi;
	DWORD dwLen = sizeof(pbi);
	NTSTATUS ret = pfnNtQueryInformationProcess(
		hProcess,
		ProcessBasicInformation,
		&pbi,
		dwLen,
		&dwLen);
	if (!NT_SUCCESS(ret))
	{
		FreeLibrary(hNtdll);
		CloseHandle(hProcess);
		delete process;
		return;
	}

	// 		UNICODE_STRING imageName;
	// 		dwLen = sizeof(imageName);
	// 		ret = pfnNtQueryInformationProcess(
	// 			hProcess,
	// 			ProcessImageFileName,
	// 			&imageName,
	// 			dwLen,
	// 			&dwLen);
	// 		if (!NT_SUCCESS(ret))
	// 		{
	// 			continue;
	// 		}

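	// Read the remote process's PEB so its Ldr and ProcessParameters
	// pointers can be followed below.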
	PEB peb;
	memset(&peb, 0 , sizeof(peb));
	dwLen = sizeof(peb);
	ReadProcessMemory(
		hProcess,
		pbi.PebBaseAddress,
		&peb,
		dwLen,
		&dwLen);

	printPEB(&peb, process);

	PEB_LDR_DATA ldr;
	memset(&ldr, 0, sizeof(ldr));
	dwLen = sizeof(ldr);
	ReadProcessMemory(
		hProcess,
		peb.Ldr,
		&ldr,
		dwLen,
		&dwLen);

	printLdr(&ldr, hProcess, process);

	RTL_USER_PROCESS_PARAMETERS parameters;
	memset(&parameters, 0, sizeof(parameters));
	dwLen = sizeof(parameters);
	ReadProcessMemory(
		hProcess,
		peb.ProcessParameters,
		&parameters,
		dwLen,
		&dwLen);

	printParameters(&parameters, hProcess, process);

	m_root->LinkEndChild(process);
}
Example #6
File: com.c Project: uswick/presta_HMPI
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank, 
                              wsize, procsPerNode, allocPattern, 
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }

  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}
Example #7
/*! \fn void dumpInitialSolution(DATA *simData)
 *
 *  \param [in]  [simData]
 *
 *  \author lochel
 */
void dumpInitialSolution(DATA *simData)
{
  long i;

  const MODEL_DATA      *mData = simData->modelData;
  const SIMULATION_INFO *sInfo = simData->simulationInfo;

  if (ACTIVE_STREAM(LOG_INIT))
    printParameters(simData, LOG_INIT);

  if (!ACTIVE_STREAM(LOG_SOTI)) return;
  infoStreamPrint(LOG_SOTI, 1, "### SOLUTION OF THE INITIALIZATION ###");
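  /* The second argument (1) here appears to open an indented log block;
     each messageClose(LOG_SOTI) below ends one. */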

  if (0 < mData->nStates)
  {
    infoStreamPrint(LOG_SOTI, 1, "states variables");
    for(i=0; i<mData->nStates; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] Real %s(start=%g, nominal=%g) = %g (pre: %g)", i+1,
                                   mData->realVarsData[i].info.name,
                                   mData->realVarsData[i].attribute.start,
                                   mData->realVarsData[i].attribute.nominal,
                                   simData->localData[0]->realVars[i],
                                   sInfo->realVarsPre[i]);
    messageClose(LOG_SOTI);
  }

  if (0 < mData->nStates)
  {
    infoStreamPrint(LOG_SOTI, 1, "derivatives variables");
    for(i=mData->nStates; i<2*mData->nStates; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] Real %s = %g (pre: %g)", i+1,
                                   mData->realVarsData[i].info.name,
                                   simData->localData[0]->realVars[i],
                                   sInfo->realVarsPre[i]);
    messageClose(LOG_SOTI);
  }

  if (2*mData->nStates < mData->nVariablesReal)
  {
    infoStreamPrint(LOG_SOTI, 1, "other real variables");
    for(i=2*mData->nStates; i<mData->nVariablesReal; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] Real %s(start=%g, nominal=%g) = %g (pre: %g)", i+1,
                                   mData->realVarsData[i].info.name,
                                   mData->realVarsData[i].attribute.start,
                                   mData->realVarsData[i].attribute.nominal,
                                   simData->localData[0]->realVars[i],
                                   sInfo->realVarsPre[i]);
    messageClose(LOG_SOTI);
  }

  if (0 < mData->nVariablesInteger)
  {
    infoStreamPrint(LOG_SOTI, 1, "integer variables");
    for(i=0; i<mData->nVariablesInteger; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] Integer %s(start=%ld) = %ld (pre: %ld)", i+1,
                                   mData->integerVarsData[i].info.name,
                                   mData->integerVarsData[i].attribute.start,
                                   simData->localData[0]->integerVars[i],
                                   sInfo->integerVarsPre[i]);
    messageClose(LOG_SOTI);
  }

  if (0 < mData->nVariablesBoolean)
  {
    infoStreamPrint(LOG_SOTI, 1, "boolean variables");
    for(i=0; i<mData->nVariablesBoolean; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] Boolean %s(start=%s) = %s (pre: %s)", i+1,
                                   mData->booleanVarsData[i].info.name,
                                   mData->booleanVarsData[i].attribute.start ? "true" : "false",
                                   simData->localData[0]->booleanVars[i] ? "true" : "false",
                                   sInfo->booleanVarsPre[i] ? "true" : "false");
    messageClose(LOG_SOTI);
  }

  if (0 < mData->nVariablesString)
  {
    infoStreamPrint(LOG_SOTI, 1, "string variables");
    for(i=0; i<mData->nVariablesString; ++i)
      infoStreamPrint(LOG_SOTI, 0, "[%ld] String %s(start=\"%s\") = \"%s\" (pre: \"%s\")", i+1,
                                   mData->stringVarsData[i].info.name,
                                   MMC_STRINGDATA(mData->stringVarsData[i].attribute.start),
                                   MMC_STRINGDATA(simData->localData[0]->stringVars[i]),
                                   MMC_STRINGDATA(sInfo->stringVarsPre[i]));
    messageClose(LOG_SOTI);
  }

  messageClose(LOG_SOTI);
}