PetscErrorCode BSSCR_GeneratePetscHeader_for_viewer( PetscViewer viewer )
{
  PetscErrorCode ierr;
  char           version[256];
  char           arch[50], hostname[64], username[16], pname[PETSC_MAX_PATH_LEN], date[64];
  int            size;
  const char     *_dir;
  MPI_Comm       comm;

  ierr = PetscGetArchType(arch, 50);CHKERRQ(ierr);
  ierr = PetscGetHostName(hostname, 64);CHKERRQ(ierr);
  ierr = PetscGetUserName(username, 16);CHKERRQ(ierr);
  ierr = PetscGetProgramName(pname, PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
  ierr = PetscGetDate(date, 64);CHKERRQ(ierr);
  ierr = PetscGetVersion(version, 256);CHKERRQ(ierr); /* pass the buffer itself, not its address */
  ierr = PetscGetPetscDir(&_dir);CHKERRQ(ierr);

  PetscObjectGetComm( (PetscObject)viewer, &comm );
  MPI_Comm_size( comm, &size );

  if( size == 1 ) {
    PetscViewerASCIIPrintf( viewer, "## %s on a %s named %s with %d processor, by %s %s\n", pname, arch, hostname, size, username, date );
  } else {
    PetscViewerASCIIPrintf( viewer, "## %s on a %s named %s with %d processors, by %s %s\n", pname, arch, hostname, size, username, date );
  }
  PetscViewerASCIIPrintf( viewer, "## Using %s, installed at %s \n", version, _dir );

  PetscFunctionReturn(0);
}
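/*
   A minimal usage sketch (not part of the original source): route the BSSCR header to
   stdout through PETSc's shared ASCII viewer. Assumes PETSc is initialized and that a
   prototype for BSSCR_GeneratePetscHeader_for_viewer() is in scope; the name
   ExampleWriteHeaderToStdout is illustrative only.
*/
#include <petscviewer.h>

static PetscErrorCode ExampleWriteHeaderToStdout(void)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  PetscFunctionBegin;
  ierr = PetscViewerASCIIGetStdout(PETSC_COMM_WORLD,&viewer);CHKERRQ(ierr); /* shared stdout viewer; not destroyed by the caller */
  ierr = BSSCR_GeneratePetscHeader_for_viewer(viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}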
/*@C
   PetscHMPISpawn - Initialize additional processes to be used as "worker" processes. This is not generally
     called by users. One should use -hmpi_spawn_size <n> to indicate that you wish to have n-1 new MPI
     processes spawned for each current process.

   Not Collective (could make collective on MPI_COMM_WORLD, generate one huge comm and then split it up)

   Input Parameter:
.  nodesize - size of each compute node that will share processors

   Options Database:
.  -hmpi_spawn_size nodesize

   Notes: This is only supported on systems with an MPI 2 implementation that includes the MPI_Comm_Spawn() routine.

$    Comparison of two approaches for HMPI usage (MPI started with N processes)
$
$    -hmpi_spawn_size <n> requires MPI 2, results in n*N total processes with N directly used by application code
$                         and n-1 worker processes (used by PETSc) for each application node.
$                         You MUST launch MPI so that only ONE MPI process is created for each hardware node.
$
$    -hmpi_merge_size <n> results in N total processes, N/n used by the application code and the rest worker processes
$                         (used by PETSc)
$                         You MUST launch MPI so that n MPI processes are created for each hardware node.
$
$    petscmpiexec -n 2 ./ex1 -hmpi_spawn_size 3 gives 2 application nodes (and 4 PETSc worker nodes)
$    petscmpiexec -n 6 ./ex1 -hmpi_merge_size 3 gives the SAME 2 application nodes and 4 PETSc worker nodes
$       This is what one would use if each of the computer's hardware nodes had 3 CPUs.
$
$    These are intended to be used in conjunction with USER HMPI code. The user will have 1 process per
$    computer (hardware) node (where the computer node has p cpus), the user's code will use threads to fully
$    utilize all the CPUs on the node. The PETSc code will have p processes to fully use the compute node for
$    PETSc calculations. The user THREADS and PETSc PROCESSES will NEVER run at the same time so the p CPUs
$    are always working on p tasks, never more than p.
$
$    See PCHMPI for a PETSc preconditioner that can use this functionality
$

   For both PetscHMPISpawn() and PetscHMPIMerge() PETSC_COMM_WORLD consists of one process per "node",
   PETSC_COMM_LOCAL_WORLD consists of all the processes in a "node."

   In both cases the user's code is running ONLY on PETSC_COMM_WORLD (that was newly generated by running this command).

   Level: developer

   Concepts: HMPI

.seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscHMPIFinalize(), PetscInitialize(), PetscHMPIMerge(), PetscHMPIRun()

@*/
PetscErrorCode PetscHMPISpawn(PetscMPIInt nodesize)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  MPI_Comm       parent,children;

  PetscFunctionBegin;
  ierr = MPI_Comm_get_parent(&parent);CHKERRQ(ierr);
  if (parent == MPI_COMM_NULL) {  /* the original processes started by user */
    char programname[PETSC_MAX_PATH_LEN];
    char **argv;

    ierr = PetscGetProgramName(programname,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
    ierr = PetscGetArguments(&argv);CHKERRQ(ierr);
    ierr = MPI_Comm_spawn(programname,argv,nodesize-1,MPI_INFO_NULL,0,PETSC_COMM_SELF,&children,MPI_ERRCODES_IGNORE);CHKERRQ(ierr);
    ierr = PetscFreeArguments(argv);CHKERRQ(ierr);
    ierr = MPI_Intercomm_merge(children,0,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);

    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    ierr = PetscInfo2(0,"PETSc HMPI successfully spawned: number of nodes = %d node size = %d\n",size,nodesize);CHKERRQ(ierr);

    saved_PETSC_COMM_WORLD = PETSC_COMM_WORLD;
  } else { /* worker nodes that get spawned */
    ierr            = MPI_Intercomm_merge(parent,1,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    ierr            = PetscHMPIHandle(PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    PetscHMPIWorker = PETSC_TRUE; /* so that PetscHMPIFinalize() will not attempt a broadcast from this process */
    PetscEnd();  /* cannot continue into user code */
  }
  PetscFunctionReturn(0);
}
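/*
   A usage sketch (an assumption, not taken from PETSc itself): in normal use
   PetscInitialize() reads -hmpi_spawn_size and invokes PetscHMPISpawn() internally,
   so that e.g. "petscmpiexec -n 2 ./ex1 -hmpi_spawn_size 3" gives 2 application
   processes and 4 spawned workers. The helper below shows the equivalent manual call;
   ExampleSpawnWorkers is an illustrative name.
*/
#include <petscsys.h>

static PetscErrorCode ExampleSpawnWorkers(void)
{
  PetscErrorCode ierr;
  PetscInt       nodesize = 1;
  PetscBool      flg      = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscOptionsGetInt(NULL,"-hmpi_spawn_size",&nodesize,&flg);CHKERRQ(ierr);
  if (flg && nodesize > 1) {
    /* spawned worker processes never return from this call; they enter PetscHMPIHandle() and then PetscEnd() */
    ierr = PetscHMPISpawn((PetscMPIInt)nodesize);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}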
PetscErrorCode BSSCR_GeneratePetscHeader_for_file( FILE *fd, MPI_Comm comm )
{
  PetscErrorCode ierr;
  char           version[256];
  char           arch[50], hostname[64], username[16], pname[PETSC_MAX_PATH_LEN], date[64];
  int            size, rank;
  const char     *_dir;

  ierr = PetscGetArchType(arch, 50);CHKERRQ(ierr);
  ierr = PetscGetHostName(hostname, 64);CHKERRQ(ierr);
  ierr = PetscGetUserName(username, 16);CHKERRQ(ierr);
  ierr = PetscGetProgramName(pname, PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
  ierr = PetscGetDate(date, 64);CHKERRQ(ierr);
  ierr = PetscGetVersion(version, 256);CHKERRQ(ierr); /* pass the buffer itself, not its address */
  ierr = PetscGetPetscDir(&_dir);CHKERRQ(ierr);

  MPI_Comm_size( comm, &size );
  MPI_Comm_rank( comm, &rank );
  if( rank != 0 ) PetscFunctionReturn(0);

  if( size == 1 ) {
    fprintf( fd, "## %s on a %s named %s with %d processor, by %s %s\n", pname, arch, hostname, size, username, date );
  } else {
    fprintf( fd, "## %s on a %s named %s with %d processors, by %s %s\n", pname, arch, hostname, size, username, date );
  }
  fprintf( fd, "## Using %s, installed at %s \n", version, _dir );

  PetscFunctionReturn(0);
}
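/*
   A minimal usage sketch (not part of the original source): write the header into a
   plain log file. Only rank 0 opens the file, which is safe because
   BSSCR_GeneratePetscHeader_for_file() only queries local communicator information.
   The path argument and the name ExampleWriteHeaderToFile are illustrative.
*/
#include <stdio.h>
#include <petscsys.h>

static PetscErrorCode ExampleWriteHeaderToFile( const char *path, MPI_Comm comm )
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  FILE           *fp;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    fp = fopen(path,"a");
    if (!fp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Cannot open file: %s",path);
    ierr = BSSCR_GeneratePetscHeader_for_file(fp,comm);CHKERRQ(ierr);
    fclose(fp);
  }
  PetscFunctionReturn(0);
}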
PetscErrorCode PetscOpenHistoryFile(const char filename[],FILE **fd)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  char           pfile[PETSC_MAX_PATH_LEN],pname[PETSC_MAX_PATH_LEN],fname[PETSC_MAX_PATH_LEN],date[64];
  char           version[256];

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  if (!rank) {
    char arch[10];
    int  err;

    ierr = PetscGetArchType(arch,10);CHKERRQ(ierr);
    ierr = PetscGetDate(date,64);CHKERRQ(ierr);
    ierr = PetscGetVersion(version,256);CHKERRQ(ierr);
    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    if (filename) {
      ierr = PetscFixFilename(filename,fname);CHKERRQ(ierr);
    } else {
      ierr = PetscGetHomeDirectory(pfile,240);CHKERRQ(ierr);
      ierr = PetscStrcat(pfile,"/.petschistory");CHKERRQ(ierr);
      ierr = PetscFixFilename(pfile,fname);CHKERRQ(ierr);
    }

    *fd = fopen(fname,"a");
    if (!*fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Cannot open file: %s",fname); /* check the opened stream, not the output argument */

    ierr = PetscFPrintf(PETSC_COMM_SELF,*fd,"---------------------------------------------------------\n");CHKERRQ(ierr);
    ierr = PetscFPrintf(PETSC_COMM_SELF,*fd,"%s %s\n",version,date);CHKERRQ(ierr);
    ierr = PetscGetProgramName(pname,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
    ierr = PetscFPrintf(PETSC_COMM_SELF,*fd,"%s on a %s, %d proc. with options:\n",pname,arch,size);CHKERRQ(ierr);
    ierr = PetscFPrintf(PETSC_COMM_SELF,*fd,"---------------------------------------------------------\n");CHKERRQ(ierr);

    err = fflush(*fd);
    if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on file");
  }
  PetscFunctionReturn(0);
}
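/*
   A usage sketch (not from the original source): append a record for the current run
   to the default history file (~/.petschistory) by passing NULL as the filename; the
   FILE pointer is only set on rank 0 of PETSC_COMM_WORLD. ExampleAppendHistory is an
   illustrative name.
*/
#include <stdio.h>
#include <petscsys.h>

static PetscErrorCode ExampleAppendHistory(void)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  FILE           *fd = NULL;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscOpenHistoryFile(NULL,&fd);CHKERRQ(ierr);
  /* ... optionally PetscFPrintf() additional notes into fd on rank 0 ... */
  if (!rank && fd) fclose(fd);
  PetscFunctionReturn(0);
}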
/*@C
   PetscLogMPEDump - Dumps the MPE logging info to file for later use with Upshot.

   Collective over PETSC_COMM_WORLD

   Level: advanced

.seealso: PetscLogDump(), PetscLogAllBegin(), PetscLogMPEBegin()
@*/
PetscErrorCode PetscLogMPEDump(const char sname[])
{
  char           name[PETSC_MAX_PATH_LEN];
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (PetscBeganMPE) {
    ierr = PetscInfo(0,"Finalizing MPE.\n");CHKERRQ(ierr);
    if (sname) {
      ierr = PetscStrcpy(name,sname);CHKERRQ(ierr);
    } else {
      ierr = PetscGetProgramName(name,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
    }
    ierr = MPE_Finish_log(name);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(0,"Not finalizing MPE (not started by PETSc).\n");CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
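/*
   A usage sketch (assumes a PETSc build with MPE support, PETSC_HAVE_MPE): start MPE
   event logging, run the code of interest, then dump the log for Upshot/Jumpshot.
   The basename "mpelog" is illustrative.
*/
#include <petscsys.h>

static PetscErrorCode ExampleProfileWithMPE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogMPEBegin();CHKERRQ(ierr);        /* begin MPE logging of PETSc events */
  /* ... code to be profiled ... */
  ierr = PetscLogMPEDump("mpelog");CHKERRQ(ierr); /* write the MPE log under the given basename */
  PetscFunctionReturn(0);
}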
/*
   Initializes arch, hostname, username, date so that system calls do NOT need
   to be made during the error handler.
*/
PetscErrorCode PetscErrorPrintfInitialize()
{
  PetscErrorCode ierr;
  PetscBool      use_stdout = PETSC_FALSE,use_none = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscGetArchType(arch,sizeof(arch));CHKERRQ(ierr);
  ierr = PetscGetHostName(hostname,sizeof(hostname));CHKERRQ(ierr);
  ierr = PetscGetUserName(username,sizeof(username));CHKERRQ(ierr);
  ierr = PetscGetProgramName(pname,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
  ierr = PetscGetDate(date,sizeof(date));CHKERRQ(ierr);
  ierr = PetscGetVersion(version,sizeof(version));CHKERRQ(ierr);

  ierr = PetscOptionsGetBool(NULL,"-error_output_stdout",&use_stdout,NULL);CHKERRQ(ierr);
  if (use_stdout) PETSC_STDERR = PETSC_STDOUT;
  ierr = PetscOptionsGetBool(NULL,"-error_output_none",&use_none,NULL);CHKERRQ(ierr);
  if (use_none) PetscErrorPrintf = PetscErrorPrintfNone;
  PetscErrorPrintfInitializeCalled = PETSC_TRUE;
  PetscFunctionReturn(0);
}
PetscErrorCode PetscLogView_VecScatter(PetscViewer viewer)
{
  MPI_Comm           comm       = PetscObjectComm((PetscObject) viewer);
  PetscEventPerfInfo *eventInfo = NULL;
  PetscLogDouble     locTotalTime,stats[6],maxstats[6],minstats[6],sumstats[6],avetime,ksptime;
  PetscStageLog      stageLog;
  const int          stage      = 2;
  int                event,events[] = {VEC_ScatterBegin,VEC_ScatterEnd};
  PetscMPIInt        rank,size;
  PetscErrorCode     ierr;
  PetscInt           i;
  char               arch[128],hostname[128],username[128],pname[PETSC_MAX_PATH_LEN],date[128],version[256];

  PetscFunctionBegin;
  PetscTime(&locTotalTime);  locTotalTime -= petsc_BaseTime;
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = PetscLogGetStageLog(&stageLog);CHKERRQ(ierr);

  ierr = PetscViewerASCIIPrintf(viewer,"numProcs = %d\n",size);CHKERRQ(ierr);

  ierr = PetscGetArchType(arch,sizeof(arch));CHKERRQ(ierr);
  ierr = PetscGetHostName(hostname,sizeof(hostname));CHKERRQ(ierr);
  ierr = PetscGetUserName(username,sizeof(username));CHKERRQ(ierr);
  ierr = PetscGetProgramName(pname,sizeof(pname));CHKERRQ(ierr);
  ierr = PetscGetDate(date,sizeof(date));CHKERRQ(ierr);
  ierr = PetscGetVersion(version,sizeof(version));CHKERRQ(ierr);

  ierr = PetscViewerASCIIPrintf(viewer,"%s on a %s named %s with %d processors, by %s %s\n", pname, arch, hostname, size, username, date);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "Using %s\n", version);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "Configure options: %s",petscconfigureoptions);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "%s", petscmachineinfo);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "%s", petsccompilerinfo);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "%s", petsccompilerflagsinfo);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "%s", petsclinkerinfo);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer, "%s\n", PETSC_MPICC_SHOW);CHKERRQ(ierr);
  ierr = PetscOptionsView(NULL,viewer);CHKERRQ(ierr);
#if defined(PETSC_HAVE_HWLOC)
  ierr = PetscProcessPlacementView(viewer);CHKERRQ(ierr);
#endif
  ierr = PetscViewerASCIIPrintf(viewer, "----------------------------------------------------\n");CHKERRQ(ierr);

  ierr = PetscViewerASCIIPrintf(viewer," Time Min to Max Range Proportion of KSP\n");CHKERRQ(ierr);

  eventInfo = stageLog->stageInfo[stage].eventLog->eventInfo;
  ierr      = MPI_Allreduce(&eventInfo[KSP_Solve].time,&ksptime,1,MPIU_PETSCLOGDOUBLE,MPI_SUM,PETSC_COMM_WORLD);CHKERRQ(ierr);
  ksptime   = ksptime/size;

  for (i=0; i<(int)(sizeof(events)/sizeof(int)); i++) {
    event          = events[i];
    stats[COUNT]   = eventInfo[event].count;
    stats[TIME]    = eventInfo[event].time;
    stats[NUMMESS] = eventInfo[event].numMessages;
    stats[MESSLEN] = eventInfo[event].messageLength;
    stats[REDUCT]  = eventInfo[event].numReductions;
    stats[FLOPS]   = eventInfo[event].flops;
    ierr = MPI_Allreduce(stats,maxstats,6,MPIU_PETSCLOGDOUBLE,MPI_MAX,PETSC_COMM_WORLD);CHKERRQ(ierr);
    ierr = MPI_Allreduce(stats,minstats,6,MPIU_PETSCLOGDOUBLE,MPI_MIN,PETSC_COMM_WORLD);CHKERRQ(ierr);
    ierr = MPI_Allreduce(stats,sumstats,6,MPIU_PETSCLOGDOUBLE,MPI_SUM,PETSC_COMM_WORLD);CHKERRQ(ierr);

    avetime = sumstats[1]/size;
    ierr    = PetscViewerASCIIPrintf(viewer,"%s %4.2e -%5.1f %% %5.1f %% %4.2e %%\n",stageLog->eventLog->eventInfo[event].name,
                                     avetime,100.*(avetime-minstats[1])/avetime,100.*(maxstats[1]-avetime)/avetime,100.*avetime/ksptime);CHKERRQ(ierr);
  }
  ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
   PetscStopForDebugger - Prints a message to the screen indicating how to
         attach to the process with the debugger and then waits for the
         debugger to attach.

   Not Collective

   Level: developer

   Notes: This is likely never needed since PetscAttachDebugger() is easier to use and seems to always work.

   Developer Notes: Since this can be called by the error handler, should it be calling SETERRQ() and CHKERRQ()?

   Concepts: debugger^waiting for attachment

.seealso: PetscSetDebugger(), PetscAttachDebugger()
@*/
PetscErrorCode PetscStopForDebugger(void)
{
  PetscErrorCode ierr;
  PetscInt       sleeptime=0;
#if !defined(PETSC_CANNOT_START_DEBUGGER)
  int            ppid;
  PetscMPIInt    rank;
  char           program[PETSC_MAX_PATH_LEN],hostname[256];
  PetscBool      isdbx,isxldb,isxxgdb,isddd,iskdbg,isups,isxdb,islldb;
#endif

  PetscFunctionBegin;
#if defined(PETSC_CANNOT_START_DEBUGGER)
  (*PetscErrorPrintf)("System cannot start debugger; just continuing program\n");
#else
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  if (ierr) rank = 0; /* ignore error since this may be already in error handler */
  ierr = PetscGetHostName(hostname,256);
  if (ierr) {
    (*PetscErrorPrintf)("Cannot determine hostname; just continuing program\n");
    PetscFunctionReturn(0);
  }
  ierr = PetscGetProgramName(program,256);
  if (ierr) {
    (*PetscErrorPrintf)("Cannot determine program name; just continuing program\n");
    PetscFunctionReturn(0);
  }
  if (!program[0]) {
    (*PetscErrorPrintf)("Cannot determine program name; just continuing program\n");
    PetscFunctionReturn(0);
  }

  ppid = getpid();

  ierr = PetscStrcmp(Debugger,"xxgdb",&isxxgdb);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"ddd",&isddd);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"kdbg",&iskdbg);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"ups",&isups);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"xldb",&isxldb);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"xdb",&isxdb);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"dbx",&isdbx);CHKERRQ(ierr);
  ierr = PetscStrcmp(Debugger,"lldb",&islldb);CHKERRQ(ierr);

  if (isxxgdb || isups || isddd || iskdbg) (*PetscErrorPrintf)("[%d]%s>>%s %s %d\n",rank,hostname,Debugger,program,ppid);
  else if (isxldb) (*PetscErrorPrintf)("[%d]%s>>%s -a %d %s\n",rank,hostname,Debugger,ppid,program);
  else if (islldb) (*PetscErrorPrintf)("[%d]%s>>%s -p %d\n",rank,hostname,Debugger,ppid);
  else if (isdbx) {
#if defined(PETSC_USE_P_FOR_DEBUGGER)
    (*PetscErrorPrintf)("[%d]%s>>%s -p %d %s\n",rank,hostname,Debugger,ppid,program);
#elif defined(PETSC_USE_LARGEP_FOR_DEBUGGER)
    (*PetscErrorPrintf)("[%d]%s>>%s -l ALL -P %d %s\n",rank,hostname,Debugger,ppid,program);
#elif defined(PETSC_USE_A_FOR_DEBUGGER)
    (*PetscErrorPrintf)("[%d]%s>>%s -a %d\n",rank,hostname,Debugger,ppid);
#elif defined(PETSC_USE_PID_FOR_DEBUGGER)
    (*PetscErrorPrintf)("[%d]%s>>%s -pid %d %s\n",rank,hostname,Debugger,ppid,program);
#else
    (*PetscErrorPrintf)("[%d]%s>>%s %s %d\n",rank,hostname,Debugger,program,ppid);
#endif
  }
#endif /* PETSC_CANNOT_START_DEBUGGER */

  fflush(stdout); /* ignore error because may already be in error handler */

  sleeptime = 25; /* default to sleep waiting for debugger */
  PetscOptionsGetInt(NULL,"-debugger_pause",&sleeptime,NULL); /* ignore error because may already be in error handler */
  if (sleeptime < 0) sleeptime = -sleeptime;
#if defined(PETSC_NEED_DEBUGGER_NO_SLEEP)
  /*
      HP cannot attach process to sleeping debugger, hence count instead
  */
  {
    PetscReal x = 1.0;
    int       i =10000000;
    while (i--) x++; /* cannot attach to sleeper */
  }
#elif defined(PETSC_HAVE_SLEEP_RETURNS_EARLY)
  /*
      IBM sleep may return at anytime, hence must see if there is more time to sleep
  */
  {
    int left = sleeptime;
    while (left > 0) left = sleep(left) - 1;
  }
#else
  PetscSleep(sleeptime);
#endif
  PetscFunctionReturn(0);
}
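/*
   A usage sketch (not from the original source): pause so a debugger can be attached
   by hand, for cases where PetscAttachDebugger() cannot be used on the target system.
   The wait time is controlled by -debugger_pause <seconds>; ExampleWaitForDebugger is
   an illustrative name.
*/
#include <petscsys.h>

static PetscErrorCode ExampleWaitForDebugger(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscStopForDebugger();CHKERRQ(ierr); /* prints the attach command for this PID and sleeps */
  /* ... continue with the code to be debugged once the debugger has attached ... */
  PetscFunctionReturn(0);
}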
/*@
   PetscAttachDebugger - Attaches the debugger to the running process.

   Not Collective

   Level: advanced

   Concepts: debugger^starting from program

   Developer Notes: Since this can be called by the error handler, should it be calling SETERRQ() and CHKERRQ()?

.seealso: PetscSetDebugger()
@*/
PetscErrorCode PetscAttachDebugger(void)
{
#if !defined(PETSC_CANNOT_START_DEBUGGER)
  int            child    =0;
  PetscReal      sleeptime=0;
  PetscErrorCode ierr;
  char           program[PETSC_MAX_PATH_LEN],display[256],hostname[64];
#endif

  PetscFunctionBegin;
#if defined(PETSC_CANNOT_START_DEBUGGER) || !defined(PETSC_HAVE_FORK)
  (*PetscErrorPrintf)("System cannot start debugger\n");
  (*PetscErrorPrintf)("On Cray run program in Totalview debugger\n");
  (*PetscErrorPrintf)("On Windows use Developer Studio(MSDEV)\n");
  MPI_Abort(PETSC_COMM_WORLD,1);
#else
  ierr = PetscGetDisplay(display,128);CHKERRQ(ierr);
  ierr = PetscGetProgramName(program,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
  if (ierr) {
    (*PetscErrorPrintf)("Cannot determine program name\n");
    PetscFunctionReturn(1);
  }
  if (!program[0]) {
    (*PetscErrorPrintf)("Cannot determine program name\n");
    PetscFunctionReturn(1);
  }

  child = (int)fork();
  if (child < 0) {
    (*PetscErrorPrintf)("Error in fork() attaching debugger\n");
    PetscFunctionReturn(1);
  }

  /*
      Swap the roles of parent and child. This is (I think) so that control c typed
      in the debugger goes to the correct process.
  */
  if (child) child = 0;
  else       child = (int)getppid();

  if (child) { /* I am the parent, will run the debugger */
    const char *args[10];
    char       pid[10];
    PetscInt   j,jj;
    PetscBool  isdbx,isidb,isxldb,isxxgdb,isups,isxdb,isworkshop,isddd,iskdbg,islldb;

    ierr = PetscGetHostName(hostname,64);CHKERRQ(ierr);
    /*
        We need to send a continue signal to the "child" process on the
        alpha, otherwise it just stays off forever
    */
#if defined(PETSC_NEED_KILL_FOR_DEBUGGER)
    kill(child,SIGCONT);
#endif
    sprintf(pid,"%d",child);

    ierr = PetscStrcmp(Debugger,"xxgdb",&isxxgdb);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"ddd",&isddd);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"kdbg",&iskdbg);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"ups",&isups);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"xldb",&isxldb);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"xdb",&isxdb);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"dbx",&isdbx);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"idb",&isidb);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"workshop",&isworkshop);CHKERRQ(ierr);
    ierr = PetscStrcmp(Debugger,"lldb",&islldb);CHKERRQ(ierr);

    if (isxxgdb || isups || isddd) {
      args[1] = program; args[2] = pid; args[3] = "-display";
      args[0] = Debugger; args[4] = display; args[5] = 0;
      (*PetscErrorPrintf)("PETSC: Attaching %s to %s %s on %s\n",args[0],args[1],pid,hostname);
      if (execvp(args[0],(char**)args) < 0) {
        perror("Unable to start debugger");
        exit(0);
      }
    } else if (iskdbg) {
      args[1] = "-p"; args[2] = pid; args[3] = program;  args[4] = "-display";
      args[0] = Debugger; args[5] = display; args[6] = 0;
      (*PetscErrorPrintf)("PETSC: Attaching %s to %s %s on %s\n",args[0],args[3],pid,hostname);
      if (execvp(args[0],(char**)args) < 0) {
        perror("Unable to start debugger");
        exit(0);
      }
    } else if (isxldb) {
      args[1] = "-a"; args[2] = pid; args[3] = program;  args[4] = "-display";
      args[0] = Debugger; args[5] = display; args[6] = 0;
      (*PetscErrorPrintf)("PETSC: Attaching %s to %s %s on %s\n",args[0],args[1],pid,hostname);
      if (execvp(args[0],(char**)args) < 0) {
        perror("Unable to start debugger");
        exit(0);
      }
    } else if (isworkshop) {
      args[1] = "-s"; args[2] = pid; args[3] = "-D"; args[4] = "-";
      args[0] = Debugger; args[5] = pid; args[6] = "-display"; args[7] = display; args[8] = 0;
      (*PetscErrorPrintf)("PETSC: Attaching %s to %s on %s\n",args[0],pid,hostname);
      if (execvp(args[0],(char**)args) < 0) {
        perror("Unable to start debugger");
        exit(0);
      }
    } else {
      j = 0;
      if (Xterm) {
        PetscBool cmp;
        char      *tmp,*tmp1;
        ierr = PetscStrncmp(DebugTerminal,"screen",6,&cmp);CHKERRQ(ierr);
        if (cmp) display[0] = 0; /* when using screen, we never pass -display */

        args[j++] = tmp = DebugTerminal;
        if (display[0]) {
          args[j++] = "-display"; args[j++] = display;
        }
        while (*tmp) {
          ierr = PetscStrchr(tmp,' ',&tmp1);CHKERRQ(ierr);
          if (!tmp1) break;
          *tmp1     = 0;
          tmp       = tmp1+1;
          args[j++] = tmp;
        }
      }
      args[j++] = Debugger;
      jj = j;
      args[j++] = program; args[j++] = pid; args[j++] = 0;

      if (isidb) {
        j = jj;
        args[j++] = "-pid";
        args[j++] = pid;
        args[j++] = "-gdb";
        args[j++] = program;
        args[j++] = 0;
      }
      if (islldb) {
        j = jj;
        args[j++] = "-p";
        args[j++] = pid;
        args[j++] = 0;
      }
      if (isdbx) {
        j = jj;
#if defined(PETSC_USE_P_FOR_DEBUGGER)
        args[j++] = "-p";
        args[j++] = pid;
        args[j++] = program;
#elif defined(PETSC_USE_LARGEP_FOR_DEBUGGER)
        args[j++] = "-l";
        args[j++] = "ALL";
        args[j++] = "-P";
        args[j++] = pid;
        args[j++] = program;
#elif defined(PETSC_USE_A_FOR_DEBUGGER)
        args[j++] = "-a";
        args[j++] = pid;
#elif defined(PETSC_USE_PID_FOR_DEBUGGER)
        args[j++] = "-pid";
        args[j++] = pid;
        args[j++] = program;
#endif
        args[j++] = 0;
      }
      if (Xterm) {
        if (display[0]) (*PetscErrorPrintf)("PETSC: Attaching %s to %s of pid %s on display %s on machine %s\n",Debugger,program,pid,display,hostname);
        else            (*PetscErrorPrintf)("PETSC: Attaching %s to %s on pid %s on %s\n",Debugger,program,pid,hostname);

        if (execvp(args[0],(char**)args) < 0) {
          perror("Unable to start debugger in xterm");
          exit(0);
        }
      } else {
        (*PetscErrorPrintf)("PETSC: Attaching %s to %s of pid %s on %s\n",Debugger,program,pid,hostname);
        if (execvp(args[0],(char**)args) < 0) {
          perror("Unable to start debugger");
          exit(0);
        }
      }
    }
  } else { /* I am the child, continue with user code */
    sleeptime = 10; /* default to sleep waiting for debugger */
    ierr = PetscOptionsGetReal(NULL,"-debugger_pause",&sleeptime,NULL);CHKERRQ(ierr);
    if (sleeptime < 0) sleeptime = -sleeptime;
#if defined(PETSC_NEED_DEBUGGER_NO_SLEEP)
    /*
        HP cannot attach process to sleeping debugger, hence count instead
    */
    {
      PetscReal x = 1.0;
      int       i =10000000;
      while (i--) x++; /* cannot attach to sleeper */
    }
#elif defined(PETSC_HAVE_SLEEP_RETURNS_EARLY)
    /*
        IBM sleep may return at anytime, hence must see if there is more time to sleep
    */
    {
      int left = sleeptime;
      while (left > 0) left = PetscSleep(left) - 1;
    }
#else
    PetscSleep(sleeptime);
#endif
  }
#endif
  PetscFunctionReturn(0);
}
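/*
   A usage sketch (not from the original source): PetscInitialize() normally attaches
   the debugger in response to -start_in_debugger or -on_error_attach_debugger, but the
   routine can also be called directly before a suspect code section. The chosen
   debugger ("gdb" in an xterm) is illustrative; see PetscSetDebugger().
*/
#include <petscsys.h>

static PetscErrorCode ExampleAttachDebugger(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSetDebugger("gdb",PETSC_TRUE);CHKERRQ(ierr); /* run gdb inside an xterm window */
  ierr = PetscAttachDebugger();CHKERRQ(ierr);              /* fork and attach the debugger to this process */
  PetscFunctionReturn(0);
}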