/*
 * The main program to "drive" the crew...
 */
int main (int argc, char *argv[])
{
    crew_t my_crew;
    char line[128], *next;
    int status;

    if (argc < 3) {
        fprintf (stderr, "Usage: %s string path\n", argv[0]);
        return -1;
    }

#ifdef sun
    /*
     * On Solaris 2.5, threads are not timesliced. To ensure
     * that our threads can run concurrently, we need to
     * increase the concurrency level to CREW_SIZE.
     */
    DPRINTF (("Setting concurrency level to %d\n", CREW_SIZE));
    thr_setconcurrency (CREW_SIZE);
#endif
    status = crew_create (&my_crew, CREW_SIZE);
    if (status != 0)
        err_abort (status, "Create crew");

    status = crew_start (&my_crew, argv[2], argv[1]);
    if (status != 0)
        err_abort (status, "Start crew");

    return 0;
}
void do_threads(SSL_CTX *s_ctx, SSL_CTX *c_ctx)
{
    SSL_CTX *ssl_ctx[2];
    thread_t thread_ctx[MAX_THREAD_NUMBER];
    int i;

    ssl_ctx[0] = s_ctx;
    ssl_ctx[1] = c_ctx;

    thr_setconcurrency(thread_number);
    for (i = 0; i < thread_number; i++) {
        thr_create(NULL, THREAD_STACK_SIZE,
                   (void *(*)())ndoit, (void *)ssl_ctx,
                   0L, &(thread_ctx[i]));
    }

    printf("reaping\n");
    for (i = 0; i < thread_number; i++) {
        thr_join(thread_ctx[i], NULL, NULL);
    }

    printf("solaris threads done (%d,%d)\n",
           s_ctx->references, c_ctx->references);
}
int main (int argc, char *argv[])
{
    pthread_t thread_id;
    int status;

#ifdef sun
    /*
     * On Solaris 2.5, threads are not timesliced. To ensure
     * that our threads can run concurrently, we need to
     * increase the concurrency level to at least 2 plus THREADS
     * (the number of workers).
     */
    DPRINTF (("Setting concurrency level to %d\n", THREADS+2));
    thr_setconcurrency (THREADS+2);
#endif
    status = pthread_create (&thread_id, NULL, thread_routine, NULL);
    if (status != 0)
        err_abort (status, "Create team");

    sleep (5);

    printf ("Cancelling...\n");
    status = pthread_cancel (thread_id);
    if (status != 0)
        err_abort (status, "Cancel team");

    status = pthread_join (thread_id, NULL);
    if (status != 0)
        err_abort (status, "Join team");

    return 0;
}
PR_IMPLEMENT(void) PR_SetConcurrency(PRUintn numCPUs)
{
#ifdef SOLARIS
    thr_setconcurrency(numCPUs);
#else
    PT_LOG("PR_SetConcurrency");
#endif
}
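Not shown in the NSPR snippet above is a call site. A minimal sketch of how PR_SetConcurrency() might be used before spawning local NSPR threads follows; the worker function, the thread count of 4, and start_workers() are illustrative assumptions, not part of the original source.

#include "nspr.h"

/* Hypothetical worker body; does nothing interesting. */
static void worker(void *arg)
{
    (void)arg;
}

/* Hint the runtime that four local threads should run concurrently,
 * then create and join them. On non-Solaris builds of the function
 * above, PR_SetConcurrency() only logs and is otherwise a no-op. */
static void start_workers(void)
{
    PRThread *threads[4];
    int i;

    PR_SetConcurrency(4);
    for (i = 0; i < 4; i++)
        threads[i] = PR_CreateThread(PR_USER_THREAD, worker, NULL,
                                     PR_PRIORITY_NORMAL, PR_LOCAL_THREAD,
                                     PR_JOINABLE_THREAD, 0);
    for (i = 0; i < 4; i++)
        if (threads[i] != NULL)
            PR_JoinThread(threads[i]);
}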
void tosThread_init()
{
#ifdef rt_LIB_Solaris2_SPARC
    thr_setconcurrency(5);
#endif

#if !defined(rt_LIB_Win32_i386)
    initSignals();
#endif
}
int main (int nArgument, char *aszArgument[])
{
    int iSizeofSAin;
    int SClientIn;
    int iFcntlflags;
    int iPort;
    int iFcntlFlags;
    struct sockaddr_in SAinServer;

    UMyUID = getuid ();

    if (nArgument != 2)
        vFatalError ("Not enough args");

    iPort = atoi (aszArgument[1]);
    iServerPort = iPort;

    if (!bInitializeConnection (&SAinServer))
        vFatalError ("InitConnection");

    iSizeofSAin = sizeof (struct sockaddr_in);

    /* Put the listening socket into non-blocking mode. */
    iFcntlFlags = fcntl (Shttp, F_GETFL, 0);
    fcntl (Shttp, F_SETFL, iFcntlFlags | O_NONBLOCK);

    while (TRUE) {
        if ((SClientIn = accept (Shttp, (struct sockaddr *) &SAinServer,
                                 &iSizeofSAin)) < 0) {
            if (errno != EWOULDBLOCK) {
                fprintf (stderr, "Accept failed, errno=%d.\n", errno);
                vFatalError ("Accept");
            } else {
                continue;
            }
        } else {
            fprintf (stderr, "\tIncoming: %d\n", SClientIn);
            if (thr_create (NULL, NULL, vHandleConnection,
                            (void *)SClientIn, THR_DETACHED, NULL) != 0)
                vFatalError ("Create Thread");
            thr_setconcurrency (0);
            DEBUG ("\t\tThread created.");
        }
    }
}
int ldap_pvt_thread_set_concurrency(int n)
{
#ifdef HAVE_PTHREAD_SETCONCURRENCY
    return pthread_setconcurrency( n );
#elif defined(HAVE_THR_SETCONCURRENCY)
    return thr_setconcurrency( n );
#else
    return 0;
#endif
}
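A hypothetical caller of the wrapper above might pass the planned size of a worker pool as the concurrency hint; hint_concurrency() and pool_size are assumptions for illustration only.

#include <stdio.h>

/* Hypothetical: advise the threads library before creating pool_size
 * worker threads. The hint is advisory, so a non-zero return is
 * logged but not treated as fatal. */
static void hint_concurrency(int pool_size)
{
    int rc = ldap_pvt_thread_set_concurrency(pool_size);
    if (rc != 0)
        fprintf(stderr, "ldap_pvt_thread_set_concurrency(%d) returned %d\n",
                pool_size, rc);
    /* ... worker threads would be created here ... */
}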
void thread_work()
{
    int i;

#ifdef UNBOUND
#ifdef SOLARIS
    if (thr_setconcurrency(nthreads) != 0) {
        perror("Oops, thr_setconcurrency failed");
        exit(1);
    }
#endif
#endif

    /* create nthreads threads, having each start at do_work */
    for (i = 0; i < nthreads; i++) {
        int retval;
#ifdef SOLARIS
#ifdef BOUND
        retval = thr_create(0, 0, do_work, 0, THR_BOUND, &(tid[i]));
#endif
#ifdef UNBOUND
        retval = thr_create(0, 0, do_work, 0, 0, &(tid[i]));
#endif
#endif
#ifdef POSIX
#ifdef BOUND
        retval = pthread_create(&(tid[i]), &attr, do_work, 0);
#endif
#ifdef UNBOUND
        retval = pthread_create(&(tid[i]), 0, do_work, 0);
#endif
#endif
        if (retval != 0) {
            perror("Oops, thr_create failed");
            exit(1);
        }
    }

    /* wait for all threads to complete their work and join */
    for (i = 0; i < nthreads; i++) {
#ifdef SOLARIS
        thr_join(tid[i], 0, 0);
#endif
#ifdef POSIX
        pthread_join(tid[i], 0);
#endif
    }
}
int main (int argc, char *argv[])
{
    int ncorr, t1arg, t0arg, orig_ncorr;
    thread_t tid1, tid0;
    float rate;

    if (argc != 6) {
        printf ("usage: %s t0_bound t0_new_lwp t1_bound t1_new_lwp ncorr\n",
                argv[0]);
        exit (1);
    }

    t0arg = THR_DETACHED;
    if (atoi (argv[1]))
        t0arg |= THR_BOUND;
    if (atoi (argv[2]))
        t0arg |= THR_NEW_LWP;

    t1arg = THR_DETACHED;
    if (atoi (argv[3]))
        t1arg |= THR_BOUND;
    if (atoi (argv[4]))
        t1arg |= THR_NEW_LWP;

    ncorr = atoi (argv[5]);

    if (thr_create (0, 0, work, 0, t0arg, &tid0) != 0)
        perror ("couldn't create thread 0");
    if (thr_create (0, 0, work, (void *) 1, t1arg, &tid1) != 0)
        perror ("couldn't create thread 1");

    orig_ncorr = thr_getconcurrency ();
    if (ncorr)
        thr_setconcurrency (ncorr);

    sleep (NSLEEP);

    rate = (count[0] + count[1]) / ((float) NSLEEP);

    printf ("\n------------------------------------------------------------------------\n");
    printf ("t0arg 0x%x (%s, %s, %s)\nt1arg 0x%x (%s, %s, %s)\ncount[0] %d count[1] %d\n"
            "ncorr_orig %d ncorr_set %d ncorr_end %d rate %.3f per_cxt %.2f usec\n",
            t0arg,
            (t0arg & THR_DETACHED) ? "THR_DETACHED" : "Not Detached",
            (t0arg & THR_BOUND) ? "THR_BOUND" : "Not Bound",
            (t0arg & THR_NEW_LWP) ? "THR_NEW_LWP" : "No New_LWP",
            t1arg,
            (t1arg & THR_DETACHED) ? "THR_DETACHED" : "Not Detached",
            (t1arg & THR_BOUND) ? "THR_BOUND" : "Not Bound",
            (t1arg & THR_NEW_LWP) ? "THR_NEW_LWP" : "No New_LWP",
            count[0], count[1],
            orig_ncorr, ncorr, thr_getconcurrency (), rate, 1.0e6 / rate);

    return 0;
}
bool wxThread::SetConcurrency(size_t level)
{
#ifdef HAVE_THR_SETCONCURRENCY
    int rc = thr_setconcurrency(level);
    if ( rc != 0 )
    {
        wxLogSysError(rc, _T("thr_setconcurrency() failed"));
    }

    return rc == 0;
#else // !HAVE_THR_SETCONCURRENCY
    // ok only for the default value
    return level == 0;
#endif // HAVE_THR_SETCONCURRENCY/!HAVE_THR_SETCONCURRENCY
}
int main ()
{
    Timer *timer1, *timer2;
    int ret;

#ifdef SOLARIS
    thr_setconcurrency (10);
#endif

    init_timer_master();

    timer1 = New_Timer (my_timer_fire, 2, "timer2", NULL);
    timer2 = New_Timer (my_timer_fire, 4, "timer4", NULL);

    Timer_Turn_ON (timer1);
    Timer_Turn_ON (timer2);

    /* loop forever */
    while (1);
}
PUBLIC S16 Trylock()
{
    S32 status;
    pthread_t counter_thread_id;
    pthread_t monitor_thread_id;

#ifdef sun
    /*
     * On Solaris 2.5, threads are not timesliced. To ensure
     * that our threads can run concurrently, we need to
     * increase the concurrency level to 2.
     */
    DPRINTF (("Setting concurrency level to 2\n"));
    thr_setconcurrency (2);
#endif
    end_time = time(NULL) + 60;     /* Run for 1 minute */

    status = pthread_create(&counter_thread_id, NULL, counter_thread, NULL);
    if (status != 0) {
        err_abort(status, "Create counter thread");
    }

    status = pthread_create(&monitor_thread_id, NULL, monitor_thread, NULL);
    if (status != 0) {
        err_abort (status, "Create monitor thread");
    }

    status = pthread_join(counter_thread_id, NULL);
    if (status != 0) {
        err_abort (status, "Join counter thread");
    }

    status = pthread_join (monitor_thread_id, NULL);
    if (status != 0) {
        err_abort (status, "Join monitor thread");
    }

    return ROK;
}
int main (int argc, char *argv[])
{
    pthread_t thread1, thread2, thread3;
    void *string;
    int status;

#ifdef sun
    /*
     * On Solaris 2.5, threads are not timesliced. To ensure
     * that our threads can run concurrently, we need to
     * increase the concurrency level.
     */
    DPRINTF (("Setting concurrency level to 4\n"));
    thr_setconcurrency (4);
#endif
    status = pthread_create (
        &thread1, NULL, prompt_routine, "Thread 1> ");
    if (status != 0)
        err_abort (status, "Create thread");
    status = pthread_create (
        &thread2, NULL, prompt_routine, "Thread 2> ");
    if (status != 0)
        err_abort (status, "Create thread");
    status = pthread_create (
        &thread3, NULL, prompt_routine, "Thread 3> ");
    if (status != 0)
        err_abort (status, "Create thread");

    status = pthread_join (thread1, &string);
    if (status != 0)
        err_abort (status, "Join thread");
    printf ("Thread 1: \"%s\"\n", (char*)string);
    free (string);

    status = pthread_join (thread2, &string);
    if (status != 0)
        err_abort (status, "Join thread");
    printf ("Thread 2: \"%s\"\n", (char*)string);
    free (string);

    status = pthread_join (thread3, &string);
    if (status != 0)
        err_abort (status, "Join thread");
    printf ("Thread 3: \"%s\"\n", (char*)string);
    free (string);

    return 0;
}
int main (int argc, char *argv[])
{
    pthread_t thread;
    int count;
    int status;

#ifdef sun
    /*
     * On Solaris 2.5, threads are not timesliced. To ensure
     * that our threads can run concurrently, we need to
     * increase the concurrency level to CLIENT_THREADS.
     */
    DPRINTF (("Setting concurrency level to %d\n", CLIENT_THREADS));
    thr_setconcurrency (CLIENT_THREADS);
#endif

    /*
     * Create CLIENT_THREADS clients.
     */
    client_threads = CLIENT_THREADS;
    for (count = 0; count < CLIENT_THREADS; count++) {
        status = pthread_create (&thread, NULL,
            client_routine, (void*)count);
        if (status != 0)
            err_abort (status, "Create client thread");
    }

    status = pthread_mutex_lock (&client_mutex);
    if (status != 0)
        err_abort (status, "Lock client mutex");
    while (client_threads > 0) {
        status = pthread_cond_wait (&clients_done, &client_mutex);
        if (status != 0)
            err_abort (status, "Wait for clients to finish");
    }
    status = pthread_mutex_unlock (&client_mutex);
    if (status != 0)
        err_abort (status, "Unlock client mutex");

    printf ("All clients done\n");
    tty_server_request (REQ_QUIT, 1, NULL, NULL);
    return 0;
}
void GalSS_RunServer(GalIO_ServerStruct *server)
{
    /* If timed tasks are configured to run, then if threads are
       enabled, the timed task handler will start a thread for each
       task, so we can exit. If threads aren't enabled, then we run
       the timed task loop. */
#if defined(GAL_THREADS) && defined(__solaris__)
    /* Apparently, Solaris 2.5 doesn't timeslice threads, so you
       need to boost the concurrency level. */
    thr_setconcurrency(2);
#endif
    if (GalIO_ServerUsesTimedTasks(server)) {
        if (!Gal_TimedTaskThreadsEnabled()) {
            Gal_TimedTasksLoop();
        } else {
            /* It's a bad plan to exit the main thread, since the
               caller might want to do some cleanup. But we want to
               duplicate the situation where the system returns from
               this function only when the timed task loop is
               terminated. So we define a waiter. I don't need to
               conditionalize this on GAL_PTHREADS, because
               Gal_EnableTimedTaskThreads() takes care of that. */
            Gal_TimedTaskLoopThreadWaiter();
        }
    }
#ifdef __INSURE__
    GalIO_SetServerDone(server);
    GalIO_DestroyServerStruct(server);
    _GalIO_FreeReaderQueue();
    _Gal_FreeAllObjects();
    if (_Gal_FreeAllFrames()) {
        _Gal_DestroySymTable();
    }
    _Gal_FreeAllByteBuffers();
    _Gal_FreeAllPointerBuffers();
    _Gal_FreeAllVlists();
#endif
}
void bu_parallel(void (*func)(int, void *), int ncpu, void *arg) { /* avoid using the 'register' keyword in here "just in case" */ #ifndef PARALLEL bu_log("bu_parallel(%d., %p): Not compiled for PARALLEL machine, running single-threaded\n", ncpu, arg); /* do the work anyways */ (*func)(0, arg); #else struct thread_data *user_thread_data_bu; int avail_cpus = 1; int x; char *libbu_affinity = NULL; /* OFF by default until linux issue is debugged */ int affinity = 0; /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(SUNOS) && SUNOS >= 52 static int concurrency = 0; /* Max concurrency we have set */ # endif # if (defined(SUNOS) && SUNOS >= 52) || defined(HAVE_PTHREAD_H) int nthreadc; int nthreade; rt_thread_t thread; rt_thread_t thread_tbl[MAX_PSW]; int i; # endif /* SUNOS */ # ifdef WIN32 int nthreadc = ncpu; HANDLE hThreadArray[MAX_PSW] = {0}; int i; DWORD returnCode; # endif /* WIN32 */ if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%d, %p)\n", ncpu, arg); if (UNLIKELY(pid_of_initiating_thread)) bu_bomb("bu_parallel() called from within parallel section\n"); pid_of_initiating_thread = bu_process_id(); if (ncpu > MAX_PSW) { bu_log("WARNING: bu_parallel() ncpu(%d) > MAX_PSW(%d), adjusting ncpu\n", ncpu, MAX_PSW); ncpu = MAX_PSW; } parallel_nthreads_started = 0; parallel_nthreads_finished = 0; libbu_affinity = getenv("LIBBU_AFFINITY"); if (libbu_affinity) affinity = (int)strtol(libbu_affinity, NULL, 0x10); if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { if (affinity) bu_log("CPU affinity enabled. (LIBBU_AFFINITY=%d)\n", affinity); else bu_log("CPU affinity disabled.\n", affinity); } user_thread_data_bu = (struct thread_data *)bu_calloc(ncpu, sizeof(*user_thread_data_bu), "struct thread_data *user_thread_data_bu"); /* Fill in the data of user_thread_data_bu structures of all threads */ for (x = 0; x < ncpu; x++) { user_thread_data_bu[x].user_func = func; user_thread_data_bu[x].user_arg = arg; user_thread_data_bu[x].cpu_id = x; user_thread_data_bu[x].counted = 0; user_thread_data_bu[x].affinity = affinity; } /* if we're in debug mode, allow additional cpus */ if (!(bu_debug & BU_DEBUG_PARALLEL)) { avail_cpus = bu_avail_cpus(); if (ncpu > avail_cpus) { bu_log("%d cpus requested, but only %d available\n", ncpu, avail_cpus); ncpu = avail_cpus; } } /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(SUNOS) && SUNOS >= 52 thread = 0; nthreadc = 0; /* Give the thread system a hint... */ if (ncpu > concurrency) { if (thr_setconcurrency(ncpu)) { bu_log("ERROR parallel.c/bu_parallel(): thr_setconcurrency(%d) failed\n", ncpu); /* Not much to do, lump it */ } else { concurrency = ncpu; } } /* Create the threads */ for (x = 0; x < ncpu; x++) { if (thr_create(0, 0, (void *(*)(void *))parallel_interface_arg, &user_thread_data_bu[x], 0, &thread)) { bu_log("ERROR: bu_parallel: thr_create(0x0, 0x0, 0x%x, 0x0, 0, 0x%x) failed for processor thread # %d\n", parallel_interface_arg, &thread, x); /* Not much to do, lump it */ } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): created thread: (thread: 0x%x) (loop:%d) (nthreadc:%d)\n", thread, x, nthreadc); thread_tbl[nthreadc] = thread; nthreadc++; } } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) for (i = 0; i < nthreadc; i++) bu_log("bu_parallel(): thread_tbl[%d] = 0x%x\n", i, thread_tbl[i]); /* * Wait for completion of all threads. We don't wait for threads * in order. 
We wait for any old thread but we keep track of how * many have returned and whether it is one that we started */ thread = 0; nthreade = 0; for (x = 0; x < nthreadc; x++) { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread to complete:\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", x, nthreadc, nthreade); if (thr_join((rt_thread_t)0, &thread, NULL)) { /* badness happened */ perror("thr_join"); bu_log("thr_join() failed"); } /* Check to see if this is one the threads we created */ for (i = 0; i < nthreadc; i++) { if (thread_tbl[i] == thread) { thread_tbl[i] = (rt_thread_t)-1; nthreade++; break; } } if ((thread_tbl[i] != (rt_thread_t)-1) && i < nthreadc) { bu_log("bu_parallel(): unknown thread %d completed.\n", thread); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %d)\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %d threads created. %d threads exited.\n", nthreadc, nthreade); # endif /* SUNOS */ # if defined(HAVE_PTHREAD_H) thread = 0; nthreadc = 0; /* Create the posix threads. * * Start at 1 so we can treat the parent as thread 0. */ for (x = 0; x < ncpu; x++) { pthread_attr_t attrs; pthread_attr_init(&attrs); pthread_attr_setstacksize(&attrs, 10*1024*1024); if (pthread_create(&thread, &attrs, (void *(*)(void *))parallel_interface_arg, &user_thread_data_bu[x])) { bu_log("ERROR: bu_parallel: pthread_create(0x0, 0x0, 0x%lx, 0x0, 0, %p) failed for processor thread # %d\n", (unsigned long int)parallel_interface_arg, (void *)&thread, x); /* Not much to do, lump it */ } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { bu_log("bu_parallel(): created thread: (thread: %p) (loop: %d) (nthreadc: %d)\n", (void*)thread, x, nthreadc); } thread_tbl[nthreadc] = thread; nthreadc++; } /* done with the attributes after create */ pthread_attr_destroy(&attrs); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { for (i = 0; i < nthreadc; i++) { bu_log("bu_parallel(): thread_tbl[%d] = %p\n", i, (void *)thread_tbl[i]); } # ifdef SIGINFO /* may be BSD-only (calls _thread_dump_info()) */ raise(SIGINFO); # endif } /* * Wait for completion of all threads. * Wait for them in order. */ thread = 0; nthreade = 0; for (x = 0; x < nthreadc; x++) { int ret; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread %p to complete:\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", (void *)thread_tbl[x], x, nthreadc, nthreade); if ((ret = pthread_join(thread_tbl[x], NULL)) != 0) { /* badness happened */ bu_log("pthread_join(thread_tbl[%d]=%p) ret=%d\n", x, (void *)thread_tbl[x], ret); } nthreade++; thread_tbl[x] = (rt_thread_t)-1; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %p)\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", (void *)thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %d threads created. 
%d threads exited.\n", nthreadc, nthreade); # endif /* end if posix threads */ # ifdef WIN32 /* Create the Win32 threads */ for (i = 0; i < nthreadc; i++) { hThreadArray[i] = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)parallel_interface_arg_stub, &user_thread_data_bu[i], 0, NULL); /* Ensure that all successfully created threads are in sequential order.*/ if (hThreadArray[i] == NULL) { bu_log("bu_parallel(): Error in CreateThread, Win32 error code %d.\n", GetLastError()); --nthreadc; } } /* Wait for other threads in the array */ returnCode = WaitForMultipleObjects(nthreadc, hThreadArray, TRUE, INFINITE); if (returnCode == WAIT_FAILED) { bu_log("bu_parallel(): Error in WaitForMultipleObjects, Win32 error code %d.\n", GetLastError()); } for (x = 0; x < nthreadc; x++) { int ret; if ((ret = CloseHandle(hThreadArray[x]) == 0)) { /* Thread didn't close properly if return value is zero; don't retry and potentially loop forever. */ bu_log("bu_parallel(): Error closing thread %d of %d, Win32 error code %d.\n", x, nthreadc, GetLastError()); } } # endif /* end if Win32 threads */ /* * Ensure that all the threads are REALLY finished. On some * systems, if threads core dump, the rest of the gang keeps * going, so this can actually happen (sigh). */ if (UNLIKELY(parallel_nthreads_finished != parallel_nthreads_started)) { bu_log("*** ERROR bu_parallel(%d): %d workers did not finish!\n\n", ncpu, ncpu - parallel_nthreads_finished); } if (UNLIKELY(parallel_nthreads_started != ncpu)) { bu_log("bu_parallel() NOTICE: only %d workers started, expected %d\n", parallel_nthreads_started, ncpu); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%d) complete, now serial\n", ncpu); # if defined(unix) || defined(__unix) /* Cray is known to wander among various pids, perhaps others. * * At this point, all multi-tasking activity should have ceased, * and we should be just a single UNIX process with our original * PID and open file table (kernel struct u). If not, then any * output may be written into the wrong file. */ x = bu_process_id(); if (UNLIKELY(pid_of_initiating_thread != x)) { bu_log("WARNING: bu_parallel(): PID of initiating thread changed from %d to %d, open file table may be botched!\n", pid_of_initiating_thread, x); } # endif pid_of_initiating_thread = 0; /* No threads any more */ bu_free(user_thread_data_bu, "struct thread_data *user_thread_data_bu"); #endif /* PARALLEL */ return; }
int ParallelInit(int n, pxgstrf_relax_t *pxgstrf_relax, pdgstrf_options_t *pdgstrf_options, pxgstrf_shared_t *pxgstrf_shared) { int *etree = pdgstrf_options->etree; register int w, dad, ukids, i, j, k, rs, panel_size, relax; register int P, w_top, do_split = 0; panel_t panel_type; int *panel_histo = pxgstrf_shared->Gstat->panel_histo; register int nthr, concurrency, info; #if ( MACH==SUN ) register int sync_type = USYNC_THREAD; /* Set concurrency level. */ nthr = sysconf(_SC_NPROCESSORS_ONLN); thr_setconcurrency(nthr); /* number of LWPs */ concurrency = thr_getconcurrency(); #if ( PRNTlevel==1 ) printf(".. CPUs %d, concurrency (#LWP) %d, P %d\n", nthr, concurrency, P); #endif /* Initialize mutex variables. */ pxgstrf_shared->lu_locks = (mutex_t *) SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(mutex_t)); for (i = 0; i < NO_GLU_LOCKS; ++i) mutex_init(&pxgstrf_shared->lu_locks[i], sync_type, 0); #elif ( MACH==DEC || MACH==PTHREAD ) pxgstrf_shared->lu_locks = (pthread_mutex_t *) SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(pthread_mutex_t)); for (i = 0; i < NO_GLU_LOCKS; ++i) pthread_mutex_init(&pxgstrf_shared->lu_locks[i], NULL); #else pxgstrf_shared->lu_locks = (mutex_t *) SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(mutex_t)); #endif #if ( PRNTlevel==1 ) printf(".. ParallelInit() ... nprocs %2d\n", pdgstrf_options->nprocs); #endif pxgstrf_shared->spin_locks = intCalloc(n); pxgstrf_shared->pan_status = (pan_status_t *) SUPERLU_MALLOC((n+1)*sizeof(pan_status_t)); pxgstrf_shared->fb_cols = intMalloc(n+1); panel_size = pdgstrf_options->panel_size; relax = pdgstrf_options->relax; w = MAX(panel_size, relax) + 1; for (i = 0; i < w; ++i) panel_histo[i] = 0; pxgstrf_shared->num_splits = 0; if ( (info = queue_init(&pxgstrf_shared->taskq, n)) ) { fprintf(stderr, "ParallelInit(): %d\n", info); ABORT("queue_init fails."); } /* Count children of each node in the etree. */ for (i = 0; i <= n; ++i) pxgstrf_shared->pan_status[i].ukids = 0; for (i = 0; i < n; ++i) { dad = etree[i]; ++pxgstrf_shared->pan_status[dad].ukids; } /* Find the panel partitions and initialize each panel's status */ #ifdef PROFILE num_panels = 0; #endif pxgstrf_shared->tasks_remain = 0; rs = 1; w_top = panel_size/2; if ( w_top == 0 ) w_top = 1; P = 12; for (i = 0; i < n; ) { if ( pxgstrf_relax[rs].fcol == i ) { w = pxgstrf_relax[rs++].size; panel_type = RELAXED_SNODE; pxgstrf_shared->pan_status[i].state = CANGO; } else { w = MIN(panel_size, pxgstrf_relax[rs].fcol - i); #ifdef SPLIT_TOP if ( !do_split ) { if ( (n-i) < panel_size * P ) do_split = 1; } if ( do_split && w > w_top ) { /* split large panel */ w = w_top; ++pxgstrf_shared->num_splits; } #endif for (j = i+1; j < i + w; ++j) /* Do not allow panel to cross a branch point in the etree. 
*/ if ( pxgstrf_shared->pan_status[j].ukids > 1 ) break; w = j - i; /* j should start a new panel */ panel_type = REGULAR_PANEL; pxgstrf_shared->pan_status[i].state = UNREADY; #ifdef DOMAINS if ( in_domain[i] == TREE_DOMAIN ) panel_type = TREE_DOMAIN; #endif } if ( panel_type == REGULAR_PANEL ) { ++pxgstrf_shared->tasks_remain; /*printf("nondomain panel %6d -- %6d\n", i, i+w-1); fflush(stdout);*/ } ukids = k = 0; for (j = i; j < i + w; ++j) { pxgstrf_shared->pan_status[j].size = k--; pxgstrf_shared->pan_status[j].type = panel_type; ukids += pxgstrf_shared->pan_status[j].ukids; } pxgstrf_shared->pan_status[i].size = w; /* leading column */ /* only count those kids outside the panel */ pxgstrf_shared->pan_status[i].ukids = ukids - (w-1); panel_histo[w]++; #ifdef PROFILE panstat[i].size = w; ++num_panels; #endif pxgstrf_shared->fb_cols[i] = i; i += w; } /* for i ... */ /* Dummy root */ pxgstrf_shared->pan_status[n].size = 1; pxgstrf_shared->pan_status[n].state = UNREADY; #if ( PRNTlevel==1 ) printf(".. Split: P %d, #nondomain panels %d\n", P, pxgstrf_shared->tasks_remain); #endif #ifdef DOMAINS EnqueueDomains(&pxgstrf_shared->taskq, list_head, pxgstrf_shared); #else EnqueueRelaxSnode(&pxgstrf_shared->taskq, n, pxgstrf_relax, pxgstrf_shared); #endif #if ( PRNTlevel==1 ) printf(".. # tasks %d\n", pxgstrf_shared->tasks_remain); fflush(stdout); #endif #ifdef PREDICT_OPT /* Set up structure describing children */ for (i = 0; i <= n; cp_firstkid[i++] = EMPTY); for (i = n-1; i >= 0; i--) { dad = etree[i]; cp_nextkid[i] = cp_firstkid[dad]; cp_firstkid[dad] = i; } #endif return 0; } /* ParallelInit */
main(int argc,char **argv) { int *ev, len, i, j, status, numread, totalread=0, nevents_max, event_size, chunk; int con[ET_STATION_SELECT_INTS]; et_event **pe; et_openconfig openconfig; et_statconfig sconfig; et_att_id attach; et_stat_id my_stat; et_sys_id id; sigset_t sigblock; struct timespec timeout; pthread_t tid; BOSIOptr BIOstream; char *ch; char *str1 = "OPEN UNIT=11 FILE='/scratch/boiarino/test.A00' WRITE RECL=32768 SPLITMB=2047 RAW SEQ NEW BINARY"; /*"OPEN UNIT=11 FILE='/work/clas/disk1/boiarino/test.A00' WRITE RECL=32768 SPLITMB=2047 RAW SEQ NEW BINARY";*/ if(argc != 3) { printf("Usage: et2bos_test <et_filename> <station_name>\n"); exit(1); } timeout.tv_sec = 0; timeout.tv_nsec = 1; /* setup signal handling */ sigfillset(&sigblock); /* block all signals */ status = pthread_sigmask(SIG_BLOCK, &sigblock, NULL); if (status != 0) { printf("et2bos: pthread_sigmask failure\n"); exit(1); } #ifdef sun thr_setconcurrency(thr_getconcurrency() + 1); #endif /* spawn signal handling thread */ pthread_create(&tid, NULL, signal_thread, (void *)NULL); restartLinux: /* open ET system */ et_open_config_init(&openconfig); et_open_config_setwait(openconfig, ET_OPEN_WAIT); if (et_open(&id, argv[1], openconfig) != ET_OK) { printf("et2bos: et_open problems\n"); exit(1); } et_open_config_destroy(openconfig); /* * Now that we have access to an ET system, find out how many * events it has and what size they are. Then allocate an array * of pointers to use for reading, writing, and modifying these events. */ if (et_system_getnumevents(id, &nevents_max) != ET_OK) { printf("et2bos: ET has died\n"); exit(1); } if (et_system_geteventsize(id, &event_size) != ET_OK) { printf("et2bos: ET has died\n"); exit(1); } if ( (pe = (et_event **) calloc(nevents_max, sizeof(et_event *))) == NULL) { printf("et2bos: cannot allocate memory\n"); exit(1); } et_station_config_init(&sconfig); et_station_config_setuser(sconfig, ET_STATION_USER_MULTI); et_station_config_setrestore(sconfig, ET_STATION_RESTORE_OUT); et_station_config_setprescale(sconfig, 1); /* old "all" mode */ et_station_config_setselect(sconfig, ET_STATION_SELECT_ALL); et_station_config_setblock(sconfig, ET_STATION_BLOCKING); et_station_config_setcue(sconfig, 150); /* if ET_STATION_NONBLOCKING */ /* set debug level */ et_system_setdebug(id, ET_DEBUG_INFO); /* create station */ if ((status = et_station_create(id, &my_stat, argv[2], sconfig)) < 0) { if (status == ET_ERROR_EXISTS) { /* my_stat contains pointer to existing station */; printf("et2bos: set ptr to the existing station\n"); } else if (status == ET_ERROR_TOOMANY) { printf("et2bos: too many stations created\n"); goto error; } else { printf("et2bos: error in station creation\n"); goto error; } } printf("et2bos: station ready\n"); if (et_station_attach(id, my_stat, &attach) < 0) { printf("et2bos: error in station attach\n"); goto error; } /* open FPACK file for writing */ /*if( (status = FParm(str1,&fd)) !=0) { printf("FParm status %d \n",status); printf("command was >%s<\n",str1); exit(1); }*/ /* set file descriptor pointer */ BIOstream = (BOSIOptr)fd; /* initialize BOS array */ bosInit(jw,NBCS); while (et_alive(id)) { /**************/ /* read data */ /**************/ /* example of single, timeout read */ /* status = et_event_get(&id, attach, &pe[0], ET_TIMED, &timeout); */ /* example of single, asynchronous read */ /* status = et_event_get(&id, attach, &pe[0], ET_ASYNC, NULL);*/ /* example of reading array of up to "chunk" events */ /* chunk = 500; */ /* numread = status = et_events_get(&id, attach, pe, 
ET_SLEEP, NULL, chunk, &numread);*/ chunk = 500; status = et_events_get(id, attach, pe, ET_SLEEP, NULL, chunk, &numread); if (status == ET_OK) { ; } else if (status == ET_ERROR_DEAD) { printf("et2bos: ET system is dead\n"); goto end; } else if (status == ET_ERROR_TIMEOUT) { printf("et2bos: got timeout\n"); goto end; } else if (status == ET_ERROR_EMPTY) { printf("et2bos: no events\n"); goto end; } else if (status == ET_ERROR_BUSY) { printf("et2bos: station is busy\n"); goto end; } else if (status == ET_ERROR_WAKEUP) { printf("et2bos: someone told me to wake up\n"); goto end; } else if ((status == ET_ERROR_WRITE) || (status == ET_ERROR_READ)) { printf("et2bos: socket communication error\n"); goto end; } else if (status != ET_OK) { printf("et2bos: get error\n"); goto error; } /****************************************/ /* print data, write data to FPACK file */ /****************************************/ for (j=0; j<numread; j++) { et_event_getdata(pe[j], (void **) &ev); /* et_event_getlength(pe[j], &len); printf("et2bos: recname=>%4.4s%4.4s<, run#=%d, event#=%d, reclen=%d\n", (char *)&ev[3],(char *)&ev[4],ev[5],ev[6],ev[10]); printf(" temp = %d, pri = %d, len = %d\n", pe[j]->temp,et_event_getpriority(pe[j]),len); et_event_getcontrol(pe[j], con); for (i=0; i < ET_STATION_SELECT_INTS; i++) { printf("control[%d] = %d\n",i,con[i]); } */ /* drop banks from previous event */ bosLdrop(jw, "E"); bosNgarbage(jw); bosWgarbage(jw); /* create banks in BOS array */ status = et2bos(ev, jw, "E"); if (status != 0) { printf ("et2bos_test: error %d in et2bos()\n",status); exit(1); } /* call next if want to use ET record header; otherwise record name will be "RUNEVENT" and other info from HEAD bank */ /*bosWriteRecord(fd,&ev[3],ev[5],ev[6],ev[8]);*/ /* write down BOS banks to file */ /* status = bosWrite(fd, jw, "E"); if (status != 0) { printf ("et2bos_test: error %d in bosWrite()\n",status); exit(1); } */ } /*********************/ /* return data to ET */ /*********************/ /* example of writing single event */ /* status = et_event_put(id, attach, pe[0]);*/ /* example of writing array of events */ status = et_events_put(id, attach, pe, numread); if (status == ET_ERROR_DEAD) { printf("et2bos: ET is dead\n"); goto end; } else if ((status == ET_ERROR_WRITE) || (status == ET_ERROR_READ)) { printf("et2bos: socket communication error\n"); goto end; } else if (status != ET_OK) { printf("et2bos: put error\n"); goto error; } totalread += numread; end: /* print something out after having read NUMEVENTS events */ if (totalread >= NUMEVENTS) { totalread = 0; printf(" et2bos: %d events\n", NUMEVENTS); } /* if ET system is dead, wait here until it comes back */ while (!et_alive(id)) { status = et_wait_for_alive(id); if (status == ET_OK) { int locality; et_system_getlocality(id, &locality); /* if Linux, re-establish connection to ET system since socket broken */ if (locality == ET_LOCAL_NOSHARE) { printf("et2bos: try to reconnect Linux client\n"); et_forcedclose(id); goto restartLinux; } } } } /* while(alive) */ error: free(pe); printf("et2bos: ERROR\n"); exit(0); }
int main(int argc, char *argv[])
{
    int i, bins;
    int n_thr = N_THREADS;
    int i_max = I_MAX;
    unsigned long size = SIZE;
    struct thread_st *st;

#if USE_MALLOC && USE_STARTER==2
    ptmalloc_init();
    printf("ptmalloc_init\n");
#endif

    if(argc > 1) n_total_max = atoi(argv[1]);
    if(n_total_max < 1) n_thr = 1;
    if(argc > 2) n_thr = atoi(argv[2]);
    if(n_thr < 1) n_thr = 1;
    if(n_thr > 100) n_thr = 100;
    if(argc > 3) i_max = atoi(argv[3]);

    if(argc > 4) size = atol(argv[4]);
    if(size < 2) size = 2;

    bins = MEMORY/(size*n_thr);
    if(argc > 5) bins = atoi(argv[5]);
    if(bins < 4) bins = 4;

    /*protect_stack(n_thr);*/
    thread_init();
    printf("total=%d threads=%d i_max=%d size=%ld bins=%d\n",
           n_total_max, n_thr, i_max, size, bins);

    st = (struct thread_st *)malloc(n_thr*sizeof(*st));
    if(!st) exit(-1);

#if !defined NO_THREADS && (defined __sun__ || defined sun)
    /* I know of no other way to achieve proper concurrency with Solaris. */
    thr_setconcurrency(n_thr);
#endif

    /* Start all n_thr threads. */
    for(i=0; i<n_thr; i++) {
        st[i].u.bins = bins;
        st[i].u.max = i_max;
        st[i].u.size = size;
        st[i].u.seed = ((long)i_max*size + i) ^ bins;
        st[i].sp = 0;
        st[i].func = malloc_test;
        if(thread_create(&st[i])) {
            printf("Creating thread #%d failed.\n", i);
            n_thr = i;
            break;
        }
        printf("Created thread %lx.\n", (long)st[i].id);
    }

    /* Start an extra thread so we don't run out of stacks. */
    if(0) {
        struct thread_st lst;
        lst.u.bins = 10;
        lst.u.max = 20;
        lst.u.size = 8000;
        lst.u.seed = 8999;
        lst.sp = 0;
        lst.func = malloc_test;
        if(thread_create(&lst)) {
            printf("Creating thread #%d failed.\n", i);
        } else {
            wait_for_thread(&lst, 1, NULL);
        }
    }

    for(n_running=n_total=n_thr; n_running>0;) {
        wait_for_thread(st, n_thr, my_end_thread);
    }
    for(i=0; i<n_thr; i++) {
        free(st[i].sp);
    }
    free(st);
#if USE_MALLOC
    malloc_stats();
#endif
    printf("Done.\n");
    return 0;
}
int main(int argc, char *argv[])
{
    int i, j, bins;
    int n_thr = N_THREADS;
    int i_max = I_MAX;
    unsigned long size = SIZE;
    struct thread_st *st;

#if USE_MALLOC && USE_STARTER==2
    ptmalloc_init();
    printf("ptmalloc_init\n");
#endif

    if(argc > 1) n_total_max = atoi(argv[1]);
    if(n_total_max < 1) n_thr = 1;
    if(argc > 2) n_thr = atoi(argv[2]);
    if(n_thr < 1) n_thr = 1;
    if(n_thr > 100) n_thr = 100;
    if(argc > 3) i_max = atoi(argv[3]);

    if(argc > 4) size = atol(argv[4]);
    if(size < 2) size = 2;

    bins = MEMORY/size;
    if(argc > 5) bins = atoi(argv[5]);
    if(bins < BINS_PER_BLOCK) bins = BINS_PER_BLOCK;

    n_blocks = bins/BINS_PER_BLOCK;
    blocks = (struct block *)malloc(n_blocks*sizeof(*blocks));
    if(!blocks) exit(1);

    thread_init();
    printf("total=%d threads=%d i_max=%d size=%ld bins=%d\n",
           n_total_max, n_thr, i_max, size, n_blocks*BINS_PER_BLOCK);

    for(i=0; i<n_blocks; i++) {
        mutex_init(&blocks[i].mutex);
        for(j=0; j<BINS_PER_BLOCK; j++) blocks[i].b[j].size = 0;
    }

    st = (struct thread_st *)malloc(n_thr*sizeof(*st));
    if(!st) exit(-1);

#if !defined NO_THREADS && (defined __sun__ || defined sun)
    /* I know of no other way to achieve proper concurrency with Solaris. */
    thr_setconcurrency(n_thr);
#endif

    /* Start all n_thr threads. */
    for(i=0; i<n_thr; i++) {
        st[i].u.max = i_max;
        st[i].u.size = size;
        st[i].u.seed = ((long)i_max*size + i) ^ n_blocks;
        st[i].sp = 0;
        st[i].func = malloc_test;
        if(thread_create(&st[i])) {
            printf("Creating thread #%d failed.\n", i);
            n_thr = i;
            break;
        }
        printf("Created thread %lx.\n", (long)st[i].id);
    }

    for(n_running=n_total=n_thr; n_running>0;) {
        wait_for_thread(st, n_thr, my_end_thread);
    }

    for(i=0; i<n_blocks; i++) {
        for(j=0; j<BINS_PER_BLOCK; j++) bin_free(&blocks[i].b[j]);
    }

    for(i=0; i<n_thr; i++) {
        free(st[i].sp);
    }
    free(st);
    free(blocks);
#if USE_MALLOC
    malloc_stats();
#endif
    printf("Done.\n");
    return 0;
}
int ldap_pvt_thread_set_concurrency(int n)
{
    return thr_setconcurrency( n );
}
/* * init_elements - get status for all elements in the library. * * exit - */ int /* 0 = all ok !0 = failure */ init_elements( library_t *library) { uint16_t count, start_element; uint16_t avail_drives; int i, err, conlevel = 5; size_t retry; dev_ent_t *un; char *drv_tbl; mode_sense_t *mode_sense; drive_state_t *drive; xport_state_t *xport; iport_state_t *import; robot_ms_page1d_t *pg1d = NULL; robot_ms_page1e_t *pg1e = NULL; robot_ms_page1f_t *pg1f = NULL; sam_extended_sense_t *sense; SANITY_CHECK(library != (library_t *)0); un = library->un; SANITY_CHECK(un != (dev_ent_t *)0); /* Put mode sense data into shared memory. */ /* LINTED pointer cast may result in improper alignment */ mode_sense = (mode_sense_t *)SHM_REF_ADDR(un->mode_sense); sense = (sam_extended_sense_t *)SHM_REF_ADDR(un->sense); SANITY_CHECK(mode_sense != (mode_sense_t *)0); SANITY_CHECK(sense != (sam_extended_sense_t *)0); (void) memset(mode_sense, 0, sizeof (mode_sense_t)); mutex_lock(&un->io_mutex); pg1d = (robot_ms_page1d_t *)lib_mode_sense(library, 0x1d, (uchar_t *)& mode_sense->u.robot_ms.pg1d, sizeof (robot_ms_page1d_t)); pg1f = (robot_ms_page1f_t *)lib_mode_sense(library, 0x1f, (uchar_t *)& mode_sense->u.robot_ms.pg1f, sizeof (robot_ms_page1f_t)); pg1e = (robot_ms_page1e_t *)lib_mode_sense(library, 0x1e, (uchar_t *)& mode_sense->u.robot_ms.pg1e, sizeof (robot_ms_page1e_t)); mutex_unlock(&un->io_mutex); if (pg1d == NULL || pg1f == NULL || pg1e == NULL) { DevLog(DL_ERR(5115)); return (1); } library->status.b.two_sided = pg1e->transport_sets[0].rotate; if (un->type == DT_CYGNET) library->status.b.two_sided = 0; /* Allocate the drive tables. */ BE16toH(&pg1d->first_drive, &start_element); BE16toH(&pg1d->num_drive, &count); library->range.drives_lower = start_element; library->range.drives_count = count; library->range.drives_upper = start_element + count - 1; /* * This code is currently applied to IBM3584 only since the IBM3584 * returns a valid status if drive unit is not installed in a * library. ASC/ASCQ:0x82/0x00. May need to add other library types * to this check, check scsi docs. * * If drive is not fully populated and there is an empty slot for the * drive, we don't need to create a redundant drive_thread. */ avail_drives = count; drv_tbl = malloc_wait(count, 2, 0); (void) memset(drv_tbl, TRUE, count); if (DT_IBM3584 == un->type) if ((avail_drives = (uint16_t)populate_drives(library, drv_tbl)) == 0) { /* * No drives installed, assum fully populated. */ DevLog(DL_ERR(5361)); avail_drives = count; (void) memset(drv_tbl, TRUE, count); } else if (avail_drives > count) { avail_drives = count; } DevLog(DL_DETAIL(5362), avail_drives); /* one for the drive, one for stage and one for the stage helper */ conlevel += (avail_drives * 3); library->drive = (drive_state_t *)malloc_wait( sizeof (drive_state_t), 5, 0); library->index = library->drive; (void) memset(library->drive, 0, sizeof (drive_state_t)); /* * For each drive, build the drive state structure, put the init * request on the list and start a thread with a new lwp. 
*/ for (drive = library->drive, i = 0; i < (int)count && avail_drives > 0; i++) { if (drv_tbl[i] == FALSE) { continue; } /* assign element number */ drive->element = start_element + i; drive->library = library; /* hold the lock until ready */ mutex_lock(&drive->mutex); drive->new_slot = ROBOT_NO_SLOT; drive->open_fd = -1; drive->active_count = 1; drive->first = (robo_event_t *)malloc_wait( sizeof (robo_event_t), 5, 0); (void) memset(drive->first, 0, sizeof (robo_event_t)); drive->first->type = EVENT_TYPE_INTERNAL; drive->first->status.bits = REST_FREEMEM; drive->first->request.internal.command = ROBOT_INTRL_INIT; if (thr_create(NULL, MD_THR_STK, &drive_thread, (void *) drive, (THR_NEW_LWP | THR_BOUND | THR_DETACHED), &drive->thread)) { DevLog(DL_SYSERR(5116)); drive->status.b.offline = TRUE; drive->thread = (thread_t)- 1; } if (--avail_drives <= 0) { break; } else { /* Allocate next entry */ drive->next = (drive_state_t *)malloc_wait( sizeof (drive_state_t), 5, 0); (void) memset(drive->next, 0, sizeof (drive_state_t)); drive->next->previous = drive; /* set back link */ drive = drive->next; } } drive->next = NULL; /* no next drive */ library->drive->previous = NULL; /* no previous drive */ free(drv_tbl); /* Allocate transport tables */ BE16toH(&pg1d->first_tport, &start_element); BE16toH(&pg1d->num_tport, &count); library->range.transport_lower = start_element; library->range.transport_count = count; library->range.transport_upper = start_element + count - 1; library->range.default_transport = 0; library->page1f = pg1f; conlevel += count; library->transports = (xport_state_t *)malloc_wait(sizeof (xport_state_t), 5, 0); (void) memset(library->transports, 0, sizeof (xport_state_t)); for (xport = library->transports, i = 0; i < (int)count; i++) { /* assign element number */ xport->element = start_element + i; xport->library = library; mutex_lock(&xport->mutex); /* start only one transport thread */ if (i == 0) { xport->first = (robo_event_t *)malloc_wait( sizeof (robo_event_t), 5, 0); (void) memset(xport->first, 0, sizeof (robo_event_t)); xport->first->type = EVENT_TYPE_INTERNAL; xport->first->status.bits = REST_FREEMEM; xport->first->request.internal.command = ROBOT_INTRL_INIT; xport->active_count = 1; if (thr_create(NULL, SM_THR_STK, &transport_thread, (void *) xport, (THR_NEW_LWP | THR_BOUND | THR_DETACHED), &xport->thread)) { DevLog(DL_SYSERR(5117)); xport->thread = (thread_t)- 1; } } /* Allocate next entry */ if (i != (count - 1)) { xport->next = (xport_state_t *)malloc_wait( sizeof (xport_state_t), 5, 0); (void) memset(xport->next, 0, sizeof (xport_state_t)); xport->next->previous = xport; /* set back link */ xport = xport->next; } } /* for the metrum d-360 the last transport is used with import export */ xport->next = NULL; /* no next transport */ library->transports->previous = NULL; /* Allocate mailbox (import/export) tables */ BE16toH(&pg1d->first_mail, &start_element); BE16toH(&pg1d->num_mail, &count); library->range.ie_lower = start_element; library->range.ie_count = count; if (count != 0) library->range.ie_upper = start_element + count - 1; else library->range.ie_upper = 0; conlevel += 1; /* only one import/export thread */ library->import = (iport_state_t *)malloc_wait( sizeof (iport_state_t), 5, 0); (void) memset(library->import, 0, sizeof (iport_state_t)); /* store the transport used in import/export for the metrum D-360 */ if (un->type == DT_METD28) library->import->xport = xport; for (import = library->import, i = 0; i < (int)count; i++) { SANITY_CHECK(import != 
(iport_state_t *)0); /* assign element number */ import->element = start_element + i; import->library = library; mutex_lock(&import->mutex); /* Create only one mailbox thread */ if (i == 0) { import->active_count = 1; import->first = (robo_event_t *)malloc_wait( sizeof (robo_event_t), 5, 0); (void) memset(import->first, 0, sizeof (robo_event_t)); import->first->type = EVENT_TYPE_INTERNAL; import->first->status.bits = REST_FREEMEM; import->first->request.internal.command = ROBOT_INTRL_INIT; if (thr_create(NULL, SM_THR_STK, &import_thread, (void *) import, (THR_DETACHED | THR_BOUND | THR_NEW_LWP), &import->thread)) { DevLog(DL_SYSERR(5118)); import->thread = (thread_t)- 1; } } if (i != (count - 1)) { /* Allocate next entry */ import->next = (iport_state_t *)malloc_wait( sizeof (iport_state_t), 5, 0); (void) memset(import->next, 0, sizeof (iport_state_t)); /* set back link */ import->next->previous = import; import = import->next; } } import->next = NULL; /* no next mailbox */ SANITY_CHECK(library->import != (iport_state_t *)0); library->import->previous = NULL; /* allocate the audit table if needed */ BE16toH(&pg1d->first_stor, &start_element); BE16toH(&pg1d->num_stor, &count); library->range.storage_lower = start_element; library->range.storage_count = count; library->range.storage_upper = start_element + count - 1; /* add for the import/export door slots */ if (un->type == DT_ACL452) count += library->range.ie_count; DevLog(DL_DETAIL(5220), library->range.drives_count, library->range.transport_count, library->range.storage_count, library->range.ie_count); if (thr_setconcurrency(conlevel)) { DevLog(DL_SYSERR(5058)); } /* * If the audit table is the wrong length (based on the number of * storage elements returned by mode-sense) or the audit bit is set, * the set up for an audit. */ if ((library->audit_tab_len == 0) || un->status.b.audit) { int added_more_time = FALSE; char *l_mess = un->dis_mes[DIS_MES_NORM]; /* * Audit table does not exist or is the wrong length. This * is generally a bad thing and will force an initialize * element scsi command and an audit. Both of these take a * long time. */ /* tell the outside world */ un->status.b.audit = TRUE; memccpy(l_mess, catgets(catfd, SET, 9022, "initializing elements"), '\0', DIS_MES_LEN); mutex_lock(&un->io_mutex); retry = 2; do { /* * Allow 16 seconds for each storage element and 30 * seconds of slop. */ (void) memset(sense, 0, sizeof (sam_extended_sense_t)); if ((err = scsi_cmd(library->open_fd, un, SCMD_INIT_ELEMENT_STATUS, (count << 4) + 30)) < 0) { TAPEALERT_SKEY(library->open_fd, un); GENERIC_SCSI_ERROR_PROCESSING(un, library->scsi_err_tab, 0, err, added_more_time, retry, /* code for DOWN_EQU */ down_library(library, SAM_STATE_CHANGE); mutex_unlock(&un->io_mutex); return (-1); /* MACRO for cstyle */, /* code for ILLREQ */ mutex_unlock(&un->io_mutex); return (-1); /* MACRO for cstyle */,
/* ------------------------------------------------------------ purpose -- compute a QR factorization using multiple threads created -- 98may29, cca ------------------------------------------------------------ */ void FrontMtx_MT_QR_factor ( FrontMtx *frontmtx, InpMtx *mtxA, ChvManager *chvmanager, IV *ownersIV, double cpus[], double *pfacops, int msglvl, FILE *msgFile ) { ChvList *updlist ; double t0, t1 ; IVL *rowsIVL ; int ithread, myid, nthread, rc ; int *firstnz ; QR_factorData *data, *dataObjects ; /* --------------- check the input --------------- */ if ( frontmtx == NULL || mtxA == NULL || chvmanager == NULL || ownersIV == NULL || cpus == NULL || pfacops == NULL || (msglvl > 0 && msgFile == NULL) ) { fprintf(stderr, "\n fatal error in FrontMtx_MT_QR_factor()" "\n bad input\n") ; exit(-1) ; } nthread = 1 + IV_max(ownersIV) ; /* ---------------------------------------------------------------- create the update Chv list object create the rowsIVL object, where list(J) = list of rows that are assembled in front J firstnz[irowA] = first column with nonzero element in A(irowA,*) ---------------------------------------------------------------- */ MARKTIME(t0) ; updlist = FrontMtx_postList(frontmtx, ownersIV, LOCK_IN_PROCESS) ; FrontMtx_QR_setup(frontmtx, mtxA, &rowsIVL, &firstnz, msglvl, msgFile) ; MARKTIME(t1) ; cpus[0] = t1 - t0 ; /* ------------------------------------ create and load nthread data objects ------------------------------------ */ ALLOCATE(dataObjects, struct _QR_factorData, nthread) ; for ( myid = 0, data = dataObjects ; myid < nthread ; myid++, data++ ) { data->mtxA = mtxA ; data->rowsIVL = rowsIVL ; data->firstnz = firstnz ; data->ownersIV = ownersIV ; data->frontmtx = frontmtx ; data->chvmanager = chvmanager ; data->updlist = updlist ; data->myid = myid ; DVzero(7, data->cpus) ; data->facops = 0.0 ; data->msglvl = msglvl ; if ( msglvl > 0 ) { char buffer[20] ; sprintf(buffer, "res.%d", myid) ; if ( (data->msgFile = fopen(buffer, "w")) == NULL ) { fprintf(stderr, "\n fatal error in FrontMtx_MT_QR_factor()" "\n unable to open file %s", buffer) ; exit(-1) ; } } else { data->msgFile = NULL ; } } #if THREAD_TYPE == TT_SOLARIS /* ---------------------------------- Solaris threads. (1) set the concurrency (2) create nthread - 1 new threads (3) execute own thread (4) join the threads ---------------------------------- */ thr_setconcurrency(nthread) ; for ( myid = 0, data = dataObjects ; myid < nthread - 1 ; myid++, data++ ) { rc = thr_create(NULL, 0, FrontMtx_QR_workerFactor, data, 0, NULL) ; if ( rc != 0 ) { fprintf(stderr, "\n fatal error, myid = %d, rc = %d from thr_create()", myid, rc) ; exit(-1) ; } } FrontMtx_QR_workerFactor(data) ; for ( myid = 0 ; myid < nthread - 1 ; myid++ ) { thr_join(0, 0, 0) ; } #endif #if THREAD_TYPE == TT_POSIX /* ---------------------------------- POSIX threads. (1) if SGI, set the concurrency (2) create nthread new threads (3) join the threads ---------------------------------- */ { pthread_t *tids ; pthread_attr_t attr ; void *status ; /* --------------------------------------------------------- #### NOTE: for SGI machines, this command must be present #### for the thread scheduling to be efficient. 
#### this is NOT a POSIX call, but necessary --------------------------------------------------------- pthread_setconcurrency(nthread) ; */ pthread_attr_init(&attr) ; /* pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) ; */ pthread_attr_setscope(&attr, PTHREAD_SCOPE_PROCESS) ; ALLOCATE(tids, pthread_t, nthread) ; for ( myid = 0 ; myid < nthread ; myid++ ) { #ifdef _MSC_VER tids[myid].p = 0; tids[myid].x = 0; #else tids[myid] = 0 ; #endif } for ( myid = 0, data = dataObjects ; myid < nthread ; myid++, data++ ) { rc = pthread_create(&tids[myid], &attr, FrontMtx_QR_workerFactor, data) ; if ( rc != 0 ) { fprintf(stderr, "\n fatal error in FrontMtx_MT_QR_factor()" "\n myid = %d, rc = %d from pthread_create()", myid, rc) ; exit(-1) ; } else if ( msglvl > 2 ) { fprintf(stderr, "\n thread %d created", myid) ; } } for ( myid = 0 ; myid < nthread ; myid++ ) { pthread_join(tids[myid], &status) ; } FREE(tids) ; pthread_attr_destroy(&attr) ; } #endif /* ---------------------------------------------- fill the cpu vector and factor operation count ---------------------------------------------- */ *pfacops = 0 ; for ( myid = 0, data = dataObjects ; myid < nthread ; myid++, data++ ) { if ( msglvl > 3 ) { fprintf(msgFile, "\n thread %d cpus", myid) ; DVfprintf(msgFile, 7, data->cpus) ; } for ( ithread = 0 ; ithread < 7 ; ithread++ ) { cpus[ithread] += data->cpus[ithread] ; } *pfacops += data->facops ; } /* ------------- free the data ------------- */ ChvList_free(updlist) ; IVL_free(rowsIVL) ; IVfree(firstnz) ; FREE(dataObjects) ; return ; }
int main(int argc, char *argv[]) { int i, j, t; int n_tids = 0; thread_t tid[256]; time_t start_time, stop_time; signal(SIGPIPE, SIG_IGN); for (i = 1; i < argc && argv[i][0] == '-'; i++) switch (argv[i][1]) { case '?': case 'H': exit(help(argv[0])); break; case 'd': debug = atoi(argv[i]+2); break; case 'R': rcvbuf_size = atoi(argv[i]+2); break; case 't': test_length = atoi(argv[i]+2); break; case 'h': server_host = argv[i]+2; break; case 'p': server_port = argv[i]+2; break; case 'b': if (isdigit(argv[i][2])) bound_threads = atoi(argv[i]+2); else bound_threads = 1; break; case 'c': concurrency = atoi(argv[i]+2); break; case 'E': use_exit = 1; break; case 'e': expected_bytes = atoi(argv[i]+2); break; case 'r': repeat = atoi(argv[i]+2); break; case 'k': keepalive = atoi(argv[i]+2); break; case '0': http_0 = 1; break; default: fprintf(stderr, "%s: unknown switch '%s', try -H for help\n", argv[0], argv[1]); exit(1); } keepalive++; mutex_init(&result_lock, USYNC_THREAD, NULL); if (mksockaddr_in(server_host, server_port, &server_addr) < 0) { fprintf(stderr, "Error creating socket address\n"); exit(1); } fprintf(stderr, "Running test: "); for (t = 0; t < test_length; t++) { if (t % 10) putc('-', stderr); else putc('+', stderr); } fprintf(stderr, "\rRunning test: "); if (concurrency > 0) thr_setconcurrency(concurrency); time(&start_time); for (; i < argc; i++) { for (j = 0; j < repeat; j++) { if (thr_create(NULL, 0, (void *(*)(void *)) test_thread, (void *) argv[i], (bound_threads ? THR_BOUND : 0) + THR_NEW_LWP, &tid[n_tids])) { perror("thr_create"); exit(1); } n_tids++; } } for (t = 0; t < test_length; t++) { xsleep(1); putc('>', stderr); fflush(stdout); } putc('\n', stderr); putc('\n', stderr); printf("%-19s %4s %6s %6s %6s %6s %6s %6s %s\n", "URL", "NRq", "Min Ct", "Avg Ct", "Max Ct", "Min Tx", "Avg Tx", "Max Tx", "Bytes"); stop_flag = 1; for (i = 0; i < n_tids; i++) thr_join(tid[i], NULL, NULL); if (failure) exit(1); time(&stop_time); test_length = (int) stop_time - (int) start_time; putchar('\n'); printf("Actual test time.. %d seconds\n", test_length); printf("Total requests.... %d (%d requests/sec)\n", total_nrq, total_nrq / test_length); printf("Total failed...... %d (%d requests/sec)\n", total_failed, total_failed / test_length); printf("Total bytes....... %d (%d bytes/sec)\n", total_bytes, total_bytes / test_length); putchar('\n'); printf("Min Tx: %.4f\n", min_tx); printf("Max Tx: %.4f\n", max_tx); exit(0); }
void bu_parallel(void (*func)(int, void *), size_t ncpu, void *arg) { #ifndef PARALLEL if (!func) return; /* nothing to do */ bu_log("bu_parallel(%zu., %p): Not compiled for PARALLEL machine, running single-threaded\n", ncpu, arg); /* do the work anyways */ (*func)(0, arg); #else struct thread_data *thread_context; rt_thread_t thread_tbl[MAX_PSW]; size_t avail_cpus = 1; size_t x; size_t i; /* number of threads created/ended */ size_t nthreadc; size_t nthreade; char *libbu_affinity = NULL; /* OFF by default as modern schedulers are smarter than this. */ int affinity = 0; /* ncpu == 0 means throttle our thread creation as slots become available */ int throttle = 0; struct parallel_info *parent; rt_thread_t thread; if (!func) return; /* nothing to do */ if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%zu, %p)\n", ncpu, arg); if (ncpu > MAX_PSW) { bu_log("WARNING: bu_parallel() ncpu(%zd) > MAX_PSW(%d), adjusting ncpu\n", ncpu, MAX_PSW); ncpu = MAX_PSW; } libbu_affinity = getenv("LIBBU_AFFINITY"); if (libbu_affinity) affinity = (int)strtol(libbu_affinity, NULL, 0x10); if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { if (affinity) bu_log("CPU affinity enabled. (LIBBU_AFFINITY=%d)\n", affinity); else bu_log("CPU affinity disabled.\n"); } /* if we're in debug mode, allow additional cpus */ if (!(bu_debug & BU_DEBUG_PARALLEL)) { /* otherwise, limit ourselves to what is actually available */ avail_cpus = bu_avail_cpus(); if (ncpu > avail_cpus) { bu_log("%zd cpus requested, but only %d available\n", ncpu, avail_cpus); ncpu = avail_cpus; } } parent = parallel_mapping(PARALLEL_GET, bu_parallel_id(), ncpu); if (ncpu < 1) { /* want to maximize threading potential, but have to throttle * thread creation. what is our parallelization limit? */ throttle = 1; /* any "zero" limit scopes propagate upward */ while (parent->lim == 0 && parent->id > 0) { parent = parallel_mapping(PARALLEL_GET, parent->parent, ncpu); } /* if the top-most parent is unspecified, use all available cpus */ if (parent->lim == 0) { ncpu = bu_avail_cpus(); } else { ncpu = parent->lim; } /* starting a "zero" bu_parallel means we get one worker * thread back (for this thread) */ bu_semaphore_acquire(BU_SEM_THREAD); if (parent->started > 0) parent->started--; bu_semaphore_release(BU_SEM_THREAD); } else if (ncpu == 1) { /* single cpu case bypasses nearly everything, just invoke */ (*func)(0, arg); parallel_mapping(PARALLEL_PUT, bu_parallel_id(), 0); return; } thread_context = (struct thread_data *)bu_calloc(ncpu, sizeof(*thread_context), "struct thread_data *thread_context"); /* Fill in the data of thread_context structures of all threads */ for (x = 0; x < ncpu; x++) { struct parallel_info *next = parallel_mapping(PARALLEL_GET, -1, ncpu); thread_context[x].user_func = func; thread_context[x].user_arg = arg; thread_context[x].cpu_id = next->id; thread_context[x].affinity = affinity; thread_context[x].parent = parent; } /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(SUNOS) && SUNOS >= 52 nthreadc = 0; /* Give the thread system a hint... 
*/ { static size_t concurrency = 0; /* Max concurrency we have set */ if (ncpu > concurrency) { if (thr_setconcurrency((int)ncpu)) { bu_log("ERROR parallel.c/bu_parallel(): thr_setconcurrency(%zd) failed\n", ncpu); /* Not much to do, lump it */ } else { concurrency = ncpu; } } } /* Create the threads */ for (x = 0; x < ncpu; x++) { parallel_wait_for_slot(throttle, parent, ncpu); if (thr_create(0, 0, (void *(*)(void *))parallel_interface_arg, &thread_context[x], 0, &thread)) { bu_log("ERROR: bu_parallel: thr_create(0x0, 0x0, 0x%x, 0x0, 0, 0x%x) failed for processor thread # %d\n", parallel_interface_arg, &thread, x); /* Not much to do, lump it */ } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): created thread: (thread: 0x%x) (loop:%d) (nthreadc:%zu)\n", thread, x, nthreadc); thread_tbl[nthreadc] = thread; nthreadc++; } } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) for (i = 0; i < nthreadc; i++) bu_log("bu_parallel(): thread_tbl[%d] = 0x%x\n", i, thread_tbl[i]); /* * Wait for completion of all threads. We don't wait for threads * in order. We wait for any old thread but we keep track of how * many have returned and whether it is one that we started */ nthreade = 0; for (x = 0; x < nthreadc; x++) { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread to complete:\t(loop:%d) (nthreadc:%zu) (nthreade:%zu)\n", x, nthreadc, nthreade); if (thr_join((rt_thread_t)0, &thread, NULL)) { /* badness happened */ perror("thr_join"); bu_log("thr_join() failed"); } /* Check to see if this is one the threads we created */ for (i = 0; i < nthreadc; i++) { if (thread_tbl[i] == thread) { thread_tbl[i] = (rt_thread_t)-1; nthreade++; break; } } if ((thread_tbl[i] != (rt_thread_t)-1) && i < nthreadc) { bu_log("bu_parallel(): unknown thread %d completed.\n", thread); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %d)\t(loop:%d) (nthreadc:%zu) (nthreade:%zu)\n", thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %zu threads created. %zud threads exited.\n", nthreadc, nthreade); # endif /* SUNOS */ # if defined(HAVE_PTHREAD_H) /* Create the posix threads. * * Start at 1 so we can treat the parent as thread 0. */ nthreadc = 0; for (x = 0; x < ncpu; x++) { pthread_attr_t attrs; pthread_attr_init(&attrs); pthread_attr_setstacksize(&attrs, 10*1024*1024); parallel_wait_for_slot(throttle, parent, ncpu); if (pthread_create(&thread, &attrs, (void *(*)(void *))parallel_interface_arg, &thread_context[x])) { bu_log("ERROR: bu_parallel: pthread_create(0x0, 0x0, 0x%lx, 0x0, 0, %p) failed for processor thread # %zu\n", (unsigned long int)parallel_interface_arg, (void *)&thread, x); } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { bu_log("bu_parallel(): created thread: (thread: %p) (loop: %zu) (nthreadc: %zu)\n", (void*)thread, x, nthreadc); } thread_tbl[nthreadc] = thread; nthreadc++; } /* done with the attributes after create */ pthread_attr_destroy(&attrs); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { for (i = 0; i < nthreadc; i++) { bu_log("bu_parallel(): thread_tbl[%d] = %p\n", i, (void *)thread_tbl[i]); } # ifdef SIGINFO /* may be BSD-only (calls _thread_dump_info()) */ raise(SIGINFO); # endif } /* * Wait for completion of all threads. * Wait for them in order. 
*/ nthreade = 0; for (x = 0; x < nthreadc; x++) { int ret; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread %p to complete:\t(loop:%d) (nthreadc:%zu) (nthreade:%zu)\n", (void *)thread_tbl[x], x, nthreadc, nthreade); if ((ret = pthread_join(thread_tbl[x], NULL)) != 0) { /* badness happened */ bu_log("pthread_join(thread_tbl[%d]=%p) ret=%d\n", x, (void *)thread_tbl[x], ret); } nthreade++; thread = thread_tbl[x]; thread_tbl[x] = (rt_thread_t)-1; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %p)\t(loop:%zu) (nthreadc:%zu) (nthreade:%zu)\n", (void *)thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %zu threads created. %zu threads exited.\n", nthreadc, nthreade); # endif /* end if posix threads */ # ifdef WIN32 /* Create the Win32 threads */ nthreadc = 0; for (i = 0; i < ncpu; i++) { parallel_wait_for_slot(throttle, parent, ncpu); thread = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)parallel_interface_arg_stub, &thread_context[i], 0, NULL); thread_tbl[i] = thread; nthreadc++; /* Ensure that all successfully created threads are in sequential order.*/ if (thread_tbl[i] == NULL) { bu_log("bu_parallel(): Error in CreateThread, Win32 error code %d.\n", GetLastError()); --nthreadc; } } { /* Wait for other threads in the array */ DWORD returnCode; returnCode = WaitForMultipleObjects((DWORD)nthreadc, thread_tbl, TRUE, INFINITE); if (returnCode == WAIT_FAILED) { bu_log("bu_parallel(): Error in WaitForMultipleObjects, Win32 error code %d.\n", GetLastError()); } } nthreade = 0; for (x = 0; x < nthreadc; x++) { int ret; if ((ret = CloseHandle(thread_tbl[x]) == 0)) { /* Thread didn't close properly if return value is zero; don't retry and potentially loop forever. */ bu_log("bu_parallel(): Error closing thread %zu of %zu, Win32 error code %d.\n", x, nthreadc, GetLastError()); } nthreade++; thread_tbl[x] = (rt_thread_t)-1; } # endif /* end if Win32 threads */ parallel_mapping(PARALLEL_PUT, bu_parallel_id(), 0); if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%zd) complete\n", ncpu); bu_free(thread_context, "struct thread_data *thread_context"); #endif /* PARALLEL */ return; }