/*
 * This is called by _thrp_exit() to deallocate the thread's TLS.
 * Destructors for all allocated TLS are called here.
 */
void
tls_exit()
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	tls_t *tlsent;
	TLS_modinfo *tlsp;
	long moduleid;
	ulong_t nmods;

	if (tlsm->static_tls.tls_size == 0 && self->ul_ntlsent == 0)
		return;		/* no TLS */

	/*
	 * Call TLS destructors for all TLS allocated for this thread.
	 */
	lmutex_lock(&tlsm->tls_lock);
	nmods = tlsm->tls_modinfo.tls_size;
	/* walk the modules in reverse (destruction order) */
	for (moduleid = nmods - 1; moduleid >= 0; --moduleid) {
		/*
		 * Resume where we left off in the module array.
		 * tls_modinfo.tls_data may have changed since we
		 * dropped and reacquired tls_lock, but TLS modules
		 * retain their positions in the new array.
		 */
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		/*
		 * Call destructors for this module if there are any
		 * to be called and if it is part of the static TLS or
		 * if the dynamic TLS for the module has been allocated.
		 */
		if (tlsp->tm_tlsfiniarraycnt != 0 &&
		    ((tlsp->tm_flags & TM_FLG_STATICTLS) ||
		    (moduleid < self->ul_ntlsent &&
		    (tlsent = self->ul_tlsent) != NULL &&
		    tlsent[moduleid].tls_data != NULL))) {
			ulong_t arraycnt = tlsp->tm_tlsfiniarraycnt;
			void (**finiarray)(void) = tlsp->tm_tlsfiniarray;

			/*
			 * Call the destructors in descending order.
			 * We must drop tls_lock while doing this because
			 * we have no idea what the destructors will do.
			 */
			lmutex_unlock(&tlsm->tls_lock);
			finiarray += arraycnt;
			do {
				(**--finiarray)();
			} while (--arraycnt != 0);
			lmutex_lock(&tlsm->tls_lock);
		}
	}
	lmutex_unlock(&tlsm->tls_lock);

	/* release the thread's TLS storage */
	tls_free(self);
}
/*
 * getpw(): scan the passwd file for the entry whose numeric uid field
 * matches 'uid' and copy that entire line (NUL-terminated, newline
 * stripped) into 'buf'.  Returns 0 on success, 1 on any failure.
 * NOTE(review): buf is assumed large enough to hold the longest
 * passwd line -- no bound is enforced here.
 */
int
getpw(uid_t uid, char buf[])
{
	int n, c;
	char *bp;
	FILE *fp;
	rmutex_t *lk;

	/*
	 * One-time open of the passwd file, double-checked under
	 * _pwlock so that exactly one thread installs the FILE in pwf.
	 */
	if (pwf == NULL) {
		fp = fopen(PASSWD, "rF");
		lmutex_lock(&_pwlock);
		if (pwf == NULL) {
			if ((pwf = fp) == NULL) {
				lmutex_unlock(&_pwlock);
				return (1);
			}
			fp = NULL;
		}
		lmutex_unlock(&_pwlock);
		if (fp != NULL)		/* someone beat us to it */
			(void) fclose(fp);
	}

	FLOCKFILE(lk, pwf);
	_rewind_unlocked(pwf);

	for (;;) {
		/* read one line into buf */
		bp = buf;
		while ((c = GETC(pwf)) != '\n') {
			if (c == EOF) {
				FUNLOCKFILE(lk);
				return (1);
			}
			*bp++ = (char)c;
		}
		*bp = '\0';

		/* skip the first two ':'-separated fields (name, passwd) */
		bp = buf;
		n = 3;
		while (--n)
			while ((c = *bp++) != ':')
				if (c == '\n') {
					FUNLOCKFILE(lk);
					return (1);
				}

		/*
		 * Accumulate the decimal uid field; n is 0 on exit
		 * from the loop above.  Non-digit characters before
		 * the next ':' are simply skipped.
		 */
		while ((c = *bp++) != ':')
			if (isdigit(c))
				n = n*10+c-'0';
			else
				continue;
		if (n == uid) {
			FUNLOCKFILE(lk);
			return (0);
		}
	}
}
/*
 * This is called by _thrp_setup() to initialize the thread's static TLS.
 * Constructors for initially allocated static TLS are called here.
 */
void
tls_setup()
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	TLS_modinfo *tlsp;
	long moduleid;
	ulong_t nmods;

	if (tlsm->static_tls.tls_size == 0)	/* no static TLS */
		return;

	/* static TLS initialization: copy the template below curthread */
	(void) memcpy((caddr_t)self - tlsm->static_tls.tls_size,
	    tlsm->static_tls.tls_data, tlsm->static_tls.tls_size);

	/* call TLS constructors for the static TLS just initialized */
	lmutex_lock(&tlsm->tls_lock);
	nmods = tlsm->tls_modinfo.tls_size;
	for (moduleid = 0; moduleid < nmods; moduleid++) {
		/*
		 * Resume where we left off in the module array.
		 * tls_modinfo.tls_data may have changed since we
		 * dropped and reacquired tls_lock, but TLS modules
		 * retain their positions in the new array.
		 */
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		/*
		 * Call constructors for this module if there are any
		 * to be called and if it is part of the static TLS.
		 */
		if (tlsp->tm_tlsinitarraycnt != 0 &&
		    (tlsp->tm_flags & TM_FLG_STATICTLS)) {
			ulong_t arraycnt = tlsp->tm_tlsinitarraycnt;
			void (**initarray)(void) = tlsp->tm_tlsinitarray;

			/*
			 * Call the constructors in ascending order.
			 * We must drop tls_lock while doing this because
			 * we have no idea what the constructors will do.
			 */
			lmutex_unlock(&tlsm->tls_lock);
			do {
				(**initarray++)();
			} while (--arraycnt != 0);
			lmutex_lock(&tlsm->tls_lock);
		}
	}
	lmutex_unlock(&tlsm->tls_lock);
}
/*
 * _sbrk_grow_aligned() aligns the old break to a low_align boundry,
 * adds min_size, aligns to a high_align boundry, and calls _brk_unlocked()
 * to set the new break.  The low_aligned-aligned value is returned, and
 * the actual space allocated is returned through actual_size.
 *
 * Unlike sbrk(2), _sbrk_grow_aligned takes an unsigned size, and does
 * not allow shrinking the heap.
 */
void *
_sbrk_grow_aligned(size_t min_size, size_t low_align, size_t high_align,
    size_t *actual_size)
{
	uintptr_t old_brk;
	uintptr_t ret_brk;
	uintptr_t high_brk;
	uintptr_t new_brk;
	int brk_result;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return ((void *)-1);
	}
	/* both alignments must be powers of two */
	if ((low_align & (low_align - 1)) != 0 ||
	    (high_align & (high_align - 1)) != 0) {
		errno = EINVAL;
		return ((void *)-1);
	}
	low_align = MAX(low_align, ALIGNSZ);
	high_align = MAX(high_align, ALIGNSZ);

	lmutex_lock(&__sbrk_lock);

	old_brk = (uintptr_t)BRKALIGN(_nd);
	ret_brk = P2ROUNDUP(old_brk, low_align);
	high_brk = ret_brk + min_size;
	new_brk = P2ROUNDUP(high_brk, high_align);

	/*
	 * Check for overflow
	 */
	if (ret_brk < old_brk || high_brk < ret_brk || new_brk < high_brk) {
		lmutex_unlock(&__sbrk_lock);
		errno = ENOMEM;
		return ((void *)-1);
	}

	/* only commit the new break value if the kernel accepted it */
	if ((brk_result = _brk_unlocked((void *)new_brk)) == 0)
		_nd = (void *)new_brk;

	lmutex_unlock(&__sbrk_lock);

	if (brk_result != 0)
		return ((void *)-1);

	if (actual_size != NULL)
		*actual_size = (new_brk - ret_brk);
	return ((void *)ret_brk);
}
/*
 * Note: Instead of making this function static, we reduce it to local
 * scope in the mapfile.  That allows the linker to prevent it from
 * appearing in the .SUNW_dynsymsort section.
 *
 * Return the current position of the directory stream: 0 at the
 * beginning of the directory, otherwise the d_off of the entry that
 * readdir would return next.
 */
off64_t
telldir64(DIR *dirp)
{
	private_DIR *pdirp = (private_DIR *)(uintptr_t)dirp;
	dirent64_t *dp64;
	off64_t off = 0;

	lmutex_lock(&pdirp->dd_lock);
	/* if at beginning of dir, return 0 */
	if (lseek64(dirp->dd_fd, 0, SEEK_CUR) != 0) {
		dp64 = (dirent64_t *)(uintptr_t)(&dirp->dd_buf[dirp->dd_loc]);
		/* was converted by readdir and needs to be reversed */
		if (dp64->d_ino == (ino64_t)-1) {
			dirent_t *dp32;

			/* the 32-bit dirent follows the 64-bit d_ino field */
			dp32 = (dirent_t *)((uintptr_t)dp64 + sizeof (ino64_t));
			dp64->d_ino = (ino64_t)dp32->d_ino;
			dp64->d_off = (off64_t)dp32->d_off;
			dp64->d_reclen = (unsigned short)(dp32->d_reclen +
			    ((char *)&dp64->d_off - (char *)dp64));
		}
		off = dp64->d_off;
	}
	lmutex_unlock(&pdirp->dd_lock);
	return (off);
}
/*
 * Free a switch configuration, but only if it is not in the cache.
 * Returns -1 for a NULL argument, 1 if the config was freed, and 0
 * if it was found in the cache (and therefore left alone).
 */
int
__nsw_freeconfig(struct __nsw_switchconfig *conf)
{
	struct cons_cell *cp;
	int cached = 0;

	if (conf == NULL)
		return (-1);

	/*
	 * Hacked to make life easy for the code in nss_common.c.  Free conf
	 * iff it was created by calling _nsw_getoneconfig() directly
	 * rather than by calling nsw_getconfig.
	 */
	lmutex_lock(&serialize_config);
	for (cp = concell_list; cp != NULL; cp = cp->next) {
		if (cp->sw == conf) {
			cached = 1;
			break;
		}
	}
	lmutex_unlock(&serialize_config);

	if (cached)
		return (0);	/* In the cache; don't free it */

	/* Not in the cache; free it */
	freeconf(conf);
	return (1);
}
/*
 * Tear down an idle thread pool: unlink it from the global list of
 * pools, discard any (unexpected) queued jobs, and free its storage.
 * The pool must have no remaining worker threads or active jobs.
 */
static void
delete_pool(tpool_t *tpool)
{
	tpool_job_t *job;

	ASSERT(tpool->tp_current == 0 && tpool->tp_active == NULL);

	/*
	 * Unlink the pool from the global list of all pools.
	 */
	lmutex_lock(&thread_pool_lock);
	if (thread_pools == tpool)
		thread_pools = tpool->tp_forw;
	if (thread_pools == tpool)	/* it was the only pool in the list */
		thread_pools = NULL;
	else {
		/* splice it out of the circular doubly-linked list */
		tpool->tp_back->tp_forw = tpool->tp_forw;
		tpool->tp_forw->tp_back = tpool->tp_back;
	}
	lmutex_unlock(&thread_pool_lock);

	/*
	 * There should be no pending jobs, but just in case...
	 */
	for (job = tpool->tp_head; job != NULL; job = tpool->tp_head) {
		tpool->tp_head = job->tpj_next;
		lfree(job, sizeof (*job));
	}
	(void) pthread_attr_destroy(&tpool->tp_attr);
	lfree(tpool, sizeof (*tpool));
}
/*
 * Register a callback to be invoked once the VCHIQ connection is made.
 * If we are already connected the callback runs immediately; otherwise
 * it is queued (up to MAX_CALLBACKS) for vchiq_call_connected_callbacks.
 */
void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
	connected_init();

	if (lmutex_lock_interruptible(&g_connected_mutex) != 0)
		return;

	if (g_connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
		lmutex_unlock(&g_connected_mutex);
		return;
	}

	if (g_num_deferred_callbacks < MAX_CALLBACKS) {
		/* Queue the callback for later. */
		g_deferred_callback[g_num_deferred_callbacks] = callback;
		g_num_deferred_callbacks++;
	} else {
		vchiq_log_error(vchiq_core_log_level,
			"There are already %d callbacks registered - "
			"please increase MAX_CALLBACKS",
			g_num_deferred_callbacks);
	}

	lmutex_unlock(&g_connected_mutex);
}
/*
 * ulckpwdf() returns 0 for a successful unlock and -1 otherwise
 */
int
ulckpwdf(void)
{
	int rv = -1;

	lmutex_lock(&lck_lock);
	if (lck_tid == thr_self() && fildes >= 0) {
		/* we hold the lock: release it and close the lock file */
		flock.l_type = F_UNLCK;
		(void) fcntl(fildes, F_SETLK, &flock);
		(void) close(fildes);
		fildes = -1;
		lck_pid = 0;
		lck_tid = 0;
		rv = 0;
	}
	lmutex_unlock(&lck_lock);
	return (rv);
}
/*
 * Close a named semaphore previously returned by sem_open().
 * Unlinks its descriptor from the open-semaphore list and unmaps
 * the shared mapping.  Returns -1 with errno = EINVAL if 'sem'
 * is not an open named semaphore.
 */
int
sem_close(sem_t *sem)
{
	semaddr_t **linkp;
	semaddr_t *found;

	/* search the list of open named semaphores for this address */
	lmutex_lock(&semlock);
	linkp = &semheadp;
	while ((found = *linkp) != NULL) {
		if (found->sad_addr == sem)
			break;
		linkp = &found->sad_next;
	}
	if (found == NULL) {
		lmutex_unlock(&semlock);
		errno = EINVAL;
		return (-1);
	}
	/* unlink the descriptor, then free it and unmap the semaphore */
	*linkp = found->sad_next;
	lmutex_unlock(&semlock);
	free(found);
	return (munmap((caddr_t)sem, sizeof (sem_t)));
}
/*
 * lckpwdf(): acquire the password-file lock by creating LOCKFILE and
 * taking an advisory write lock on it.  Retries once a second for up
 * to S_WAITTIME seconds.  Returns 0 on success, -1 on failure (with
 * errno = EINTR on timeout, for historical compatibility).
 */
int
lckpwdf(void)
{
	int seconds = 0;

	lmutex_lock(&lck_lock);
	for (;;) {
		if (lck_pid != 0 && lck_pid != getpid()) {
			/* somebody forked */
			lck_pid = 0;
			lck_tid = 0;
		}
		if (lck_tid == 0) {
			if ((fildes = creat(LOCKFILE, 0600)) == -1)
				break;
			flock.l_type = F_WRLCK;
			if (fcntl(fildes, F_SETLK, &flock) != -1) {
				/* got the file lock; record the owner */
				lck_pid = getpid();
				lck_tid = thr_self();
				lmutex_unlock(&lck_lock);
				return (0);
			}
			(void) close(fildes);
			fildes = -1;
		}
		if (seconds++ >= S_WAITTIME) {
			/*
			 * For compatibility with the past, pretend
			 * that we were interrupted by SIGALRM.
			 */
			errno = EINTR;
			break;
		}
		/* drop our internal lock while sleeping between attempts */
		lmutex_unlock(&lck_lock);
		(void) sleep(1);
		lmutex_lock(&lck_lock);
	}
	lmutex_unlock(&lck_lock);
	return (-1);
}
/*
 * Install 'hdl' as the handler for SIGFPE code 'code' and return the
 * previously installed handler.  Returns BADSIG with errno = EINVAL
 * if 'code' is not a recognized SIGFPE code.
 */
sigfpe_handler_type
sigfpe(sigfpe_code_type code, sigfpe_handler_type hdl)
{
	sigfpe_handler_type prev;
	int idx;

	lmutex_lock(&sigfpe_lock);
	(void) _test_sigfpe_master();

	/* locate the table slot for this SIGFPE code */
	idx = 0;
	while (idx < N_SIGFPE_CODE && code != sigfpe_codes[idx])
		idx++;

	if (idx >= N_SIGFPE_CODE) {
		/* Not 0 or SIGFPE code */
		errno = EINVAL;
		lmutex_unlock(&sigfpe_lock);
		return ((sigfpe_handler_type)BADSIG);
	}

	/* swap in the new handler and hand back the old one */
	prev = sigfpe_handlers[idx];
	sigfpe_handlers[idx] = hdl;
	lmutex_unlock(&sigfpe_lock);
	return (prev);
}
/*
 * This is called from the dynamic linker for each module not included
 * in the static TLS mod list, after the module has been loaded but
 * before any of the module's init code has been executed.
 */
void
__tls_mod_add(TLS_modinfo *tlsp)
{
	tls_metadata_t *tlsm = &curthread->ul_uberdata->tls_metadata;
	ulong_t moduleid = tlsp->tm_modid;
	TLS_modinfo *modinfo;

	lmutex_lock(&tlsm->tls_lock);
	ASSERT(!(tlsp->tm_flags & TM_FLG_STATICTLS));
	ASSERT(tlsp->tm_filesz <= tlsp->tm_memsz);
	/* ensure the module array covers moduleid, then record the entry */
	modinfo = tls_modinfo_alloc(tlsm, moduleid);
	(void) memcpy(&modinfo[moduleid], tlsp, sizeof (*tlsp));
	lmutex_unlock(&tlsm->tls_lock);
}
/*
 * Called for each module as it is unloaded from memory by dlclose().
 * Zeroes the module's slot in the TLS module information array so the
 * slot reads as unused from then on.
 */
void
__tls_mod_remove(TLS_modinfo *tlsp)
{
	tls_metadata_t *tlsm = &curthread->ul_uberdata->tls_metadata;
	ulong_t modid = tlsp->tm_modid;
	TLS_modinfo *infop;

	lmutex_lock(&tlsm->tls_lock);
	ASSERT(tlsm->tls_modinfo.tls_data != NULL &&
	    modid < tlsm->tls_modinfo.tls_size);
	infop = tlsm->tls_modinfo.tls_data;
	(void) memset(&infop[modid], 0, sizeof (TLS_modinfo));
	lmutex_unlock(&tlsm->tls_lock);
}
/*
 * Map a getXbyY function name plus dbop value to its index in the
 * getXbyY_to_dbop[] table, using a shift-and-fold string hash.
 * The hash table is built lazily, exactly once, under
 * getXbydbop_hash_lock; -1 is returned if a (never expected)
 * collision is detected while building it.
 * NOTE(review): this definition continues beyond this chunk; only
 * the lazy hash-table construction is visible here.
 */
static int
nss_dbop_search(const char *name, uint32_t dbop)
{
	getXbyY_to_dbop_t *hptr;
	int count = (sizeof (getXbyY_to_dbop) / sizeof (getXbyY_to_dbop_t));
	uint32_t hval, g;
	const char *cp;
	int i, idx;
	static const uint32_t hbits_tst = 0xf0000000;

	/* Uses a table size is known to have no collisions */
	if (getXbyYdbop_hashed == 0) {
		lmutex_lock(&getXbydbop_hash_lock);
		/* double-checked: another thread may have built it */
		if (getXbyYdbop_hashed == 0) {
			for (i = 0; i < count; i++) {
				cp = getXbyY_to_dbop[i].name;
				hval = 0;
				while (*cp) {
					hval = (hval << 4) + *cp++;
					/* fold the top nibble back in */
					if ((g = (hval & hbits_tst)) != 0)
						hval ^= g >> 24;
					hval &= ~g;
				}
				hval += getXbyY_to_dbop[i].dbop;
				hval %= DBOP_PRIME_HASH;
				if (getXbyYdbopHASH[hval] != 0) {
					/* hash table collision-see above */
					lmutex_unlock(&getXbydbop_hash_lock);
					return (-1);
				}
				getXbyYdbopHASH[hval] = i | DBOP_HASH_TAG;
			}
			/* publish the table before setting the flag */
			membar_producer();
			getXbyYdbop_hashed = 1;
		}
		lmutex_unlock(&getXbydbop_hash_lock);
	}
/*
 * Adjust the break by 'addend' bytes.  Serializes with brk() and
 * _sbrk_grow_aligned() via __sbrk_lock.  Returns (void *)-1 with
 * errno = ENOTSUP when not on the primary link map.
 */
void *
sbrk(intptr_t addend)
{
	void *brkval;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return ((void *)-1);
	}

	lmutex_lock(&__sbrk_lock);
	brkval = _sbrk_unlocked(addend);
	lmutex_unlock(&__sbrk_lock);

	return (brkval);
}
/*
 * Invoke every deferred connection callback, clear the queue, and
 * mark the connection established so later registrations run
 * immediately.
 */
void vchiq_call_connected_callbacks(void)
{
	int idx;

	connected_init();

	if (lmutex_lock_interruptible(&g_connected_mutex) != 0)
		return;

	for (idx = 0; idx < g_num_deferred_callbacks; idx++)
		g_deferred_callback[idx]();

	g_num_deferred_callbacks = 0;
	g_connected = 1;

	lmutex_unlock(&g_connected_mutex);
}
/*
 * Return the current position of the directory stream: 0 at the
 * beginning of the directory, otherwise the d_off of the entry that
 * readdir would return next.
 */
long
telldir(DIR *dirp)
{
	private_DIR *pdirp = (private_DIR *)dirp;
	dirent_t *dp;
	off_t off = 0;

	lmutex_lock(&pdirp->dd_lock);
	/* if at beginning of dir, return 0 */
	if (lseek(dirp->dd_fd, 0, SEEK_CUR) != 0) {
		dp = (dirent_t *)(uintptr_t)(&dirp->dd_buf[dirp->dd_loc]);
		off = dp->d_off;
	}
	lmutex_unlock(&pdirp->dd_lock);
	return (off);
}
/*
 * Lazily bind to libscf.so.1 and capture the three scf_simple_prop_*
 * entry points into the real_* function pointers.  A pointer that
 * cannot be resolved is set to (-1) so callers can distinguish
 * "unavailable" from "not yet loaded".  Whenever at least one symbol
 * is installed, scf_handle is zeroed so the library stays loaded
 * (the installed function pointers reference it); otherwise the
 * handle is closed at the end.
 */
static void
load_scf(void)
{
	void *scf_handle = dlopen("libscf.so.1", RTLD_LAZY);
	scf_simple_prop_get_t scf_simple_prop_get =
	    (scf_handle == NULL)? NULL :
	    (scf_simple_prop_get_t)dlsym(scf_handle, "scf_simple_prop_get");
	scf_simple_prop_next_boolean_t scf_simple_prop_next_boolean =
	    (scf_handle == NULL)? NULL :
	    (scf_simple_prop_next_boolean_t)dlsym(scf_handle,
	    "scf_simple_prop_next_boolean");
	scf_simple_prop_free_t scf_simple_prop_free =
	    (scf_handle == NULL)? NULL :
	    (scf_simple_prop_free_t)dlsym(scf_handle, "scf_simple_prop_free");

	lmutex_lock(&scf_lock);
	/* install only once; later callers see the published pointers */
	if (real_scf_simple_prop_get == NULL ||
	    real_scf_simple_prop_next_boolean == NULL ||
	    real_scf_simple_prop_free == NULL) {
		if (scf_simple_prop_get == NULL)
			real_scf_simple_prop_get =
			    (scf_simple_prop_get_t)(-1);
		else {
			real_scf_simple_prop_get = scf_simple_prop_get;
			scf_handle = NULL;	/* don't dlclose it */
		}
		if (scf_simple_prop_next_boolean == NULL)
			real_scf_simple_prop_next_boolean =
			    (scf_simple_prop_next_boolean_t)(-1);
		else {
			real_scf_simple_prop_next_boolean =
			    scf_simple_prop_next_boolean;
			scf_handle = NULL;	/* don't dlclose it */
		}
		if (scf_simple_prop_free == NULL)
			real_scf_simple_prop_free =
			    (scf_simple_prop_free_t)(-1);
		else {
			real_scf_simple_prop_free = scf_simple_prop_free;
			scf_handle = NULL;	/* don't dlclose it */
		}
		/* make the pointer stores visible before any reader uses them */
		membar_producer();
	}
	lmutex_unlock(&scf_lock);

	if (scf_handle)
		(void) dlclose(scf_handle);
}
/*
 * Common code for tmpfile()/tmpfile64(): create and open an anonymous
 * temporary file in P_tmpdir.  The file is unlinked immediately after
 * creation so it disappears when the stream is closed.  Returns the
 * open stream, or NULL on failure.
 */
/*ARGSUSED*/
static FILE *
_common(boolean_t large_file)
{
	char tfname[L_tmpnam];
	FILE *p;
	char *q;
	int mkret;
	mode_t current_umask;

	(void) strcpy(tfname, P_tmpdir);
	/* the shared 'seed' string varies the template between calls */
	lmutex_lock(&seed_lk);
	(void) strcat(tfname, seed);
	(void) strcat(tfname, XS);
	q = seed;
	/* advance seed like an odometer: trailing 'z's wrap to 'a' */
	while (*q == 'z')
		*q++ = 'a';
	if (*q != '\0')
		++*q;
	lmutex_unlock(&seed_lk);
#if !defined(_LP64)
	if (large_file == B_TRUE) {
		if ((mkret = mkstemp64(tfname)) == -1)
			return (NULL);
	} else
#endif
	if ((mkret = mkstemp(tfname)) == -1)
		return (NULL);
	(void) unlink(tfname);
	/* read the process umask (restore it) and apply it to the file */
	current_umask = umask(0777);
	(void) umask(current_umask);
	(void) fchmod(mkret, 0666 & ~current_umask);
	if ((p = fdopen(mkret, "w+")) == NULL) {
		(void) close(mkret);
		return (NULL);
	}
	return (p);
}
/*
 * Set the break to 'new_brk' (after alignment).  Serializes with
 * sbrk() via __sbrk_lock and remembers the accepted value in _nd.
 * Returns 0 on success, -1 on failure.
 */
int
brk(void *new_brk)
{
	int rv;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (-1);
	}

	/*
	 * Need to align this here; _brk_unlocked won't do it for us.
	 */
	new_brk = BRKALIGN(new_brk);

	lmutex_lock(&__sbrk_lock);
	rv = _brk_unlocked(new_brk);
	if (rv == 0)
		_nd = new_brk;	/* remember the accepted break value */
	lmutex_unlock(&__sbrk_lock);

	return (rv);
}
/*
 * Look up (and cache) the name-service switch configuration entry for
 * database 'dbase'.  Returns the cached or freshly parsed switchconfig,
 * or NULL with *errp describing the failure.  All cache manipulation
 * happens under serialize_config; the lock is dropped around
 * open_conf()/fclose()/syslog(), and the cache is re-scanned (goto top)
 * if it changed while the lock was dropped.
 */
struct __nsw_switchconfig *
__nsw_getconfig(const char *dbase, enum __nsw_parse_err *errp)
{
	struct __nsw_switchconfig *cfp, *retp = NULL;
	int syslog_error = 0;
	FILE *fp = NULL;
	char *linep;
	char lineq[BUFSIZ];

	lmutex_lock(&serialize_config);
top:
	if (cfp = scrounge_cache(dbase)) {
		*errp = __NSW_CONF_PARSE_SUCCESS;
		lmutex_unlock(&serialize_config);
		if (fp != NULL)
			(void) fclose(fp);
		return (cfp);
	}

	if (fp == NULL) {
		struct cons_cell *cp = concell_list;

		/* open_conf() must be called w/o locks held */
		lmutex_unlock(&serialize_config);
		if ((fp = open_conf()) == NULL) {
			*errp = __NSW_CONF_PARSE_NOFILE;
			return (NULL);
		}
		lmutex_lock(&serialize_config);
		/* Cache changed? */
		if (cp != concell_list)
			goto top;
	}

	*errp = __NSW_CONF_PARSE_NOPOLICY;
	while (linep = fgets(lineq, BUFSIZ, fp)) {
		enum __nsw_parse_err line_err;
		char *tokenp, *comment;

		/*
		 * Ignore portion of line following the comment
		 * character '#'.
		 */
		if ((comment = strchr(linep, '#')) != NULL) {
			*comment = '\0';
		}
		/*
		 * skip past blank lines.
		 * otherwise, cache as a struct switchconfig.
		 */
		if ((*linep == '\0') || isspace(*linep)) {
			continue;
		}
		if ((tokenp = skip(&linep, ':')) == NULL) {
			continue; /* ignore this line */
		}
		if (cfp = scrounge_cache(tokenp)) {
			continue; /* ? somehow this database is in the cache */
		}
		if (cfp = _nsw_getoneconfig(tokenp, linep, &line_err)) {
			(void) add_concell(cfp);
			if (strcmp(cfp->dbase, dbase) == 0) {
				*errp = __NSW_CONF_PARSE_SUCCESS;
				retp = cfp;
			}
		} else {
			/*
			 * Got an error on this line, if it is a system
			 * error we might as well give right now. If it
			 * is a parse error on the second entry of the
			 * database we are looking for and the first one
			 * was a good entry we end up logging the following
			 * syslog message and using a default policy instead.
			 */
			if (line_err == __NSW_CONF_PARSE_SYSERR) {
				*errp = __NSW_CONF_PARSE_SYSERR;
				break;
			} else if (line_err == __NSW_CONF_PARSE_NOPOLICY &&
			    strcmp(tokenp, dbase) == 0) {
				syslog_error = 1;
				*errp = __NSW_CONF_PARSE_NOPOLICY;
				break;
			}
			/*
			 * Else blithely ignore problems on this line and
			 * go ahead with the next line.
			 */
		}
	}
	lmutex_unlock(&serialize_config);
	/*
	 * We have to drop the lock before calling fclose()/syslog().
	 */
	(void) fclose(fp);
	if (syslog_error)
		syslog_warning(dbase);
	return (retp);
}
/*
 * The master SIGFPE handler: dispatches on siginfo->si_code to the
 * per-code handler registered via sigfpe(); for codes with default
 * treatment that correspond to IEEE exceptions, dispatches on to the
 * matching ieee_handlers[] entry.  SIGFPE_ABORT (and the common
 * default) abort the process; SIGFPE_IGNORE simply returns.
 */
static void
_sigfpe_master(int sig, siginfo_t *siginfo, void *arg)
{
	ucontext_t *ucontext = arg;
	int i;
	int code;
	enum fp_exception_type exception;

	lmutex_lock(&sigfpe_lock);
	code = siginfo->si_code;
	/* Find index of handler. */
	for (i = 0; (i < N_SIGFPE_CODE) && (code != sigfpe_codes[i]); i++)
		continue;
	if (i >= N_SIGFPE_CODE)
		i = N_SIGFPE_CODE - 1;	/* unknown code: use the last slot */
	switch ((intptr_t)sigfpe_handlers[i]) {
	case ((intptr_t)(SIGFPE_DEFAULT)):
		/* map the si_code to its IEEE exception type */
		switch (code) {
		case FPE_FLTINV:
			exception = fp_invalid;
			goto ieee;
		case FPE_FLTRES:
			exception = fp_inexact;
			goto ieee;
		case FPE_FLTDIV:
			exception = fp_division;
			goto ieee;
		case FPE_FLTUND:
			exception = fp_underflow;
			goto ieee;
		case FPE_FLTOVF:
			exception = fp_overflow;
			goto ieee;
#if defined(__i386) || defined(__amd64)
		case FPE_FLTDEN:
			exception = fp_denormalized;
			goto ieee;
#endif
		default:
			/* The common default treatment is to abort. */
			break;
		}
		/* FALLTHROUGH */
	case ((intptr_t)(SIGFPE_ABORT)):
		abort();
		break;
	case ((intptr_t)(SIGFPE_IGNORE)):
		lmutex_unlock(&sigfpe_lock);
		return;
	default:
		/* User-defined not SIGFPE_DEFAULT or SIGFPE_ABORT. */
		(sigfpe_handlers[i])(sig, siginfo, ucontext);
		lmutex_unlock(&sigfpe_lock);
		return;
	}
ieee:
	switch ((intptr_t)ieee_handlers[(int)exception]) {
	case ((intptr_t)(SIGFPE_DEFAULT)):
		/* Error condition but ignore it. */
	case ((intptr_t)(SIGFPE_IGNORE)):
		/* Error condition but ignore it. */
		lmutex_unlock(&sigfpe_lock);
		return;
	case ((intptr_t)(SIGFPE_ABORT)):
		abort();
	default:
		(ieee_handlers[(int)exception])(sig, siginfo, ucontext);
		lmutex_unlock(&sigfpe_lock);
		return;
	}
}
/*
 * Return the address of a TLS variable for the current thread.
 * Run the constructors for newly-allocated dynamic TLS.
 */
void *
slow_tls_get_addr(TLS_index *tls_index)
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	TLS_modinfo *tlsp;
	ulong_t moduleid;
	tls_t *tlsent;
	caddr_t base;
	void (**initarray)(void);
	ulong_t arraycnt = 0;

	/*
	 * Defer signals until we have finished calling
	 * all of the constructors.
	 */
	sigoff(self);
	lmutex_lock(&tlsm->tls_lock);
	if ((moduleid = tls_index->ti_moduleid) < self->ul_ntlsent)
		tlsent = self->ul_tlsent;
	else {
		/* grow the per-thread tls_t array to cover all modules */
		ASSERT(moduleid < tlsm->tls_modinfo.tls_size);
		tlsent = lmalloc(tlsm->tls_modinfo.tls_size * sizeof (tls_t));
		if (self->ul_tlsent != NULL) {
			(void) memcpy(tlsent, self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
			lfree(self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
		}
		self->ul_tlsent = tlsent;
		self->ul_ntlsent = tlsm->tls_modinfo.tls_size;
	}
	tlsent += moduleid;
	if ((base = tlsent->tls_data) == NULL) {
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		if (tlsp->tm_memsz == 0) {	/* dlclose()d module? */
			base = NULL;
		} else if (tlsp->tm_flags & TM_FLG_STATICTLS) {
			/* static TLS is already allocated/initialized */
			base = (caddr_t)self - tlsp->tm_stattlsoffset;
			tlsent->tls_data = base;
			tlsent->tls_size = 0;	/* don't lfree() this space */
		} else {
			/* allocate/initialize the dynamic TLS */
			base = lmalloc(tlsp->tm_memsz);
			if (tlsp->tm_filesz != 0)
				(void) memcpy(base, tlsp->tm_tlsblock,
				    tlsp->tm_filesz);
			tlsent->tls_data = base;
			tlsent->tls_size = tlsp->tm_memsz;
			/* remember the constructors */
			arraycnt = tlsp->tm_tlsinitarraycnt;
			initarray = tlsp->tm_tlsinitarray;
		}
	}
	lmutex_unlock(&tlsm->tls_lock);

	/*
	 * Call constructors, if any, in ascending order.
	 * We have to do this after dropping tls_lock because
	 * we have no idea what the constructors will do.
	 * At least we have signals deferred until they are done.
	 */
	if (arraycnt) {
		do {
			(**initarray++)();
		} while (--arraycnt != 0);
	}

	if (base == NULL)	/* kludge to get x86/x64 to boot */
		base = (caddr_t)self - 512;
	sigon(self);
	return (base + tls_index->ti_tlsoffset);
}
/*
 * Returns the cached data if the locale name is the same.  If not,
 * returns NULL (cache miss).  The locdata is returned with a hold on
 * it, taken on behalf of the caller.  The caller should drop the hold
 * when it is finished.
 */
static struct locdata *
locdata_get_cache(int category, const char *locname)
{
	struct locdata *loc;

	if (category < 0 || category >= LC_ALL)
		return (NULL);

	/* Try cache first. */
	lmutex_lock(&cache_lock);
	loc = cache_data[category];
	if ((loc != NULL) && (strcmp(loc->l_lname, locname) == 0)) {
		lmutex_unlock(&cache_lock);
		return (loc);
	}

	/*
	 * Failing that try previously loaded locales (linear search) --
	 * this could be optimized to a hash, but its unlikely that a single
	 * application will ever need to work with more than a few locales.
	 */
	for (loc = cat_data[category]; loc != NULL; loc = loc->l_next) {
		if (strcmp(locname, loc->l_lname) == 0) {
			break;
		}
	}

	/*
	 * Finally, if we still don't have one, try loading the locale
	 * data from the actual on-disk data.
	 *
	 * We drop the lock (libc wants to ensure no internal locks
	 * are held when we call other routines required to read from
	 * files, allocate memory, etc.)  There is a small race here,
	 * but the consequences of the race are benign -- if multiple
	 * threads hit this at precisely the same point, we could
	 * wind up with duplicates of the locale data in the cache.
	 *
	 * This wastes the memory for an extra copy of the locale
	 * data, but there is no further harm beyond that.  Its not
	 * worth the effort to recode this to something "safe"
	 * (which would require rescanning the list, etc.), given
	 * that this race will probably never actually occur.
	 */
	if (loc == NULL) {
		lmutex_unlock(&cache_lock);
		loc = (*loaders[category])(locname);
		lmutex_lock(&cache_lock);
		if (loc != NULL)
			(void) strlcpy(loc->l_lname, locname,
			    sizeof (loc->l_lname));
	}

	/*
	 * Assuming we got one, update the cache, and stick us on the list
	 * of loaded locale data.  We insert into the head (more recent
	 * use is likely to win.)
	 */
	if (loc != NULL) {
		cache_data[category] = loc;
		if (!loc->l_cached) {
			loc->l_cached = 1;
			loc->l_next = cat_data[category];
			cat_data[category] = loc;
		}
	}

	lmutex_unlock(&cache_lock);
	return (loc);
}
/*
 * Create a thread pool that maintains between min_threads and
 * max_threads workers, with idle workers lingering for 'linger'
 * seconds.  An optional pthread attribute is cloned for the pool's
 * worker threads.  Returns the new pool, or NULL with errno set
 * (EINVAL for bad arguments, ENOMEM on allocation failure).
 */
tpool_t *
tpool_create(uint_t min_threads, uint_t max_threads, uint_t linger,
    pthread_attr_t *attr)
{
	tpool_t *tpool;
	void *stackaddr;
	size_t stacksize;
	size_t minstack;
	int error;

	if (min_threads > max_threads || max_threads < 1) {
		errno = EINVAL;
		return (NULL);
	}
	if (attr != NULL) {
		if (pthread_attr_getstack(attr, &stackaddr, &stacksize) != 0) {
			errno = EINVAL;
			return (NULL);
		}
		/*
		 * Allow only one thread in the pool with a specified stack.
		 * Require threads to have at least the minimum stack size.
		 */
		minstack = thr_min_stack();
		if (stackaddr != NULL) {
			if (stacksize < minstack || max_threads != 1) {
				errno = EINVAL;
				return (NULL);
			}
		} else if (stacksize != 0 && stacksize < minstack) {
			errno = EINVAL;
			return (NULL);
		}
	}

	tpool = lmalloc(sizeof (*tpool));
	if (tpool == NULL) {
		errno = ENOMEM;
		return (NULL);
	}
	(void) mutex_init(&tpool->tp_mutex, USYNC_THREAD, NULL);
	(void) cond_init(&tpool->tp_busycv, USYNC_THREAD, NULL);
	(void) cond_init(&tpool->tp_workcv, USYNC_THREAD, NULL);
	(void) cond_init(&tpool->tp_waitcv, USYNC_THREAD, NULL);
	tpool->tp_minimum = min_threads;
	tpool->tp_maximum = max_threads;
	tpool->tp_linger = linger;

	/*
	 * We cannot just copy the attribute pointer.
	 * We need to initialize a new pthread_attr_t structure
	 * with the values from the user-supplied pthread_attr_t.
	 * If the attribute pointer is NULL, we need to initialize
	 * the new pthread_attr_t structure with default values.
	 */
	error = _pthread_attr_clone(&tpool->tp_attr, attr);
	if (error) {
		lfree(tpool, sizeof (*tpool));
		errno = error;
		return (NULL);
	}

	/* make all pool threads be detached daemon threads */
	(void) pthread_attr_setdetachstate(&tpool->tp_attr,
	    PTHREAD_CREATE_DETACHED);
	(void) _pthread_attr_setdaemonstate_np(&tpool->tp_attr,
	    PTHREAD_CREATE_DAEMON_NP);

	/* insert into the global list of all thread pools */
	lmutex_lock(&thread_pool_lock);
	if (thread_pools == NULL) {
		/* first pool: the circular list is just this pool */
		tpool->tp_forw = tpool;
		tpool->tp_back = tpool;
		thread_pools = tpool;
	} else {
		thread_pools->tp_back->tp_forw = tpool;
		tpool->tp_forw = thread_pools;
		tpool->tp_back = thread_pools->tp_back;
		thread_pools->tp_back = tpool;
	}
	lmutex_unlock(&thread_pool_lock);

	return (tpool);
}
/*
 * sem_open(): open (or create, with O_CREAT) a named POSIX semaphore.
 * The variadic arguments (mode_t mode, uint_t value) are consumed only
 * when O_CREAT is set.  Returns the mapped semaphore address, or
 * SEM_FAILED with errno set.  The cr_flag bits track which resources
 * (open fd, created file, malloc'd descriptor, mapping) must be
 * released on the error path.
 */
sem_t *
sem_open(const char *path, int oflag, /* mode_t mode, int value */ ...)
{
	va_list ap;
	mode_t crmode = 0;
	sem_t *sem = NULL;
	struct stat64 statbuf;
	semaddr_t *next = NULL;
	int fd = 0;
	int error = 0;
	int cr_flag = 0;
	uint_t value = 0;

	if (__pos4obj_check(path) == -1)
		return (SEM_FAILED);

	/* acquire semaphore lock to have atomic operation */
	if (__pos4obj_lock(path, SEM_LOCK_TYPE) < 0)
		return (SEM_FAILED);

	/* modify oflag to have RDWR and filter CREATE mode only */
	oflag = (oflag & (O_CREAT|O_EXCL)) | (O_RDWR);
	if (oflag & O_CREAT) {
		/* fetch SEM_VALUE_MAX once; -1 marks "unavailable" */
		if (semvaluemax == 0 &&
		    (semvaluemax = _sysconf(_SC_SEM_VALUE_MAX)) <= 0)
			semvaluemax = -1;
		va_start(ap, oflag);
		crmode = va_arg(ap, mode_t);
		value = va_arg(ap, uint_t);
		va_end(ap);
		/* check value < the max for a named semaphore */
		if (semvaluemax < 0 ||
		    (ulong_t)value > (ulong_t)semvaluemax) {
			errno = EINVAL;
			goto out;
		}
	}

	errno = 0;
	if ((fd = __pos4obj_open(path, SEM_DATA_TYPE,
	    oflag, crmode, &cr_flag)) < 0)
		goto out;

	if (cr_flag)
		cr_flag = DFILE_CREATE | DFILE_OPEN;
	else
		cr_flag = DFILE_OPEN;

	/* find out inode # for the opened file */
	if (fstat64(fd, &statbuf) < 0)
		goto out;

	/* if created, acquire total_size in the file */
	if ((cr_flag & DFILE_CREATE) != 0) {
		if (ftruncate64(fd, (off64_t)sizeof (sem_t)) < 0)
			goto out;
	} else {
		/*
		 * if this semaphore has already been opened, inode
		 * will indicate then return the same semaphore address
		 */
		lmutex_lock(&semlock);
		for (next = semheadp; next != NULL; next = next->sad_next) {
			if (statbuf.st_ino == next->sad_inode &&
			    strcmp(path, next->sad_name) == 0) {
				(void) __close_nc(fd);
				lmutex_unlock(&semlock);
				(void) __pos4obj_unlock(path, SEM_LOCK_TYPE);
				return (next->sad_addr);
			}
		}
		lmutex_unlock(&semlock);
	}

	/* new sem descriptor to be allocated and new address to be mapped */
	if ((next = malloc(sizeof (semaddr_t))) == NULL) {
		errno = ENOMEM;
		goto out;
	}
	cr_flag |= ALLOC_MEM;

	/* LINTED */
	sem = (sem_t *)mmap64(NULL, sizeof (sem_t), PROT_READ|PROT_WRITE,
	    MAP_SHARED, fd, (off64_t)0);
	(void) __close_nc(fd);
	cr_flag &= ~DFILE_OPEN;
	if (sem == MAP_FAILED)
		goto out;
	cr_flag |= DFILE_MMAP;

	/* if created, initialize */
	if (cr_flag & DFILE_CREATE) {
		error = sema_init((sema_t *)sem, value, USYNC_PROCESS, 0);
		if (error) {
			errno = error;
			goto out;
		}
	}

	if (__pos4obj_unlock(path, SEM_LOCK_TYPE) == 0) {
		/* add to the list pointed by semheadp */
		lmutex_lock(&semlock);
		next->sad_next = semheadp;
		semheadp = next;
		next->sad_addr = sem;
		next->sad_inode = statbuf.st_ino;
		(void) strcpy(next->sad_name, path);
		lmutex_unlock(&semlock);
		return (sem);
	}
	/* fall into the error case */
out:
	/* undo whatever cr_flag says we acquired, preserving errno */
	error = errno;
	if ((cr_flag & DFILE_OPEN) != 0)
		(void) __close_nc(fd);
	if ((cr_flag & DFILE_CREATE) != 0)
		(void) __pos4obj_unlink(path, SEM_DATA_TYPE);
	if ((cr_flag & ALLOC_MEM) != 0)
		free(next);
	if ((cr_flag & DFILE_MMAP) != 0)
		(void) munmap((caddr_t)sem, sizeof (sem_t));
	(void) __pos4obj_unlock(path, SEM_LOCK_TYPE);
	errno = error;
	return (SEM_FAILED);
}
/*
 * gettxt: look up message string <msgnum> in message file <msgfile>,
 * where msg_id is "msgfile:msgnum" (an empty msgfile part falls back
 * to the catalog set by setcat(), held in cur_cat).
 *
 * Message files are mmap'd once per (locale, file) pair and cached on
 * the db_cache list under gettxt_lock.  If the file cannot be opened or
 * mapped in the current locale, the lookup is retried in the "C" locale
 * (goto try_C); if that also fails, dflt_str is returned via
 * handle_return().
 */
char *
gettxt(const char *msg_id, const char *dflt_str)
{
	struct db_cache *dbc;
	struct db_list *dbl;
	char msgfile[DB_NAME_LEN];	/* name of static shared library */
	int msgnum;			/* message number */
	char pathname[PATH_MAX];	/* full pathname to message file */
	int fd;
	struct stat64 sb;
	void *addr;
	char *tokp;
	size_t name_len;
	char *curloc;

	if ((msg_id == NULL) || (*msg_id == '\0')) {
		return (handle_return(dflt_str));
	}

	/* parse msg_id: it must contain ':' with a non-empty number part */
	if (((tokp = strchr(msg_id, ':')) == NULL) || *(tokp+1) == '\0')
		return (handle_return(dflt_str));
	if ((name_len = (tokp - msg_id)) >= DB_NAME_LEN)
		return (handle_return(dflt_str));
	if (name_len > 0) {
		/* explicit file name; name_len < DB_NAME_LEN was checked */
		(void) strncpy(msgfile, msg_id, name_len);
		msgfile[name_len] = '\0';
	} else {
		/* empty file part: use the catalog set via setcat() */
		lrw_rdlock(&_rw_cur_cat);
		if (cur_cat == NULL || *cur_cat == '\0') {
			lrw_unlock(&_rw_cur_cat);
			return (handle_return(dflt_str));
		}
		/*
		 * We know the following strcpy is safe.
		 */
		(void) strcpy(msgfile, cur_cat);
		lrw_unlock(&_rw_cur_cat);
	}
	/* the message-number part must be all digits */
	while (*++tokp) {
		if (!isdigit((unsigned char)*tokp))
			return (handle_return(dflt_str));
	}
	msgnum = atoi(msg_id + name_len + 1);
	curloc = setlocale(LC_MESSAGES, NULL);

	lmutex_lock(&gettxt_lock);
try_C:
	/* search the cache for this (locale, msgfile) pair */
	dbc = db_cache;
	while (dbc) {
		if (strcmp(curloc, dbc->loc) == 0) {
			dbl = dbc->info;
			while (dbl) {
				if (strcmp(msgfile, dbl->db_name) == 0) {
					/* msgfile found */
					lmutex_unlock(&gettxt_lock);
					goto msgfile_found;
				}
				dbl = dbl->next;
			}
			/* not found */
			break;
		}
		dbc = dbc->next;
	}
	if (dbc == NULL) {
		/* new locale */
		if ((dbc = lmalloc(sizeof (struct db_cache))) == NULL) {
			lmutex_unlock(&gettxt_lock);
			return (handle_return(dflt_str));
		}
		if ((dbc->loc = lmalloc(strlen(curloc) + 1)) == NULL) {
			lfree(dbc, sizeof (struct db_cache));
			lmutex_unlock(&gettxt_lock);
			return (handle_return(dflt_str));
		}
		dbc->info = NULL;
		(void) strcpy(dbc->loc, curloc);
		/* connect dbc to the dbc list */
		dbc->next = db_cache;
		db_cache = dbc;
	}
	if ((dbl = lmalloc(sizeof (struct db_list))) == NULL) {
		lmutex_unlock(&gettxt_lock);
		return (handle_return(dflt_str));
	}

	if (snprintf(pathname, sizeof (pathname),
	    _DFLT_LOC_PATH "%s" MESSAGES "%s", dbc->loc, msgfile) >=
	    sizeof (pathname)) {
		/* path would be truncated; give up on this lookup */
		lfree(dbl, sizeof (struct db_list));
		lmutex_unlock(&gettxt_lock);
		return (handle_return(dflt_str));
	}
	if ((fd = open(pathname, O_RDONLY)) == -1 ||
	    fstat64(fd, &sb) == -1 ||
	    (addr = mmap(NULL, (size_t)sb.st_size, PROT_READ, MAP_SHARED,
	    fd, 0L)) == MAP_FAILED) {
		if (fd != -1)
			(void) close(fd);
		lfree(dbl, sizeof (struct db_list));
		if (strcmp(dbc->loc, "C") == 0) {
			/* already in the C locale: nothing left to try */
			lmutex_unlock(&gettxt_lock);
			return (handle_return(dflt_str));
		}
		/* Change locale to C */
		curloc = (char *)loc_C;
		goto try_C;
	}
	/* the mapping persists for the life of the process; drop the fd */
	(void) close(fd);

	/* save file name, memory address, fd and size */
	(void) strcpy(dbl->db_name, msgfile);
	dbl->addr = (uintptr_t)addr;

	/* connect dbl to the dbc->info list */
	dbl->next = dbc->info;
	dbc->info = dbl;
	lmutex_unlock(&gettxt_lock);

msgfile_found:
	/*
	 * Catalog layout (as read here): word 0 is the message count;
	 * word <msgnum> is the byte offset of message <msgnum>.
	 * Entries are never removed from the cache, so reading dbl->addr
	 * after dropping gettxt_lock is safe.
	 */
	/* check if msgnum out of domain */
	if (msgnum <= 0 || msgnum > *(int *)dbl->addr)
		return (handle_return(dflt_str));

	/* return pointer to message */
	return ((char *)(dbl->addr + *(int *)(dbl->addr +
	    msgnum * sizeof (int))));
}
/*
 * monitor: set up (or, with lowpc == NULL, tear down and flush) a
 * profiling buffer for mcount()/profil(2)-based execution profiling.
 *
 * int (*alowpc)(), (*ahighpc)(); boundaries of text to be monitored
 * WORD *buffer; ptr to space for monitor data(WORDs)
 * size_t bufsize; size of above space(in WORDs)
 * size_t nfunc; max no. of functions whose calls are counted
 * (default nfunc is 300 on PDP11, 600 on others)
 *
 * The buffer is carved into three regions: (1) a struct hdr, (2) nfunc
 * call-count cells consumed by mcount() via the countbase/_countlimit
 * globals, and (3) the remainder, handed to profil(2) as the pc
 * histogram.  Active buffers are chained through ANCHOR records rooted
 * at firstAnchor/curAnchor and written out by writeBlocks().
 */
void
monitor(int (*alowpc)(void), int (*ahighpc)(void), WORD *buffer,
	size_t bufsize, size_t nfunc)
{
	uint_t scale;
	long text;
	char *s;
	struct hdr *hdrp;
	ANCHOR *newanchp;
	size_t ssiz;
	int error;
	char *lowpc = (char *)alowpc;
	char *highpc = (char *)ahighpc;

	lmutex_lock(&mon_lock);

	if (lowpc == NULL) {		/* true only at the end */
		error = 0;
		if (curAnchor != NULL) { /* if anything was collected!.. */
			/* stop the kernel histogram before flushing */
			profil(NULL, 0, 0, 0);
			if (writeBlocks() == 0)
				error = errno;
		}
		lmutex_unlock(&mon_lock);
		if (error) {
			errno = error;
			perror(mon_out);
		}
		return;
	}

	/*
	 * Ok - they want to submit a block for immediate use, for
	 * function call count consumption, and execution profile
	 * histogram computation.
	 * If the block fails sanity tests, just bag it.
	 * Next thing - get name to use. If PROFDIR is NULL, let's
	 * get out now - they want No Profiling done.
	 *
	 * Otherwise:
	 * Set the block hdr cells.
	 * Get an anchor for the block, and link the anchor+block onto
	 * the end of the chain.
	 * Init the grabba-cell externs (countbase/limit) for this block.
	 * Finally, call profil and return.
	 */
	ssiz = ((sizeof (struct hdr) + nfunc * sizeof (struct cnt)) /
	    sizeof (WORD));
	if (ssiz >= bufsize || lowpc >= highpc) {
		/* regions 1+2 would leave no histogram space, or bad range */
		lmutex_unlock(&mon_lock);
		return;
	}

	if ((s = getenv(PROFDIR)) == NULL) {	/* PROFDIR not in environment */
		mon_out = MON_OUT;		/* use default "mon.out" */
	} else if (*s == '\0') {	/* value of PROFDIR is NULL */
		lmutex_unlock(&mon_lock);
		return;		/* no profiling on this run */
	} else {	/* construct "PROFDIR/pid.progname" */
		int n;
		pid_t pid;
		char *name;
		size_t len;

		len = strlen(s);
		/* 15 is space for /pid.mon.out\0, if necessary */
		if ((mon_out = libc_malloc(len + strlen(___Argv[0]) + 15))
		    == NULL) {
			lmutex_unlock(&mon_lock);
			perror("");
			return;
		}
		(void) strcpy(mon_out, s);
		name = mon_out + len;
		*name++ = '/';		/* two slashes won't hurt */

		if ((pid = getpid()) <= 0)	/* extra test just in case */
			pid = 1;	/* getpid returns something inappropriate */

		/*
		 * Emit the decimal digits of pid by repeated division.
		 * NOTE(review): the n = 10000 start assumes pid < 100000;
		 * a larger pid would make pid/n exceed 9 and corrupt the
		 * name — confirm against the platform's pid range.
		 */
		/* suppress leading zeros */
		for (n = 10000; n > pid; n /= 10)
			;
		for (; ; n /= 10) {
			*name++ = pid/n + '0';
			if (n == 1)
				break;
			pid %= n;
		}
		*name++ = '.';

		if (___Argv != NULL) {	/* mcrt0.s executed */
			if ((s = strrchr(___Argv[0], '/')) != NULL)
				(void) strcpy(name, s + 1);
			else
				(void) strcpy(name, ___Argv[0]);
		} else {
			(void) strcpy(name, MON_OUT);
		}
	}

	hdrp = (struct hdr *)(uintptr_t)buffer;	/* initialize 1st region */
	hdrp->lpc = lowpc;
	hdrp->hpc = highpc;
	hdrp->nfns = nfunc;

	/* get an anchor for the block; the first one is statically allocated */
	newanchp = (curAnchor == NULL) ? &firstAnchor :
	    (ANCHOR *)libc_malloc(sizeof (ANCHOR));

	if (newanchp == NULL) {
		lmutex_unlock(&mon_lock);
		perror("monitor");
		return;
	}

	/* link anchor+block into chain */
	newanchp->monBuffer = hdrp;	/* new, down. */
	newanchp->next = NULL;		/* new, forward to NULL. */
	newanchp->prior = curAnchor;	/* new, backward. */
	if (curAnchor != NULL)
		curAnchor->next = newanchp;	/* old, forward to new. */
	newanchp->flags = HAS_HISTOGRAM;	/* note it has a histgm area */

	/* got it - enable use by mcount() */
	countbase = (char *)buffer + sizeof (struct hdr);
	_countlimit = countbase + (nfunc * sizeof (struct cnt));

	/* (set size of region 3) */
	newanchp->histSize = (int)
	    (bufsize * sizeof (WORD) - (_countlimit - (char *)buffer));

	/* done w/regions 1 + 2: setup 3 to activate profil processing. */
	buffer += ssiz;			/* move ptr past 2'nd region */
	bufsize -= ssiz;		/* no. WORDs in third region */
	/* no. WORDs of text */
	text = (highpc - lowpc + sizeof (WORD) - 1) / sizeof (WORD);

	/*
	 * scale is a 16 bit fixed point fraction with the decimal
	 * point at the left
	 */
	if (bufsize < text) {
		/* make sure cast is done first! */
		double temp = (double)bufsize;
		scale = (uint_t)((temp * (long)0200000L) / text);
	} else {
		/* scale must be less than 1 */
		scale = 0xffff;
	}
	bufsize *= sizeof (WORD);	/* bufsize into # bytes */
	profil(buffer, bufsize, (ulong_t)lowpc, scale);

	curAnchor = newanchp;	/* make latest addition, the cur anchor */
	lmutex_unlock(&mon_lock);
}
/*
 * regex: match stringp against a regular expression previously compiled
 * by regcmp(3G).  Returns a pointer one past the end of the matched
 * portion of the string (also setting the global __loc1 to the start of
 * the match), or NULL if there is no match.  The varargs receive up to
 * NSUBSTRINGS buffers into which matched subexpressions are copied.
 *
 * The whole body runs under regex_lock because the matcher communicates
 * through the globals __loc1, substring_startp/endp, return_arg_number
 * and the stringp_stack used by test_string().
 */
extern char *
regex(const char *regexp, const char *stringp, ...)
{
	va_list arg_listp;
	int char_size;
	const char *end_of_matchp;
	wchar_t regex_wchar;
	char *return_argp[NSUBSTRINGS];
	char *returned_substringp;
	int substringn;
	const char *substringp;
	wchar_t string_wchar;

	/* ____loc1() also acts as an availability check for the TSD slot */
	if (____loc1() == (char **)0) {
		return ((char *)0);
	} else {
		lmutex_lock(&regex_lock);
		__loc1 = (char *)0;
	}
	if ((stringp == (char *)0) || (regexp == (char *)0)) {
		lmutex_unlock(&regex_lock);
		return ((char *)0);
	}

	/* INITIALIZE SUBSTRINGS THAT MIGHT BE RETURNED IN VARARGS */
	substringn = 0;
	va_start(arg_listp, stringp);
	while (substringn < NSUBSTRINGS) {
		return_argp[substringn] = va_arg(arg_listp, char *);
		substring_startp[substringn] = (char *)0;
		return_arg_number[substringn] = -1;
		substringn++;
	}
	va_end(arg_listp);

	/* TEST THE STRING AGAINST THE REGULAR EXPRESSION */
	end_of_matchp = (char *)0;
	stringp_stackp = &stringp_stack[STRINGP_STACK_SIZE];

	if ((int)*regexp == (int)START_OF_STRING_MARK) {
		/*
		 * the match must start at the beginning of the string
		 */
		__loc1 = (char *)stringp;
		regexp++;
		end_of_matchp = test_string(stringp, regexp);

	} else if ((int)*regexp == (int)ASCII_CHAR) {
		/*
		 * test a string against a regular expression
		 * that starts with a single ASCII character:
		 *
		 * move to each character in the string that matches
		 * the first character in the regular expression
		 * and test the remaining string
		 */
		while ((*stringp != *(regexp + 1)) && (*stringp != '\0')) {
			stringp++;
		}
		while ((end_of_matchp == (char *)0) && (*stringp != '\0')) {
			end_of_matchp = test_string(stringp, regexp);
			if (end_of_matchp != (char *)0) {
				__loc1 = (char *)stringp;
			} else {
				stringp++;
				while ((*stringp != *(regexp + 1)) &&
				    (*stringp != '\0')) {
					stringp++;
				}
			}
		}

	} else if (!multibyte) {
		/*
		 * if the value of the "multibyte" macro defined in <euc.h>
		 * is false, regex() is running in an ASCII locale;
		 * test an ASCII string against an ASCII regular expression
		 * that doesn't start with a single ASCII character:
		 *
		 * move forward in the string one byte at a time, testing
		 * the remaining string against the regular expression
		 */
		end_of_matchp = test_string(stringp, regexp);
		while ((end_of_matchp == (char *)0) && (*stringp != '\0')) {
			stringp++;
			end_of_matchp = test_string(stringp, regexp);
		}
		if (end_of_matchp != (char *)0) {
			__loc1 = (char *)stringp;
		}

	} else if ((int)*regexp == (int)MULTIBYTE_CHAR) {
		/*
		 * test a multibyte string against a multibyte regular expression
		 * that starts with a single multibyte character:
		 *
		 * move to each character in the string that matches
		 * the first character in the regular expression
		 * and test the remaining string
		 */
		(void) get_wchar(&regex_wchar, regexp + 1);
		char_size = get_wchar(&string_wchar, stringp);
		/* char_size <= 0 from get_wchar() ends the scan */
		while ((string_wchar != regex_wchar) && (char_size > 0)) {
			stringp += char_size;
			char_size = get_wchar(&string_wchar, stringp);
		}
		while ((end_of_matchp == (char *)0) && (char_size > 0)) {
			end_of_matchp = test_string(stringp, regexp);
			if (end_of_matchp != (char *)0) {
				__loc1 = (char *)stringp;
			} else {
				stringp += char_size;
				char_size = get_wchar(&string_wchar, stringp);
				while ((string_wchar != regex_wchar) &&
				    (char_size > 0)) {
					stringp += char_size;
					char_size = get_wchar(&string_wchar,
					    stringp);
				}
			}
		}

	} else {
		/*
		 * test a multibyte string against a multibyte regular expression
		 * that doesn't start with a single multibyte character
		 *
		 * move forward in the string one multibyte character at a time,
		 * testing the remaining string against the regular expression
		 */
		end_of_matchp = test_string(stringp, regexp);
		char_size = get_wchar(&string_wchar, stringp);
		while ((end_of_matchp == (char *)0) && (char_size > 0)) {
			stringp += char_size;
			end_of_matchp = test_string(stringp, regexp);
			char_size = get_wchar(&string_wchar, stringp);
		}
		if (end_of_matchp != (char *)0) {
			__loc1 = (char *)stringp;
		}
	}

	/*
	 * Return substrings that matched subexpressions for which
	 * matching substrings are to be returned.
	 *
	 * NOTE:
	 *
	 * According to manual page regcmp(3G), regex() returns substrings
	 * that match subexpressions even when no substring matches the
	 * entire regular expression.
	 */
	substringn = 0;
	while (substringn < NSUBSTRINGS) {
		substringp = substring_startp[substringn];
		if ((substringp != (char *)0) &&
		    (return_arg_number[substringn] >= 0)) {
			returned_substringp =
			    return_argp[return_arg_number[substringn]];
			if (returned_substringp != (char *)0) {
				/*
				 * Copy the matched span into the caller's
				 * buffer; the caller must size it to fit.
				 */
				while (substringp <
				    substring_endp[substringn]) {
					*returned_substringp =
					    (char)*substringp;
					returned_substringp++;
					substringp++;
				}
				*returned_substringp = '\0';
			}
		}
		substringn++;
	}

	lmutex_unlock(&regex_lock);
	return ((char *)end_of_matchp);

} /* regex() */