/*
 * Register the function described by 'fptr' to be called at application
 * exit or owning shared object unload time.  This is a helper function
 * for atexit and __cxa_atexit.
 */
static int
atexit_register(struct atexit_fn *fptr)
{
    static struct atexit __atexit0;    /* one guaranteed table */
    struct atexit *p;

    _MUTEX_LOCK(&atexit_mutex);
    if ((p = __atexit) == NULL)
        __atexit = p = &__atexit0;
    else while (p->ind >= ATEXIT_SIZE) {
        struct atexit *old__atexit;

        old__atexit = __atexit;
        _MUTEX_UNLOCK(&atexit_mutex);
        if ((p = (struct atexit *)malloc(sizeof(*p))) == NULL)
            return (-1);
        _MUTEX_LOCK(&atexit_mutex);
        if (old__atexit != __atexit) {
            /* Lost race, retry operation */
            _MUTEX_UNLOCK(&atexit_mutex);
            free(p);
            _MUTEX_LOCK(&atexit_mutex);
            p = __atexit;
            continue;
        }
        p->ind = 0;
        p->next = __atexit;
        __atexit = p;
    }
    p->fns[p->ind++] = *fptr;
    _MUTEX_UNLOCK(&atexit_mutex);
    return (0);
}
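/*
 * Hedged sketch (not part of the source above): how atexit() and
 * __cxa_atexit() might wrap atexit_register().  The struct atexit_fn
 * field names and ATEXIT_FN_* constants are assumed from their use in
 * __cxa_finalize() below; real implementations differ in detail.
 */
int
atexit(void (*func)(void))
{
    struct atexit_fn fn;

    fn.fn_type = ATEXIT_FN_STD;     /* plain atexit-style handler */
    fn.fn_ptr.std_func = func;
    fn.fn_arg = NULL;
    fn.fn_dso = NULL;               /* not tied to any shared object */
    return (atexit_register(&fn));
}

int
__cxa_atexit(void (*func)(void *), void *arg, void *dso)
{
    struct atexit_fn fn;

    fn.fn_type = ATEXIT_FN_CXA;     /* C++ ABI handler with argument */
    fn.fn_ptr.cxa_func = func;
    fn.fn_arg = arg;
    fn.fn_dso = dso;                /* owning shared object handle */
    return (atexit_register(&fn));
}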
/*
 * Call all handlers registered with __cxa_atexit for the shared
 * object owning 'dso'.  Note: if 'dso' is NULL, then all remaining
 * handlers are called.
 */
void
__cxa_finalize(void *dso)
{
    struct dl_phdr_info phdr_info;
    struct atexit *p;
    struct atexit_fn fn;
    int n, has_phdr;

    if (dso != NULL) {
        has_phdr = _rtld_addr_phdr(dso, &phdr_info);
    } else {
        has_phdr = 0;
        global_exit = 1;
    }

    _MUTEX_LOCK(&atexit_mutex);
    for (p = __atexit; p; p = p->next) {
        for (n = p->ind; --n >= 0;) {
            if (p->fns[n].fn_type == ATEXIT_FN_EMPTY)
                continue;    /* already been called */
            fn = p->fns[n];
            if (dso != NULL && dso != fn.fn_dso) {
                /* wrong DSO ? */
                if (!has_phdr || global_exit ||
                    !__elf_phdr_match_addr(&phdr_info, fn.fn_ptr.cxa_func))
                    continue;
            }
            /*
             * Mark entry to indicate that this particular handler
             * has already been called.
             */
            p->fns[n].fn_type = ATEXIT_FN_EMPTY;
            _MUTEX_UNLOCK(&atexit_mutex);

            /* Call the function of correct type. */
            if (fn.fn_type == ATEXIT_FN_CXA)
                fn.fn_ptr.cxa_func(fn.fn_arg);
            else if (fn.fn_type == ATEXIT_FN_STD)
                fn.fn_ptr.std_func();
            _MUTEX_LOCK(&atexit_mutex);
        }
    }
    _MUTEX_UNLOCK(&atexit_mutex);
    if (dso == NULL)
        _MUTEX_DESTROY(&atexit_mutex);

    if (has_phdr && !global_exit && &__pthread_cxa_finalize != NULL)
        __pthread_cxa_finalize(&phdr_info);
}
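/*
 * Hedged sketch of the NULL-dso case: at process exit the runtime
 * typically runs every remaining handler by calling
 * __cxa_finalize(NULL) from exit().  This outline is illustrative,
 * not the source's exit(); real implementations also run stdio
 * cleanup and other teardown before _exit().
 */
void
exit(int status)
{
    __cxa_finalize(NULL);    /* run every handler still registered */
    /* ... flush stdio and other cleanup would go here ... */
    _exit(status);
}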
static struct tm *
gmtsub(const time_t * const timep, const long offset, struct tm * const tmp)
{
    struct tm *result;

    if (!gmt_is_set) {
        _MUTEX_LOCK(&gmt_mutex);
        if (!gmt_is_set) {
            gmtload(gmtptr);
            gmt_is_set = TRUE;
        }
        _MUTEX_UNLOCK(&gmt_mutex);
    }
    result = timesub(timep, offset, gmtptr, tmp);
#ifdef TM_ZONE
    /*
    ** Could get fancy here and deliver something such as
    ** "UTC+xxxx" or "UTC-xxxx" if offset is non-zero,
    ** but this is no time for a treasure hunt.
    */
    if (offset != 0)
        tmp->TM_ZONE = wildabbr;
    else
        tmp->TM_ZONE = gmtptr->chars;
#endif /* defined TM_ZONE */
    return result;
}
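/*
** Hedged sketch: in tzcode, gmtime() is a thin wrapper that calls
** gmtsub() with a zero offset.  The static result buffer shown here
** is illustrative; real tzcode keeps a shared static struct tm.
*/
struct tm *
gmtime(const time_t *timep)
{
    static struct tm tm;    /* shared result buffer, as in classic tzcode */

    return gmtsub(timep, 0L, &tm);
}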
/*
 * pclose --
 *    Pclose returns -1 if stream is not associated with a `popened' command,
 *    if already `pclosed', or waitpid returns an error.
 */
int
pclose(FILE *iop)
{
    struct pid *cur, *last;
    int pstat;
    pid_t pid;

    /* Find the appropriate file pointer. */
    _MUTEX_LOCK(&pidlist_lock);
    for (last = NULL, cur = pidlist; cur; last = cur, cur = cur->next)
        if (cur->fp == iop)
            break;
    if (cur == NULL) {
        _MUTEX_UNLOCK(&pidlist_lock);
        return (-1);
    }

    /* Remove the entry from the linked list. */
    if (last == NULL)
        pidlist = cur->next;
    else
        last->next = cur->next;
    _MUTEX_UNLOCK(&pidlist_lock);

    (void)fclose(iop);

    do {
        pid = waitpid(cur->pid, &pstat, 0);
    } while (pid == -1 && errno == EINTR);

    free(cur);

    return (pid == -1 ? -1 : pstat);
}
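/*
 * Usage sketch: pclose() returns the raw wait status, so callers
 * decode it with the <sys/wait.h> macros.  The function name here is
 * hypothetical, for illustration only.
 */
#include <stdio.h>
#include <sys/wait.h>

int
run_and_get_exit_code(const char *cmd)
{
    FILE *fp = popen(cmd, "r");
    int status;

    if (fp == NULL)
        return (-1);
    /* ... read the command's output from fp here ... */
    status = pclose(fp);
    if (status == -1 || !WIFEXITED(status))
        return (-1);
    return (WEXITSTATUS(status));
}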
struct dirent *
readdir(DIR *dirp)
{
    struct dirent *dp;

    _MUTEX_LOCK(&dirp->dd_lock);
    _readdir_unlocked(dirp, &dp, 1);
    _MUTEX_UNLOCK(&dirp->dd_lock);

    return (dp);
}
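/*
 * Usage sketch: the locked readdir() above supports the standard
 * iteration pattern; readdir() returns NULL at end-of-directory.
 * The function name is hypothetical.
 */
#include <dirent.h>
#include <stdio.h>

void
list_directory(const char *path)
{
    DIR *dirp = opendir(path);
    struct dirent *dp;

    if (dirp == NULL)
        return;
    while ((dp = readdir(dirp)) != NULL)
        printf("%s\n", dp->d_name);
    closedir(dirp);
}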
/*
 * return a pointer into a directory
 */
long
telldir(DIR *dirp)
{
    long i;

    _MUTEX_LOCK(&dirp->dd_lock);
    i = dirp->dd_curpos;
    _MUTEX_UNLOCK(&dirp->dd_lock);

    return (i);
}
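/*
 * Usage sketch: a telldir() cookie is only meaningful when handed
 * back to seekdir() on the same DIR stream.  Hypothetical helper,
 * for illustration only.
 */
#include <dirent.h>

void
rewind_to_mark(DIR *dirp)
{
    long mark = telldir(dirp);    /* remember the current position */

    (void)readdir(dirp);          /* consume one entry */
    seekdir(dirp, mark);          /* return to the saved position */
}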
/*
 * Find a free FILE for fopen et al.
 */
FILE *
__sfp(void)
{
    FILE *fp;
    int n;
    struct glue *g;

    if (!__sdidinit)
        __sinit();

    _MUTEX_LOCK(&sfp_mutex);
    for (g = &__sglue; g != NULL; g = g->next) {
        for (fp = g->iobs, n = g->niobs; --n >= 0; fp++)
            if (fp->_flags == 0)
                goto found;
    }

    /* release lock while mallocing */
    _MUTEX_UNLOCK(&sfp_mutex);
    if ((g = moreglue(NDYNAMIC)) == NULL)
        return (NULL);
    _MUTEX_LOCK(&sfp_mutex);
    lastglue->next = g;
    lastglue = g;
    fp = g->iobs;

found:
    fp->_flags = 1;         /* reserve this slot; caller sets real flags */
    _MUTEX_UNLOCK(&sfp_mutex);
    fp->_p = NULL;          /* no current pointer */
    fp->_w = 0;             /* nothing to read or write */
    fp->_r = 0;
    fp->_bf._base = NULL;   /* no buffer */
    fp->_bf._size = 0;
    fp->_lbfsize = 0;       /* not line buffered */
    fp->_file = -1;         /* no file */
    /* fp->_cookie = <any>; */ /* caller sets cookie, _read/_write etc */
    fp->_lb._base = NULL;   /* no line buffer */
    fp->_lb._size = 0;
    _FILEEXT_INIT(fp);

    return (fp);
}
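/*
 * Hedged sketch: how a caller such as fopen() typically uses __sfp().
 * __sflags() is assumed to translate the mode string into stdio flags
 * and open(2) flags, as in BSD stdio; error paths are trimmed.
 */
FILE *
fopen(const char *file, const char *mode)
{
    FILE *fp;
    int flags, oflags, f;

    if ((flags = __sflags(mode, &oflags)) == 0)
        return (NULL);
    if ((fp = __sfp()) == NULL)
        return (NULL);
    if ((f = open(file, oflags, DEFFILEMODE)) < 0) {
        fp->_flags = 0;     /* release the reserved slot */
        return (NULL);
    }
    fp->_file = f;
    fp->_flags = flags;     /* replace the reservation with real flags */
    return (fp);
}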
int
my_rwlock_read_unlock(my_rwlock_t *my_rwlock)
{
    _MUTEX_LOCK(my_rwlock->protect_reader_count);
    if (--my_rwlock->reader_count == 0) {
        /* last reader out: let a writer (or other process) proceed */
        sem_post(&(my_rwlock->no_processes));
    }
    if (my_rwlock->reader_count < 0) {
        /* more unlocks than locks: caller bug */
        LOCK_DEBUG("RWLOCK RUNLOCK");
        debug_crash();
    }
    _MUTEX_UNLOCK(my_rwlock->protect_reader_count);
    return 0;
}
int
my_rwlock_read_lock(my_rwlock_t *my_rwlock)
{
    sem_wait(&(my_rwlock->allow_readers));
    _MUTEX_LOCK(my_rwlock->protect_reader_count);
    if (++my_rwlock->reader_count == 1) {
        /* first reader in: block writers until the last reader leaves */
        sem_wait(&(my_rwlock->no_processes));
    }
    _MUTEX_UNLOCK(my_rwlock->protect_reader_count);
    sem_post(&(my_rwlock->allow_readers));
    return 0;
}
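/*
 * Usage sketch for the reader side above: only the two functions shown
 * are from the source; my_rwlock initialization and the writer path
 * are not shown there.  The task function is hypothetical.
 */
void
reader_task(my_rwlock_t *rw)
{
    my_rwlock_read_lock(rw);
    /* ... read shared state; other readers may hold the lock too ... */
    my_rwlock_read_unlock(rw);
}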
/*
 * Call all handlers registered with __cxa_atexit for the shared
 * object owning 'dso'.  Note: if 'dso' is NULL, then all remaining
 * handlers are called.
 */
void
__cxa_finalize(void *dso)
{
    struct atexit *p;
    struct atexit_fn fn;
    int n;

    _MUTEX_LOCK(&atexit_mutex);
    for (p = __atexit; p; p = p->next) {
        for (n = p->ind; --n >= 0;) {
            if (p->fns[n].fn_type == ATEXIT_FN_EMPTY) {
                continue;    /* already been called */
            }
            if (dso != NULL && dso != p->fns[n].fn_dso) {
                continue;    /* wrong DSO */
            }
            fn = p->fns[n];
            /*
             * Mark entry to indicate that this particular handler
             * has already been called.
             */
            p->fns[n].fn_type = ATEXIT_FN_EMPTY;
            _MUTEX_UNLOCK(&atexit_mutex);

            /* Call the function of correct type. */
            if (fn.fn_type == ATEXIT_FN_CXA) {
                fn.fn_ptr.cxa_func(fn.fn_arg);
            } else if (fn.fn_type == ATEXIT_FN_STD) {
                fn.fn_ptr.std_func();
            }
            _MUTEX_LOCK(&atexit_mutex);
        }
    }
    _MUTEX_UNLOCK(&atexit_mutex);
}
void
watchdog_remove_watched(struct watched_s *watched)
{
    struct watchdog_s *w;

    daemon_assert(invariant(watched->watchdog));

    w = watched->watchdog;
    _MUTEX_LOCK(w->mutex);
    delete(w, watched);
    _MUTEX_UNLOCK(w->mutex);

    daemon_assert(invariant(w));
}
void
watchdog_add_watched(struct watchdog_s *w, struct watched_s *watched)
{
    daemon_assert(invariant(w));

    _MUTEX_LOCK(w->mutex);
    watched->watched_thread = pthread_self();
    watched->watchdog = w;
    insert(w, watched);
    _MUTEX_UNLOCK(w->mutex);

    daemon_assert(invariant(w));
}
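/*
 * Hedged usage sketch for the watchdog API above: a worker thread
 * registers itself before a monitored operation and deregisters
 * afterwards.  Any struct watched_s fields beyond those set by
 * watchdog_add_watched() are assumed to be handled inside insert();
 * the function name here is hypothetical.
 */
void
guarded_operation(struct watchdog_s *w)
{
    struct watched_s watched;

    watchdog_add_watched(w, &watched);
    /* ... blocking work monitored by the watchdog ... */
    watchdog_remove_watched(&watched);
}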
/*
 * __sinit() is called whenever stdio's internal variables must be set up.
 */
void
__sinit(void)
{
    static void *sinit_mutex;
    int i;

    _MUTEX_LOCK(&sinit_mutex);
    if (__sdidinit)
        goto out;    /* bail out if caller lost the race */
    for (i = 0; i < FOPEN_MAX - 3; i++) {
        _FILEEXT_SETUP(usual + i, usualext + i);
    }
    /* make sure we clean up on exit */
    __atexit_register_cleanup(_cleanup);    /* conservative */
    __sdidinit = 1;
out:
    _MUTEX_UNLOCK(&sinit_mutex);
}
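/*
 * Design note: call sites do the cheap __sdidinit test outside the
 * lock (see __sfp() above), and __sinit() re-checks under sinit_mutex
 * so a caller that loses the race simply falls through.  Hypothetical
 * call site, for illustration only:
 */
static int
stdio_entry_sketch(void)
{
    if (!__sdidinit)
        __sinit();    /* harmless if another thread already won */
    /* ... now safe to touch stdio's internal tables ... */
    return (0);
}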
GOOD_OR_BAD Netlink_Parse_Get(struct netlink_parse *nlp)
{
    struct nlmsghdr peek_nlm;

    // first peek at message to get length and details
    LEVEL_DEBUG("Wait to peek at message");
    int recv_len = recv(Inbound_Control.w1_monitor->pown->file_descriptor,
        &peek_nlm, W1_NLM_LENGTH, MSG_PEEK);

    // Set time of last read
    _MUTEX_LOCK(Inbound_Control.w1_monitor->master.w1_monitor.read_mutex);
    timernow(&(Inbound_Control.w1_monitor->master.w1_monitor.last_read));
    _MUTEX_UNLOCK(Inbound_Control.w1_monitor->master.w1_monitor.read_mutex);

    LEVEL_DEBUG("Pre-parse header: %u bytes len=%u type=%u seq=%u|%u pid=%u",
        recv_len, peek_nlm.nlmsg_len, peek_nlm.nlmsg_type,
        NL_BUS(peek_nlm.nlmsg_seq), NL_SEQ(peek_nlm.nlmsg_seq),
        peek_nlm.nlmsg_pid);
    if (recv_len < 0) {
        ERROR_DEBUG("Netlink (w1) recv header error");
        return gbBAD;
    }

    // allocate space
    nlp->nlm = owmalloc(peek_nlm.nlmsg_len);
    if (nlp->nlm == NULL) {
        LEVEL_DEBUG("Netlink (w1) Cannot allocate %d byte buffer for data",
            peek_nlm.nlmsg_len);
        return gbBAD;
    }

    // read whole packet
    recv_len = recv(Inbound_Control.w1_monitor->pown->file_descriptor,
        &(nlp->nlm[0]), peek_nlm.nlmsg_len, 0);
    if (recv_len == -1) {
        ERROR_DEBUG("Netlink (w1) recv body error");
        return gbBAD;
    }

    if (GOOD(Netlink_Parse_Buffer(nlp))) {
        LEVEL_DEBUG("Netlink read -----------------");
        Netlink_Parse_Show(nlp);
        return gbGOOD;
    }
    return gbBAD;
}
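// The peek-then-allocate pattern above in miniature: read just the
// fixed-size netlink header with MSG_PEEK, size a buffer from
// nlmsg_len, then receive the full message.  Minimal sketch under
// assumptions: 'fd' is an already-bound netlink socket, error
// handling is trimmed, and the helper name is hypothetical.
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/netlink.h>

struct nlmsghdr *
recv_whole_nlmsg(int fd)
{
    struct nlmsghdr peek;
    struct nlmsghdr *msg;

    // peek at the header without consuming the message
    if (recv(fd, &peek, sizeof(peek), MSG_PEEK) < (ssize_t)sizeof(peek))
        return NULL;
    if (peek.nlmsg_len < sizeof(peek))  // malformed length
        return NULL;
    if ((msg = malloc(peek.nlmsg_len)) == NULL)
        return NULL;
    // now consume the whole packet
    if (recv(fd, msg, peek.nlmsg_len, 0) < 0) {
        free(msg);
        return NULL;
    }
    return msg;
}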
FILE *
popen(const char *program, const char *type)
{
    struct pid * volatile cur;
    FILE *iop;
    int pdes[2];
    pid_t pid;

    debug("popen(\"%s\", \"%s\")", program, type);

    if ((*type != 'r' && *type != 'w') || type[1] != '\0') {
        errno = EINVAL;
        return (NULL);
    }

    if ((cur = malloc(sizeof(struct pid))) == NULL)
        return (NULL);

    if (pipe(pdes) < 0) {
        free(cur);
        return (NULL);
    }

    _MUTEX_LOCK(&pidlist_lock);
    switch (pid = vfork()) {
    case -1:            /* Error. */
        _MUTEX_UNLOCK(&pidlist_lock);
        (void)close(pdes[0]);
        (void)close(pdes[1]);
        free(cur);
        return (NULL);
        /* NOTREACHED */
    case 0:             /* Child. */
      {
        struct pid *pcur;

        /*
         * because vfork() instead of fork(), must leak FILE *,
         * but luckily we are terminally headed for an execl()
         */
        for (pcur = pidlist; pcur; pcur = pcur->next)
            close(fileno(pcur->fp));

        if (*type == 'r') {
            int tpdes1 = pdes[1];

            (void)close(pdes[0]);
            /*
             * We must NOT modify pdes, due to the
             * semantics of vfork.
             */
            if (tpdes1 != STDOUT_FILENO) {
                (void)dup2(tpdes1, STDOUT_FILENO);
                (void)close(tpdes1);
                tpdes1 = STDOUT_FILENO;
            }
        } else {
            (void)close(pdes[1]);
            if (pdes[0] != STDIN_FILENO) {
                (void)dup2(pdes[0], STDIN_FILENO);
                (void)close(pdes[0]);
            }
        }
        execl(_PATH_BSHELL, "sh", "-c", program, (char *)NULL);
        _exit(127);
        /* NOTREACHED */
      }
      break;
    }
    _MUTEX_UNLOCK(&pidlist_lock);

    /* Parent; assume fdopen can't fail. */
    if (*type == 'r') {
        iop = fdopen(pdes[0], type);
        (void)close(pdes[1]);
    } else {
        iop = fdopen(pdes[1], type);
        (void)close(pdes[0]);
    }

    /* Link into list of file descriptors. */
    cur->fp = iop;
    cur->pid = pid;
    _MUTEX_LOCK(&pidlist_lock);
    cur->next = pidlist;
    pidlist = cur;
    _MUTEX_UNLOCK(&pidlist_lock);

    return (iop);
}
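/*
 * Usage sketch pairing popen() with the pclose() shown earlier: read
 * a command's output line by line.  The function name is hypothetical.
 */
#include <stdio.h>

void
print_command_output(const char *cmd)
{
    char line[1024];
    FILE *fp = popen(cmd, "r");

    if (fp == NULL)
        return;
    while (fgets(line, sizeof(line), fp) != NULL)
        fputs(line, stdout);
    (void)pclose(fp);    /* reaps the shell via waitpid() */
}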
int
res_init(void)
{
    static void *resinit_mutex;
    struct asr_ctx *ac;
    int i;

    ac = _asr_use_resolver(NULL);

    /*
     * The first thread to call res_init() will set up the global _res
     * structure from the async context, without overriding fields set
     * early by the user.
     */
    _MUTEX_LOCK(&resinit_mutex);
    if (!(_res.options & RES_INIT)) {
        if (_res.retry == 0)
            _res.retry = ac->ac_nsretries;
        if (_res.options == 0)
            _res.options = ac->ac_options;
        if (_res.lookups[0] == '\0')
            strlcpy(_res.lookups, ac->ac_db, sizeof(_res.lookups));

        for (i = 0; i < ac->ac_nscount && i < MAXNS; i++) {
            /*
             * No need to check for length since we copy to a
             * struct sockaddr_storage with a size of 256 bytes
             * and sa_len has only 8 bits.
             */
            memcpy(&_res_ext.nsaddr_list[i], ac->ac_ns[i],
                ac->ac_ns[i]->sa_len);
            if (ac->ac_ns[i]->sa_len <= sizeof(_res.nsaddr_list[i]))
                memcpy(&_res.nsaddr_list[i], ac->ac_ns[i],
                    ac->ac_ns[i]->sa_len);
            else
                memset(&_res.nsaddr_list[i], 0,
                    sizeof(_res.nsaddr_list[i]));
        }
        _res.nscount = i;
        _res.options |= RES_INIT;
    }
    _MUTEX_UNLOCK(&resinit_mutex);

    /*
     * If the program is not threaded, we want to reflect (some) changes
     * made by the user to the global _res structure.
     * This is a bit of a hack: if there is already an async query on
     * this context, it might change things behind its back.  It is ok
     * as long as the user only uses the blocking resolver API.
     * If needed we could consider cloning the context if there is
     * a running query.
     */
    if (!__isthreaded) {
        ac->ac_nsretries = _res.retry;
        ac->ac_options = _res.options;
        strlcpy(ac->ac_db, _res.lookups, sizeof(ac->ac_db));
        ac->ac_dbcount = strlen(ac->ac_db);
    }

    _asr_ctx_unref(ac);

    return (0);
}
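/*
 * Usage sketch: because this implementation only fills in _res fields
 * that are still zero, a caller can pre-set some of them before the
 * first res_init() call and have them honored.  Hypothetical helper,
 * standard <resolv.h> names.
 */
#include <resolv.h>

void
setup_resolver(void)
{
    _res.retry = 2;                        /* fewer retries than default */
    _res.options = RES_DEFAULT | RES_DEBUG;
    res_init();                            /* fills in the remaining fields */
}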
/* The device locks (devlock) are kept in a tree */
ZERO_OR_ERROR DeviceLockGet(struct parsedname *pn)
{
    struct devlock *local_devicelock;
    struct devlock *tree_devicelock;
    struct dev_opaque *opaque;

    if (pn->selected_device == DeviceSimultaneous) {
        /* Shouldn't call DeviceLockGet() on DeviceSimultaneous. No sn exists */
        return 0;
    }

    /* Cannot lock without knowing which bus since the device trees are bus-specific */
    if (pn->selected_connection == NO_CONNECTION) {
        return -EINVAL;
    }

    /* Need locking? */
    /* Exclude external */
    if (pn->selected_filetype->read == FS_r_external
        || pn->selected_filetype->write == FS_w_external) {
        return 0;
    }

    // Test type
    switch (pn->selected_filetype->format) {
    case ft_directory:
    case ft_subdir:
        return 0;
    default:
        break;
    }

    // Ignore static and statistic types
    switch (pn->selected_filetype->change) {
    case fc_static:
    case fc_statistic:
        return 0;
    default:
        break;
    }

    // Create a devlock block to add to the tree
    local_devicelock = owmalloc(sizeof(struct devlock));
    if (local_devicelock == NULL) {
        return -ENOMEM;
    }
    memcpy(local_devicelock->sn, pn->sn, SERIAL_NUMBER_SIZE);

    DEVTREE_LOCK(pn);
    /* in->dev_db points to the root of a tree of queries that are using this device */
    opaque = (struct dev_opaque *)tsearch(local_devicelock,
        &(pn->selected_connection->dev_db), dev_compare);
    if (opaque == NULL) {    // unfound and uncreatable
        DEVTREE_UNLOCK(pn);
        owfree(local_devicelock);    // kill the allocated devlock
        return -ENOMEM;
    }
    tree_devicelock = opaque->key;
    if (local_devicelock == tree_devicelock) {    // new device slot
        // No longer "local" -- this devlock now belongs to the device tree.
        // It will need to be freed later, when the user count returns to zero.
        _MUTEX_INIT(tree_devicelock->lock);    // create a mutex
        tree_devicelock->users = 0;
    } else {    // existing device slot
        // kill the locally allocated devlock (a matching one already exists)
        owfree(local_devicelock);
    }
    ++(tree_devicelock->users);    // add our claim to the device
    DEVTREE_UNLOCK(pn);
    _MUTEX_LOCK(tree_devicelock->lock);    // now grab the device
    pn->lock = tree_devicelock;    // use this new devlock
    return 0;
}
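// Hedged sketch of the matching release path.  The name and details
// here are hypothetical, inferred only from the fields and macros used
// above (users, lock, dev_db, dev_compare): drop the per-device mutex,
// then decrement the user count under the tree lock, deleting and
// freeing the devlock once it has no users left.
static void
device_lock_release_sketch(struct parsedname *pn)
{
    struct devlock *dl = pn->lock;

    if (dl == NULL) {
        return;
    }
    pn->lock = NULL;
    _MUTEX_UNLOCK(dl->lock);    // give up the device itself
    DEVTREE_LOCK(pn);
    if (--dl->users == 0) {     // last user: remove from the tree
        tdelete(dl, &(pn->selected_connection->dev_db), dev_compare);
        DEVTREE_UNLOCK(pn);
        _MUTEX_DESTROY(dl->lock);
        owfree(dl);
    } else {
        DEVTREE_UNLOCK(pn);
    }
}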