/* Look up an existing connection (CLB) for the given node/task pair.
 *
 * Resolves (node, task) to a sockaddr_in and scans the global connection
 * queue rooted at ntd_root for a CLB whose peer matches.  Returns the
 * matching CLB, or NULL if resolution fails or no connection exists.
 *
 * The queue walk is done with the NTD mutex signal set blocked so the
 * async I/O handlers cannot mutate the queue underneath us.
 */
struct CLB *cmu_getclb(cmi_descriptor *node, cmi_descriptor *task)
{
	cmi_status_t status;
	struct CLB *p;
	que_ent_ptr_t qp;
	sigset_t oset;
	struct sockaddr_in in;
	int rc;

	status = cmj_resolve_nod_tnd(node, task, &in);
	if (CMI_ERROR(status))
		return NULL;
	if (ntd_root)
	{
		SIGPROCMASK(SIG_BLOCK, &ntd_root->mutex_set, &oset, rc);
		for (qp = RELQUE2PTR(ntd_root->cqh.fl); qp != &ntd_root->cqh; qp = RELQUE2PTR(p->cqe.fl))
		{
			p = QUEENT2CLB(qp, cqe);
			if (p->peer.sin_port == in.sin_port
			    && 0 == memcmp(&p->peer.sin_addr, &in.sin_addr, SIZEOF(in.sin_addr)))
			{	/* (port, address) pair is necessary and sufficient for uniqueness. There might be
				 * other fields in sockaddr_in that might be implementation dependent. So, compare
				 * only (port, address) pair.
				 */
				/* Use the SIGPROCMASK wrapper (not a raw sigprocmask() call) so the mask
				 * restore follows the same convention/error capture as every other
				 * restore in this module.
				 */
				SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
				return p;
			}
		}
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
	}
	return NULL;
}
/* Start a write on connection "lnk" and wait for it to complete.
 *
 * With the NTD mutex signal set blocked, kicks off the write via
 * cmj_write_start() and then sleeps in sigsuspend(&oset) - atomically
 * reopening the mask so the async I/O signal handler can run and drive
 * the transfer - until the CLB leaves the CM_CLB_WRITE state or an AST
 * (completion callback) is armed, in which case completion is delivered
 * asynchronously instead of being waited for here.
 *
 * Returns the status from cmj_write_start() on immediate failure, else
 * the I/O status recovered from the CLB once the write finishes.
 */
cmi_status_t cmi_write(struct CLB *lnk)
{
	sigset_t oset;
	cmi_status_t status;
	struct NTD *tsk = lnk->ntd;
	int rc;

	CMI_DPRINT(("ENTER CMI_WRITE, AST 0x%x\n", lnk->ast));
	/* Block the network signals before starting so no completion can race us */
	SIGPROCMASK(SIG_BLOCK, &tsk->mutex_set, &oset, rc);
	status = cmj_write_start(lnk);
	if (CMI_ERROR(status))
	{
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
		CMI_DPRINT(("EXIT CMI_WRITE ERROR %d\n", status));
		return status;
	}
	cmj_housekeeping();
	/* No wait when an AST is set: completion is reported via the callback */
	while (lnk->sta == CM_CLB_WRITE && !lnk->ast)
	{
		sigsuspend(&oset);	/* wait with the original (open) mask for I/O signals */
		cmj_housekeeping();
		/* recover status */
		if (lnk->sta != CM_CLB_WRITE)
			status = CMI_CLB_IOSTATUS(lnk);
	}
	SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
	CMI_DPRINT(("EXIT CMI_WRITE sta = %d\n", lnk->sta));
	return status;
}
/* Look up an existing connection (CLB) for the given node/task pair
 * (getaddrinfo-based variant).
 *
 * Resolves (node, task) to an addrinfo and scans the global connection
 * queue rooted at ntd_root for a CLB whose stored peer address matches.
 * Returns the matching CLB, or NULL if resolution fails or no connection
 * exists.  The scan runs with the NTD mutex signal set blocked so async
 * I/O handlers cannot mutate the queue mid-walk.
 */
struct CLB *cmu_getclb(cmi_descriptor *node, cmi_descriptor *task)
{
	cmi_status_t status;
	struct CLB *p;
	que_ent_ptr_t qp;
	sigset_t oset;
	struct addrinfo *ai_ptr;
	int rc;

	status = cmj_getsockaddr(node, task, &ai_ptr);
	if (CMI_ERROR(status))
		return NULL;
	if (ntd_root)
	{
		SIGPROCMASK(SIG_BLOCK, &ntd_root->mutex_set, &oset, rc);
		for (qp = RELQUE2PTR(ntd_root->cqh.fl); qp != &ntd_root->cqh; qp = RELQUE2PTR(p->cqe.fl))
		{
			p = QUEENT2CLB(qp, cqe);
			/* BUGFIX: this must be memcmp, not memcpy.  memcpy returns its
			 * destination pointer (never 0 here), so the old test could never
			 * succeed, and it clobbered the freshly resolved ai_addr with the
			 * candidate's peer address.  We want a byte-wise COMPARISON of the
			 * resolved address against the stored peer address.
			 */
			if (0 == memcmp(ai_ptr->ai_addr, (sockaddr_ptr)(&p->peer_sas), ai_ptr->ai_addrlen))
			{
				SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
				return p;
			}
		}
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
	}
	return NULL;
}
/* Re-validate the unwind cache for address space "as" by walking the list
 * of loaded objects.  The full signal mask is installed for the duration of
 * dl_iterate_phdr() and the caller's mask is restored before returning the
 * iteration result.
 */
static inline int
validate_cache (unw_addr_space_t as)
{
  intrmask_t old_mask;
  int result;

  SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &old_mask);
  result = dl_iterate_phdr (check_callback, as);
  SIGPROCMASK (SIG_SETMASK, &old_mask, NULL);
  return result;
}
/* Flush the VM's per-call count buffer into the profiler's statistics
 * hash table (statHash), incrementing the call count (car of each entry's
 * (count . samples) pair) once per buffered call.  SIGPROF delivery is
 * blocked for the duration on non-Windows platforms so the sampling
 * handler cannot run while the hash table is being mutated.
 */
void Scm_ProfilerCountBufferFlush(ScmVM *vm)
{
    if (vm->prof == NULL) return; /* for safety */
    if (vm->prof->currentCount == 0) return;

    /* suspend itimer during hash table operation */
#if !defined(GAUCHE_WINDOWS)
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGPROF);
    SIGPROCMASK(SIG_BLOCK, &set, NULL);
#endif /* !GAUCHE_WINDOWS */

    int ncounts = vm->prof->currentCount;
    for (int i=0; i<ncounts; i++) {
        ScmObj e;
        int cnt;
        ScmObj func = vm->prof->counts[i].func;
        if (SCM_METHODP(func) && SCM_METHOD(func)->func == NULL) {
            /* func is Scheme-defined method.  Record the code of
               method body, so that we can match it with sampling
               profiler later. */
            func = SCM_OBJ(SCM_METHOD(func)->data);
        }
        /* BUGFIX: key the table by the (possibly remapped) FUNC, not the raw
         * buffered counts[i].func - otherwise the method-body remap above is
         * computed and then discarded, and Scheme-defined methods can never
         * be matched with the sampling profiler's entries.
         */
        e = Scm_HashTableSet(vm->prof->statHash, func,
                             SCM_FALSE, SCM_DICT_NO_OVERWRITE);
        if (SCM_FALSEP(e)) {
            /* first time we see this function: install a fresh (0 . 0) entry */
            e = Scm_HashTableSet(vm->prof->statHash, func,
                                 Scm_Cons(SCM_MAKE_INT(0), SCM_MAKE_INT(0)), 0);
        }
        SCM_ASSERT(SCM_PAIRP(e));
        cnt = SCM_INT_VALUE(SCM_CAR(e)) + 1;
        SCM_SET_CAR(e, SCM_MAKE_INT(cnt));
    }
    vm->prof->currentCount = 0;

    /* resume itimer */
#if !defined(GAUCHE_WINDOWS)
    SIGPROCMASK(SIG_UNBLOCK, &set, NULL);
#endif /* !GAUCHE_WINDOWS */
}
/* Write the in-memory context entries back to the context file (ctxpath),
 * if the context has been modified.
 *
 * May delegate the write to a forked child via m_chkids(): a positive
 * return means the child did (or will do) the save, so the parent returns
 * immediately; a return of 0 means WE are the child, and we _exit(0) after
 * writing.  HUP/INT/QUIT/TERM are blocked around the locked write so the
 * file is not left truncated by an interrupting signal.
 */
void context_save (void)
{
	int action;
	register struct node *np;
	FILE *out;
	sigset_t set, oset;

	/* No context in use -- silently ignore any changes! */
	if (!ctxpath)
		return;
	if (!(ctxflags & CTXMOD))
		return;		/* nothing changed since last save */
	ctxflags &= ~CTXMOD;	/* clear the dirty bit before writing */

	if ((action = m_chkids ()) > 0)
		return;		/* child did it for us */

	/* block a few signals */
	sigemptyset (&set);
	sigaddset (&set, SIGHUP);
	sigaddset (&set, SIGINT);
	sigaddset (&set, SIGQUIT);
	sigaddset (&set, SIGTERM);
	SIGPROCMASK (SIG_BLOCK, &set, &oset);

	/* lkfopen/lkfclose take and release the dot-lock on the context file */
	if (!(out = lkfopen (ctxpath, "w")))
		adios (ctxpath, "unable to write");
	for (np = m_defs; np; np = np->n_next)
		if (np->n_context)
			fprintf (out, "%s: %s\n", np->n_name, np->n_field);
	lkfclose (out, ctxpath);

	SIGPROCMASK (SIG_SETMASK, &oset, &set);  /* reset the signal mask */

	if (action == 0)
		_exit (0);	/* we are child, time to die */
}
/* Start a read on connection "lnk" and wait for it to complete.
 *
 * Resets the current buffer length (cbl) to the maximum (mbl), then, with
 * the NTD mutex signal set blocked, kicks off the read via cmj_read_start()
 * and sleeps in sigsuspend(&oset) - atomically reopening the mask so the
 * async I/O signal handler can drive the transfer - until the CLB leaves
 * the CM_CLB_READ state or an AST (completion callback) is armed, in which
 * case completion is delivered asynchronously instead of being waited for.
 *
 * Returns the status from cmj_read_start() on immediate failure, else the
 * I/O status recovered from the CLB once the read finishes.
 */
cmi_status_t cmi_read(struct CLB *lnk)
{
	sigset_t oset;
	cmi_status_t status;
	struct NTD *tsk = lnk->ntd;
	int rc;

	CMI_DPRINT(("ENTER CMI_READ, AST 0x%x\n", lnk->ast));
	lnk->cbl = lnk->mbl;	/* expect up to a full buffer */
	/* Block the network signals before starting so no completion can race us */
	SIGPROCMASK(SIG_BLOCK, &tsk->mutex_set, &oset, rc);
	status = cmj_read_start(lnk);
	if (CMI_ERROR(status))
	{
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
		CMI_DPRINT(("EXIT CMI_READ ERROR CODE %d\n", status));
		return status;
	}
	/*
	 * At this point see if the I/O has completed
	 * by probing the sta CLB field.
	 */
	cmj_housekeeping();
	/* No wait when an AST is set: completion is reported via the callback */
	while (lnk->sta == CM_CLB_READ && !lnk->ast)
	{
		sigsuspend(&oset);	/* wait with the original (open) mask for I/O signals */
		cmj_housekeeping();
		/* recover status */
		if (lnk->sta != CM_CLB_READ)
			status = CMI_CLB_IOSTATUS(lnk);
	}
	SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
	CMI_DPRINT(("EXIT CMI_READ sta = %d\n", lnk->sta));
	return status;
}
/* Locate the unwind-table entry covering instruction pointer "ip" and hand
 * off to tdep_search_unwind_table() to fill in the proc-info "pi".
 *
 * Two discovery strategies, selected at compile time:
 *   - HAVE_DL_ITERATE_PHDR: walk the loaded objects (with the full signal
 *     mask installed around the walk); if no object covers ip, fall back to
 *     the (lazily loaded) kernel unwind table.
 *   - HAVE_DLMODINFO: ask the dynamic loader directly for the module
 *     descriptor and build a table descriptor from its unwind header.
 */
HIDDEN int
tdep_find_proc_info (unw_addr_space_t as, unw_word_t ip,
                     unw_proc_info_t *pi, int need_unwind_info, void *arg)
{
# if defined(HAVE_DL_ITERATE_PHDR)
  unw_dyn_info_t di, *dip = &di;
  intrmask_t saved_mask;
  int ret;

  di.u.ti.segbase = ip; /* this is cheap... */

  /* dl_iterate_phdr() is not async-signal-safe; block everything around it */
  SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask);
  ret = dl_iterate_phdr (callback, &di);
  SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL);

  if (ret <= 0)
    {
      /* no loaded object covers ip: try the kernel's table, loading it once */
      if (!kernel_table.u.ti.table_data)
        {
          if ((ret = get_kernel_table (&kernel_table)) < 0)
            return ret;
        }
      if (ip < kernel_table.start_ip || ip >= kernel_table.end_ip)
        return -UNW_ENOINFO;
      dip = &kernel_table;
    }
# elif defined(HAVE_DLMODINFO)
  /* top bit of header_version flags 32-bit unwind tables */
# define UNWIND_TBL_32BIT 0x8000000000000000
  struct load_module_desc lmd;
  unw_dyn_info_t di, *dip = &di;
  struct unwind_header
    {
      uint64_t header_version;
      uint64_t start_offset;
      uint64_t end_offset;
    }
  *uhdr;

  if (!dlmodinfo (ip, &lmd, sizeof (lmd), NULL, 0, 0))
    return -UNW_ENOINFO;

  di.format = UNW_INFO_FORMAT_TABLE;
  di.start_ip = lmd.text_base;
  di.end_ip = lmd.text_base + lmd.text_size;
  di.gp = lmd.linkage_ptr;
  di.u.ti.name_ptr = 0; /* no obvious table-name available */
  di.u.ti.segbase = lmd.text_base;

  uhdr = (struct unwind_header *) lmd.unwind_base;

  /* only unwind-header versions 1 and 2 are understood */
  if ((uhdr->header_version & ~UNWIND_TBL_32BIT) != 1
      && (uhdr->header_version & ~UNWIND_TBL_32BIT) != 2)
    {
      Debug (1, "encountered unknown unwind header version %ld\n",
             (long) (uhdr->header_version & ~UNWIND_TBL_32BIT));
      return -UNW_EBADVERSION;
    }
  if (uhdr->header_version & UNWIND_TBL_32BIT)
    {
      Debug (1, "32-bit unwind tables are not supported yet\n");
      return -UNW_EINVAL;
    }

  /* offsets in the unwind header are relative to the text segment base */
  di.u.ti.table_data = (unw_word_t *) (di.u.ti.segbase + uhdr->start_offset);
  di.u.ti.table_len = ((uhdr->end_offset - uhdr->start_offset)
                       / sizeof (unw_word_t));

  Debug (16, "found table `%s': segbase=%lx, len=%lu, gp=%lx, "
         "table_data=%p\n", (char *) di.u.ti.name_ptr, di.u.ti.segbase,
         di.u.ti.table_len, di.gp, di.u.ti.table_data);
# endif

  /* now search the table: */
  return tdep_search_unwind_table (as, ip, dip, pi, need_unwind_info, arg);
}
/* Initialize the CMI server side: resolve the local listening address for
 * task descriptor "tnd", create/configure the listening socket, and install
 * the supplied callbacks on the network task descriptor (ntd_root).
 *
 * Parameters:
 *   tnd       - task-name descriptor used to derive the listening address
 *   tnr       - task number (currently unreferenced in this routine)
 *   err       - callback invoked on connection errors
 *   crq       - callback invoked on an incoming connect request
 *   acc       - predicate deciding whether to accept a connection
 *   urg       - callback for urgent (out-of-band) data
 *   pool_size, usr_size, mbl - buffer-pool sizing recorded on ntd_root
 *
 * Returns SS_NORMAL on success; a CMI error status or an errno value on
 * failure.  On any failure after socket creation the listening fd is closed
 * (CLOSEFILE_RESET sets it back to FD_INVALID) and, once blocked, the
 * signal mask is restored.
 */
cmi_status_t cmi_init(cmi_descriptor *tnd, unsigned char tnr,
	void (*err)(struct NTD *, struct CLB *, cmi_reason_t reason),
	void (*crq)(struct CLB *),
	bool (*acc)(struct CLB *),
	void (*urg)(struct CLB *, unsigned char data),
	size_t pool_size, size_t usr_size, size_t mbl)
{
	cmi_status_t status = SS_NORMAL;
	struct protoent *p;
	struct sockaddr_in in;
	sigset_t oset;
	int on = 1;
	int rval, rc, save_errno;
	/* (removed unused locals "envvar" and "myport") */

	status = cmj_netinit();
	if (CMI_ERROR(status))
		return status;
	status = cmj_getsockaddr(tnd, &in);
	if (CMI_ERROR(status))
		return status;
	ntd_root->pool_size = pool_size;
	ntd_root->usr_size = usr_size;
	ntd_root->mbl = mbl;
	p = getprotobyname(GTCM_SERVER_PROTOCOL);
	endprotoent();
	if (!p)
		return CMI_NETFAIL;
	/* create the listening socket */
	ntd_root->listen_fd = socket(AF_INET, SOCK_STREAM, p->p_proto);
	if (FD_INVALID == ntd_root->listen_fd)
		return errno;
	/* make sure we can re-run quickly w/o reuse problems */
	status = setsockopt(ntd_root->listen_fd, SOL_SOCKET, SO_REUSEADDR, (void*)&on, SIZEOF(on));
	if (-1 == status)
	{
		save_errno = errno;
		CLOSEFILE_RESET(ntd_root->listen_fd, rc);	/* resets "ntd_root->listen_fd" to FD_INVALID */
		return save_errno;
	}
	status = bind(ntd_root->listen_fd, (struct sockaddr*)&in, SIZEOF(in));
	if (-1 == status)
	{
		save_errno = errno;
		CLOSEFILE_RESET(ntd_root->listen_fd, rc);	/* resets "ntd_root->listen_fd" to FD_INVALID */
		return save_errno;
	}
	status = cmj_setupfd(ntd_root->listen_fd);
	if (CMI_ERROR(status))
	{
		CLOSEFILE_RESET(ntd_root->listen_fd, rc);	/* resets "ntd_root->listen_fd" to FD_INVALID */
		return status;
	}
	/* block network signals while wiring the fd into the select sets */
	SIGPROCMASK(SIG_BLOCK, &ntd_root->mutex_set, &oset, rc);
	rval = listen(ntd_root->listen_fd, MAX_CONN_IND);
	if (-1 == rval)
	{
		save_errno = errno;
		CLOSEFILE_RESET(ntd_root->listen_fd, rc);	/* resets "ntd_root->listen_fd" to FD_INVALID */
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
		return save_errno;
	}
	status = cmj_set_async(ntd_root->listen_fd);
	if (CMI_ERROR(status))
	{
		CLOSEFILE_RESET(ntd_root->listen_fd, rc);	/* resets "ntd_root->listen_fd" to FD_INVALID */
		SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
		return status;
	}
	FD_SET(ntd_root->listen_fd, &ntd_root->rs);
	FD_SET(ntd_root->listen_fd, &ntd_root->es);
	ntd_root->err = err;
	ntd_root->crq = crq;
	ntd_root->acc = acc;
	ntd_root->urg = urg;
	if (ntd_root->listen_fd > ntd_root->max_fd)
		ntd_root->max_fd = ntd_root->listen_fd;
	cmj_housekeeping();	/* will establish listening pools */
	SIGPROCMASK(SIG_SETMASK, &oset, NULL, rc);
	return SS_NORMAL;
}