static void
rf_ShutdownEngine(void *arg)
{
	RF_Raid_t *raidPtr;
	int ks;

	raidPtr = (RF_Raid_t *) arg;

	/* Tell the rf_RaidIOThread to shutdown */
	simple_lock(&(raidPtr->iodone_lock));

	raidPtr->shutdown_raidio = 1;
	wakeup(&(raidPtr->iodone));

	/* ...and wait for it to tell us it has finished */
	while (raidPtr->shutdown_raidio)
		ltsleep(&(raidPtr->shutdown_raidio), PRIBIO, "raidshutdown", 0,
		    &(raidPtr->iodone_lock));

	simple_unlock(&(raidPtr->iodone_lock));

	/* Now shut down the DAG execution engine. */
	DO_LOCK(raidPtr);
	raidPtr->shutdown_engine = 1;
	DO_SIGNAL(raidPtr);
	DO_UNLOCK(raidPtr);
}
egg_secure_rec *
egg_secure_records (unsigned int *count)
{
	egg_secure_rec *records = NULL;
	Block *block = NULL;
	unsigned int total;

	*count = 0;

	DO_LOCK ();

	for (block = all_blocks; block != NULL; block = block->next) {
		total = 0;

		records = records_for_ring (block->unused_cells, records, count, &total);
		if (records == NULL)
			break;
		records = records_for_ring (block->used_cells, records, count, &total);
		if (records == NULL)
			break;

		/* Make sure this actually accounts for all memory */
		ASSERT (total == block->n_words);
	}

	DO_UNLOCK ();

	return records;
}
void*
egg_secure_alloc_full (const char *tag, size_t length, int flags)
{
	Block *block;
	void *memory = NULL;

	if (tag == NULL)
		tag = "?";

	if (length > 0xFFFFFFFF / 2) {
		if (egg_secure_warnings)
			fprintf (stderr, "tried to allocate an insane amount of memory: %lu\n",
			         (unsigned long)length);
		return NULL;
	}

	/* Can't allocate zero bytes */
	if (length == 0)
		return NULL;

	DO_LOCK ();

	for (block = all_blocks; block; block = block->next) {
		memory = sec_alloc (block, tag, length);
		if (memory)
			break;
	}

	/* None of the current blocks have space, allocate new */
	if (!memory) {
		block = sec_block_create (length, tag);
		if (block)
			memory = sec_alloc (block, tag, length);
	}

#ifdef WITH_VALGRIND
	if (memory != NULL)
		VALGRIND_MALLOCLIKE_BLOCK (memory, length, sizeof (void*), 1);
#endif

	DO_UNLOCK ();

	if (!memory && (flags & EGG_SECURE_USE_FALLBACK)) {
		memory = egg_memory_fallback (NULL, length);
		if (memory) /* Our returned memory is always zeroed */
			memset (memory, 0, length);
	}

	if (!memory)
		errno = ENOMEM;

	return memory;
}
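/*
 * A minimal usage sketch for the allocator above, not part of the original
 * sources: the "password" tag, the copy_secret() helper, and the choice of
 * EGG_SECURE_USE_FALLBACK are illustrative assumptions only. It shows the
 * intended pairing of egg_secure_alloc_full() with egg_secure_free_full()
 * (defined later in this section).
 */
#include <string.h>

static char *
copy_secret (const char *secret)
{
	size_t length = strlen (secret) + 1;
	char *copy = egg_secure_alloc_full ("password", length, EGG_SECURE_USE_FALLBACK);

	if (copy != NULL)
		memcpy (copy, secret, length); /* allocation is already zeroed */
	return copy; /* caller releases with egg_secure_free_full () */
}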
void
egg_secure_validate (void)
{
	Block *block = NULL;

	DO_LOCK ();

	for (block = all_blocks; block; block = block->next)
		sec_validate (block);

	DO_UNLOCK ();
}
int profileValue(void** id, char* file, int line, int64_t value, ...)
{
    DO_LOCK (&glock);

    entry_t e = (entry_t) *id;
    if (notInitialized) {
        atexit (dumpProfile);
        notInitialized = false;
    }

    if (e == NULL) {
        e = findEntry (file, line);
        if (e) {
            *id = e;
        }
    }

    if (e == NULL) {
        va_list va;
        e = (entry_t) malloc (sizeof(entry));
        e->lock = LOCK_IS_FREE;
        e->file = file;
        e->line = line;
        e->value = value;
        e->sum = value;
        e->count = 1;
        e->min = value;
        e->max = value;
        va_start (va, value);
        e->func = (void (__cdecl*)(void*)) va_arg (va, void*);
        va_end (va);
        e->h = NULL;
        e->genptr = NULL;
        memset (&e->ivar, 0, sizeof(e->ivar));
        memset (&e->i64var, 0, sizeof(e->i64var));
        memset (&e->dvar, 0, sizeof(e->dvar));

        e->next = entries;
        entries = e;

        if (e->func)
            e->func (e);

        *id = e;
    } else {
void
egg_secure_dump_blocks (void)
{
	Block *block = NULL;

	DO_LOCK ();

	/* Dump information about each block */
	for (block = all_blocks; block; block = block->next) {
		fprintf (stderr, "----------------------------------------------------\n");
		fprintf (stderr, " BLOCK at: 0x%08lx len: %lu\n", (unsigned long)block,
		         (unsigned long)block->n_words * sizeof (word_t));
		fprintf (stderr, "\n");
	}

	DO_UNLOCK ();
}
int
egg_secure_check (const void *memory)
{
	Block *block = NULL;

	DO_LOCK ();

	/* Find out which block it belongs to */
	for (block = all_blocks; block; block = block->next) {
		if (sec_is_valid_word (block, (word_t*)memory))
			break;
	}

	DO_UNLOCK ();

	return block == NULL ? 0 : 1;
}
/* The cancellation handler.  */
static void
cancel_handler (void *arg)
{
  pid_t child = *(pid_t *) arg;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (kill, err, 2, child, SIGKILL);

  TEMP_FAILURE_RETRY (__waitpid (child, NULL, 0));

  DO_LOCK ();
  if (SUB_REF () == 0)
    {
      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
    }
  DO_UNLOCK ();
}
void
egg_secure_free_full (void *memory, int flags)
{
	Block *block = NULL;

	if (memory == NULL)
		return;

	DO_LOCK ();

	/* Find out which block it belongs to */
	for (block = all_blocks; block; block = block->next) {
		if (sec_is_valid_word (block, memory))
			break;
	}

#ifdef WITH_VALGRIND
	/* We like valgrind's warnings, so give it a first whack at checking for errors */
	if (block != NULL || !(flags & GKR_SECURE_USE_FALLBACK))
		VALGRIND_FREELIKE_BLOCK (memory, sizeof (word_t));
#endif

	if (block != NULL) {
		sec_free (block, memory);
		if (block->used == 0)
			sec_block_destroy (block);
	}

	DO_UNLOCK ();

	if (!block) {
		if ((flags & GKR_SECURE_USE_FALLBACK)) {
			egg_memory_fallback (memory, 0);
		} else {
			if (egg_secure_warnings)
				fprintf (stderr, "memory does not belong to mate-keyring: 0x%08lx\n",
				         (unsigned long)memory);
			ASSERT (0 && "memory does not belong to mate-keyring");
		}
	}
}
int initValueProfile(void** id, char* file, int line, ...)
{
    DO_LOCK (&glock);

    entry_t e = (entry_t) *id;
    if (notInitialized) {
        atexit (dumpProfile);
        notInitialized = false;
    }

    if (e == NULL) {
        e = findEntry (file, line);
        if (e) {
            *id = e;
        }
    }

    if (e == NULL) {
        va_list va;
        e = (entry_t) malloc (sizeof(entry));
        e->lock = LOCK_IS_FREE;
        e->file = file;
        e->line = line;
        e->value = 0;
        e->sum = 0;
        e->count = 0;
        e->min = 0;
        e->max = 0;

        // optional probe function argument
        va_start (va, line);
        e->func = (void (__cdecl*)(void*)) va_arg (va, void*);
        va_end (va);

        e->h = NULL;
        e->genptr = NULL;
        VMPI_memset (&e->ivar, 0, sizeof(e->ivar));
        VMPI_memset (&e->i64var, 0, sizeof(e->i64var));
        VMPI_memset (&e->dvar, 0, sizeof(e->dvar));

        e->next = entries;
        entries = e;

        *id = e;
    }
/* Execute LINE as a shell command, returning its status.  */
static int
do_system (const char *line)
{
  int status, save;
  pid_t pid;
  struct sigaction sa;
#ifndef _LIBC_REENTRANT
  struct sigaction intr, quit;
#endif
  sigset_t omask;

  sa.sa_handler = SIG_IGN;
  sa.sa_flags = 0;
  __sigemptyset (&sa.sa_mask);

  DO_LOCK ();
  if (ADD_REF () == 0)
    {
      if (__sigaction (SIGINT, &sa, &intr) < 0)
        {
          (void) SUB_REF ();
          goto out;
        }
      if (__sigaction (SIGQUIT, &sa, &quit) < 0)
        {
          save = errno;
          (void) SUB_REF ();
          goto out_restore_sigint;
        }
    }
  DO_UNLOCK ();

  /* We reuse the bitmap in the 'sa' structure.  */
  __sigaddset (&sa.sa_mask, SIGCHLD);
  save = errno;
  if (__sigprocmask (SIG_BLOCK, &sa.sa_mask, &omask) < 0)
    {
#ifndef _LIBC
      if (errno == ENOSYS)
        __set_errno (save);
      else
#endif
        {
          DO_LOCK ();
          if (SUB_REF () == 0)
            {
              save = errno;
              (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
            out_restore_sigint:
              (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
              __set_errno (save);
            }
        out:
          DO_UNLOCK ();
          return -1;
        }
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_HANDLER;
#endif

#ifdef FORK
  pid = FORK ();
#else
  pid = __fork ();
#endif
  if (pid == (pid_t) 0)
    {
      /* Child side.  */
      const char *new_argv[4];
      new_argv[0] = SHELL_NAME;
      new_argv[1] = "-c";
      new_argv[2] = line;
      new_argv[3] = NULL;

      /* Restore the signals.  */
      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
      (void) __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL);
      INIT_LOCK ();

      /* Exec the shell.  */
      (void) __execve (SHELL_PATH, (char *const *) new_argv, __environ);
      _exit (127);
    }
  else if (pid < (pid_t) 0)
    /* The fork failed.  */
    status = -1;
  else
    /* Parent side.  */
    {
      /* Note the system() is a cancellation point.  But since we call
         waitpid() which itself is a cancellation point we do not
         have to do anything here.  */
      if (TEMP_FAILURE_RETRY (__waitpid (pid, &status, 0)) != pid)
        status = -1;
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_RESET;
#endif

  save = errno;
  DO_LOCK ();
  if ((SUB_REF () == 0
       && (__sigaction (SIGINT, &intr, (struct sigaction *) NULL)
           | __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL)) != 0)
      || __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL) != 0)
    {
#ifndef _LIBC
      /* glibc cannot be used on systems without waitpid.  */
      if (errno == ENOSYS)
        __set_errno (save);
      else
#endif
        status = -1;
    }
  DO_UNLOCK ();

  return status;
}
void*
egg_secure_realloc_full (void *memory, size_t length, int flags)
{
	Block *block = NULL;
	size_t previous = 0;
	int donew = 0;
	void *alloc = NULL;

	if (length > 0xFFFFFFFF / 2) {
		if (egg_secure_warnings)
			fprintf (stderr, "tried to allocate an insane amount of memory: %lu\n",
			         (unsigned long)length);
		return NULL;
	}

	/* No tag available here; NULL makes egg_secure_alloc_full() use "?" */
	if (memory == NULL)
		return egg_secure_alloc_full (NULL, length, flags);
	if (!length) {
		egg_secure_free_full (memory, flags);
		return NULL;
	}

	DO_LOCK ();

	/* Find out which block it belongs to */
	for (block = all_blocks; block; block = block->next) {
		if (sec_is_valid_word (block, memory)) {
			previous = sec_allocated (block, memory);

#ifdef WITH_VALGRIND
			/* Let valgrind think we are unallocating so that it'll validate */
			VALGRIND_FREELIKE_BLOCK (memory, sizeof (word_t));
#endif

			alloc = sec_realloc (block, memory, length);

#ifdef WITH_VALGRIND
			/* Now tell valgrind about either the new block or old one */
			VALGRIND_MALLOCLIKE_BLOCK (alloc ? alloc : memory,
			                           alloc ? length : previous,
			                           sizeof (word_t), 1);
#endif
			break;
		}
	}

	/* If it didn't work we may need to allocate a new block */
	if (block && !alloc)
		donew = 1;

	if (block && block->used == 0)
		sec_block_destroy (block);

	DO_UNLOCK ();

	if (!block) {
		if ((flags & GKR_SECURE_USE_FALLBACK)) {
			/*
			 * In this case we can't zero the returned memory,
			 * because we don't know what the block size was.
			 */
			return egg_memory_fallback (memory, length);
		} else {
			if (egg_secure_warnings)
				fprintf (stderr, "memory does not belong to mate-keyring: 0x%08lx\n",
				         (unsigned long)memory);
			ASSERT (0 && "memory does not belong to mate-keyring");
			return NULL;
		}
	}

	if (donew) {
		alloc = egg_secure_alloc_full (NULL, length, flags);
		if (alloc) {
			memcpy (alloc, memory, previous);
			egg_secure_free_full (memory, flags);
		}
	}

	if (!alloc)
		errno = ENOMEM;

	return alloc;
}
/* Execute LINE as a shell command, returning its status. */
static int
do_system (const char *line)
{
	int status, save;
	pid_t pid;
	struct sigaction sa;
	sigset_t omask;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;
	/* sa.sa_flags = 0; - done by memset */
	/* __sigemptyset (&sa.sa_mask); - done by memset */

	DO_LOCK ();
	if (ADD_REF () == 0) {
		if (sigaction (SIGINT, &sa, &intr) < 0) {
			SUB_REF ();
			goto out;
		}
		if (sigaction (SIGQUIT, &sa, &quit) < 0) {
			save = errno;
			SUB_REF ();
			goto out_restore_sigint;
		}
	}
	DO_UNLOCK ();

	/* We reuse the bitmap in the 'sa' structure. */
	__sigaddset (&sa.sa_mask, SIGCHLD);
	save = errno;
	if (sigprocmask (SIG_BLOCK, &sa.sa_mask, &omask) < 0) {
		{
			DO_LOCK ();
			if (SUB_REF () == 0) {
				save = errno;
				(void) sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
out_restore_sigint:
				(void) sigaction (SIGINT, &intr, (struct sigaction *) NULL);
				__set_errno (save);
			}
out:
			DO_UNLOCK ();
			return -1;
		}
	}

	CLEANUP_HANDLER;

	pid = FORK ();
	if (pid == (pid_t) 0) {
		/* Child side. */
		const char *new_argv[4];
		new_argv[0] = "/bin/sh";
		new_argv[1] = "-c";
		new_argv[2] = line;
		new_argv[3] = NULL;

		/* Restore the signals. */
		(void) sigaction (SIGINT, &intr, (struct sigaction *) NULL);
		(void) sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
		(void) sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL);
		INIT_LOCK ();

		/* Exec the shell. */
		(void) execve ("/bin/sh", (char *const *) new_argv, __environ);
		_exit (127);
	} else if (pid < (pid_t) 0)
		/* The fork failed. */
		status = -1;
	else {
		/* Parent side. */
		/* Note the system() is a cancellation point.  But since we call
		   waitpid() which itself is a cancellation point we do not
		   have to do anything here. */
		if (TEMP_FAILURE_RETRY (waitpid (pid, &status, 0)) != pid)
			status = -1;
	}

	CLEANUP_RESET;

	save = errno;
	DO_LOCK ();
	if ((SUB_REF () == 0
	     && (sigaction (SIGINT, &intr, (struct sigaction *) NULL)
	         | sigaction (SIGQUIT, &quit, (struct sigaction *) NULL)) != 0)
	    || sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL) != 0) {
		status = -1;
	}
	DO_UNLOCK ();

	return status;
}
static void
DAGExecutionThread(RF_ThreadArg_t arg)
{
	RF_DagNode_t *nd, *local_nq, *term_nq, *fire_nq;
	RF_Raid_t *raidPtr;
	int ks;
	int s;

	raidPtr = (RF_Raid_t *) arg;

#if RF_DEBUG_ENGINE
	if (rf_engineDebug) {
		printf("raid%d: Engine thread is running\n", raidPtr->raidid);
	}
#endif

	s = splbio();

	DO_LOCK(raidPtr);
	while (!raidPtr->shutdown_engine) {

		while (raidPtr->node_queue != NULL) {
			local_nq = raidPtr->node_queue;
			fire_nq = NULL;
			term_nq = NULL;
			raidPtr->node_queue = NULL;
			DO_UNLOCK(raidPtr);

			/* first, strip out the terminal nodes */
			while (local_nq) {
				nd = local_nq;
				local_nq = local_nq->next;
				switch (nd->dagHdr->status) {
				case rf_enable:
				case rf_rollForward:
					if (nd->numSuccedents == 0) {
						/* end of the dag, add to
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				case rf_rollBackward:
					if (nd->numAntecedents == 0) {
						/* end of the dag, add to the
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				default:
					RF_PANIC();
					break;
				}
			}

			/* execute callback of dags which have reached the
			 * terminal node */
			while (term_nq) {
				nd = term_nq;
				term_nq = term_nq->next;
				nd->next = NULL;
				(nd->dagHdr->cbFunc) (nd->dagHdr->cbArg);
				raidPtr->dags_in_flight--;	/* debug only */
			}

			/* fire remaining nodes */
			FireNodeList(fire_nq);

			DO_LOCK(raidPtr);
		}
		while (!raidPtr->shutdown_engine &&
		       raidPtr->node_queue == NULL) {
			DO_WAIT(raidPtr);
		}
	}
	DO_UNLOCK(raidPtr);

	splx(s);
	kthread_exit(0);
}
/* interrupt context:
 * for each succedent
 *    propagate required results from node to succedent
 *    increment succedent's numAntDone
 *    place newly-enabled nodes on node queue for firing
 *
 * To save context switches, we don't place NIL nodes on the node queue,
 * but rather just process them as if they had fired. Note that NIL nodes
 * that are the direct successors of the header will actually get fired by
 * DispatchDAG, which is fine because no context switches are involved.
 *
 * Important: when running at user level, this can be called by any
 * disk thread, and so the increment and check of the antecedent count
 * must be locked. I used the node queue mutex and locked down the
 * entire function, but this is certainly overkill.
 */
static void
PropagateResults(RF_DagNode_t *node, int context)
{
	RF_DagNode_t *s, *a;
	RF_Raid_t *raidPtr;
	int i, ks;
	RF_DagNode_t *finishlist = NULL;	/* a list of NIL nodes to be
						 * finished */
	RF_DagNode_t *skiplist = NULL;	/* list of nodes with failed truedata
					 * antecedents */
	RF_DagNode_t *firelist = NULL;	/* a list of nodes to be fired */
	RF_DagNode_t *q = NULL, *qh = NULL, *next;
	int j, skipNode;

	raidPtr = node->dagHdr->raidPtr;

	DO_LOCK(raidPtr);

	/* debug - validate fire counts */
	for (i = 0; i < node->numAntecedents; i++) {
		a = *(node->antecedents + i);
		RF_ASSERT(a->numSuccFired >= a->numSuccDone);
		RF_ASSERT(a->numSuccFired <= a->numSuccedents);
		a->numSuccDone++;
	}

	switch (node->dagHdr->status) {
	case rf_enable:
	case rf_rollForward:
		for (i = 0; i < node->numSuccedents; i++) {
			s = *(node->succedents + i);
			RF_ASSERT(s->status == rf_wait);
			(s->numAntDone)++;
			if (s->numAntDone == s->numAntecedents) {
				/* look for NIL nodes */
				if (s->doFunc == rf_NullNodeFunc) {
					/* don't fire NIL nodes, just process
					 * them */
					s->next = finishlist;
					finishlist = s;
				} else {
					/* look to see if the node is to be
					 * skipped */
					skipNode = RF_FALSE;
					for (j = 0; j < s->numAntecedents; j++)
						if ((s->antType[j] == rf_trueData) &&
						    (s->antecedents[j]->status == rf_bad))
							skipNode = RF_TRUE;
					if (skipNode) {
						/* this node has one or more
						 * failed true data
						 * dependencies, so skip it */
						s->next = skiplist;
						skiplist = s;
					} else
						/* add s to list of nodes (q)
						 * to execute */
						if (context != RF_INTR_CONTEXT) {
							/* we only have to
							 * enqueue if we're at
							 * intr context */
							/* put node on a list to
							 * be fired after we
							 * unlock */
							s->next = firelist;
							firelist = s;
						} else {
							/* enqueue the node for
							 * the dag exec thread
							 * to fire */
							RF_ASSERT(NodeReady(s));
							if (q) {
								q->next = s;
								q = s;
							} else {
								qh = q = s;
								qh->next = NULL;
							}
						}
				}
			}
		}

		if (q) {
			/* xfer our local list of nodes to the node queue */
			q->next = raidPtr->node_queue;
			raidPtr->node_queue = qh;
			DO_SIGNAL(raidPtr);
		}
		DO_UNLOCK(raidPtr);

		for (; skiplist; skiplist = next) {
			next = skiplist->next;
			skiplist->status = rf_skipped;
			for (i = 0; i < skiplist->numAntecedents; i++) {
				skiplist->antecedents[i]->numSuccFired++;
			}
			if (skiplist->commitNode) {
				skiplist->dagHdr->numCommits++;
			}
			rf_FinishNode(skiplist, context);
		}
		for (; finishlist; finishlist = next) {
			/* NIL nodes: no need to fire them */
			next = finishlist->next;
			finishlist->status = rf_good;
			for (i = 0; i < finishlist->numAntecedents; i++) {
				finishlist->antecedents[i]->numSuccFired++;
			}
			if (finishlist->commitNode)
				finishlist->dagHdr->numCommits++;
			/*
			 * Okay, here we're calling rf_FinishNode() on
			 * nodes that have the null function as their
			 * work proc. Such a node could be the
			 * terminal node in a DAG. If so, it will
			 * cause the DAG to complete, which will in
			 * turn free memory used by the DAG, which
			 * includes the node in question. Thus, we
			 * must avoid referencing the node at all
			 * after calling rf_FinishNode() on it.
			 */
			rf_FinishNode(finishlist, context);	/* recursive call */
		}
		/* fire all nodes in firelist */
		FireNodeList(firelist);
		break;

	case rf_rollBackward:
		for (i = 0; i < node->numAntecedents; i++) {
			a = *(node->antecedents + i);
			RF_ASSERT(a->status == rf_good);
			RF_ASSERT(a->numSuccDone <= a->numSuccedents);
			RF_ASSERT(a->numSuccDone <= a->numSuccFired);

			if (a->numSuccDone == a->numSuccFired) {
				if (a->undoFunc == rf_NullNodeFunc) {
					/* don't fire NIL nodes, just process
					 * them */
					a->next = finishlist;
					finishlist = a;
				} else {
					if (context != RF_INTR_CONTEXT) {
						/* we only have to enqueue if
						 * we're at intr context */
						/* put node on a list to be
						 * fired after we unlock */
						a->next = firelist;
						firelist = a;
					} else {
						/* enqueue the node for the dag
						 * exec thread to fire */
						RF_ASSERT(NodeReady(a));
						if (q) {
							q->next = a;
							q = a;
						} else {
							qh = q = a;
							qh->next = NULL;
						}
					}
				}
			}
		}

		if (q) {
			/* xfer our local list of nodes to the node queue */
			q->next = raidPtr->node_queue;
			raidPtr->node_queue = qh;
			DO_SIGNAL(raidPtr);
		}
		DO_UNLOCK(raidPtr);

		for (; finishlist; finishlist = next) {
			/* NIL nodes: no need to fire them */
			next = finishlist->next;
			finishlist->status = rf_good;
			/*
			 * Okay, here we're calling rf_FinishNode() on
			 * nodes that have the null function as their
			 * work proc. Such a node could be the first
			 * node in a DAG. If so, it will cause the DAG
			 * to complete, which will in turn free memory
			 * used by the DAG, which includes the node in
			 * question. Thus, we must avoid referencing
			 * the node at all after calling
			 * rf_FinishNode() on it.
			 */
			rf_FinishNode(finishlist, context);	/* recursive call */
		}
		/* fire all nodes in firelist */
		FireNodeList(firelist);
		break;
	default:
		printf("Engine found illegal DAG status in PropagateResults()\n");
		RF_PANIC();
		break;
	}
}