void NaClFileLockManagerUnlock(struct NaClFileLockManager *self,
                               int desc) {
  struct NaClFileLockEntry key;
  struct NaClFileLockEntry **existing;
  struct NaClFileLockEntry *entry;

  (*self->set_file_identity_data)(&key, desc);

  NaClXMutexLock(&self->mu);
  existing = NaClFileLockManagerFindEntryMu(self, &key);
  CHECK(NULL != existing);
  entry = *existing;
  NaClXMutexLock(&entry->mu);
  entry->holding_lock = 0;
  if (0 == entry->num_waiting) {
    *existing = entry->next;
    NaClXMutexUnlock(&entry->mu);
    NaClXMutexUnlock(&self->mu);
    NaClFileLockManagerFileEntryRecycler(&entry);
  } else {
    NaClXMutexUnlock(&self->mu);
    /* tell waiting threads that they can now compete for the lock */
    NaClXCondVarBroadcast(&entry->cv);
    NaClXMutexUnlock(&entry->mu);
  }
  (*self->drop_file_lock)(desc);
}
void NaClFileLockManagerLock(struct NaClFileLockManager *self,
                             int desc) {
  struct NaClFileLockEntry key;
  struct NaClFileLockEntry **existing;
  struct NaClFileLockEntry *entry;

  (*self->set_file_identity_data)(&key, desc);

  NaClXMutexLock(&self->mu);
  existing = NaClFileLockManagerFindEntryMu(self, &key);
  if (NULL == existing) {
    /* make new entry */
    entry = NaClFileLockManagerEntryFactory(self, desc);
    entry->next = self->head;
    self->head = entry;
    NaClXMutexUnlock(&self->mu);
  } else {
    entry = *existing;
    NaClXMutexLock(&entry->mu);
    entry->num_waiting++;
    /* guard against arithmetic overflow of the waiter count */
    CHECK(0 != entry->num_waiting);
    /* drop container lock after ensuring that the entry will not be deleted */
    NaClXMutexUnlock(&self->mu);
    while (entry->holding_lock) {
      NaClXCondVarWait(&entry->cv, &entry->mu);
    }
    entry->holding_lock = 1;
    entry->num_waiting--;
    NaClXMutexUnlock(&entry->mu);
  }
  (*self->take_file_lock)(desc);
}
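/*
 * A minimal sketch of the entry layout the two functions above rely
 * on.  The field names (next, mu, cv, holding_lock, num_waiting) are
 * taken from the code; the exact types and the file-identity fields
 * are assumptions.
 */
struct NaClFileLockEntry {
  struct NaClFileLockEntry *next;          /* chained off self->head, guarded by manager mu */
  struct NaClMutex         mu;             /* guards the fields below */
  struct NaClCondVar       cv;             /* broadcast when the file lock is dropped */
  int                      holding_lock;   /* nonzero while some thread holds the file lock */
  size_t                   num_waiting;    /* threads blocked in NaClFileLockManagerLock */
  /* file identity data filled in by (*set_file_identity_data)(),
   * e.g. device/inode numbers (assumed) */
};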
int NaClGetTimeOfDayIntern(struct nacl_abi_timeval *tv,
                           struct NaClTimeState    *ntsp) {
  FILETIME  ft_now;
  DWORD     ms_counter_now;
  uint64_t  t_ms;
  DWORD     ms_counter_at_ft_now;
  uint32_t  ms_counter_diff;
  uint64_t  unix_time_ms;

  if (ntsp->can_use_qpc)
    return NaClGetTimeOfDayInternQpc(tv, ntsp, 1);

  GetSystemTimeAsFileTime(&ft_now);
  ms_counter_now = timeGetTime();
  t_ms = NaClFileTimeToMs(&ft_now);

  NaClXMutexLock(&ntsp->mu);

  if (!ntsp->allow_low_resolution) {
    NaClLog(5, "ms_counter_now       %"NACL_PRIu32"\n",
            (uint32_t) ms_counter_now);
    NaClLog(5, "t_ms                 %"NACL_PRId64"\n", t_ms);
    NaClLog(5, "system_time_start_ms %"NACL_PRIu64"\n",
            ntsp->system_time_start_ms);

    ms_counter_at_ft_now = (DWORD)
        (ntsp->ms_counter_start +
         (uint32_t) (t_ms - ntsp->system_time_start_ms));

    NaClLog(5, "ms_counter_at_ft_now %"NACL_PRIu32"\n",
            (uint32_t) ms_counter_at_ft_now);

    ms_counter_diff = ms_counter_now - (uint32_t) ms_counter_at_ft_now;

    NaClLog(5, "ms_counter_diff      %"NACL_PRIu32"\n", ms_counter_diff);

    if (ms_counter_diff <= kMaxMillsecondDriftBeforeRecalibration) {
      t_ms = t_ms + ms_counter_diff;
    } else {
      NaClCalibrateWindowsClockMu(ntsp);
      t_ms = ntsp->system_time_start_ms;
    }

    NaClLog(5, "adjusted t_ms =      %"NACL_PRIu64"\n", t_ms);
  }

  /*
   * Unix time is measured relative to a different epoch, Jan 1, 1970.
   * See the module initialization for epoch_start_ms.
   */
  unix_time_ms = t_ms - ntsp->epoch_start_ms;

  NaClXMutexUnlock(&ntsp->mu);

  NaClLog(5, "unix_time_ms  =      %"NACL_PRId64"\n", unix_time_ms);
  /*
   * Unix time is measured relative to a different epoch, Jan 1, 1970.
   * See the module initialization for epoch_start_ms.
   */

  tv->nacl_abi_tv_sec = (nacl_abi_time_t) (unix_time_ms / 1000);
  tv->nacl_abi_tv_usec = (nacl_abi_suseconds_t) ((unix_time_ms % 1000) * 1000);
  return 0;
}
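/*
 * A minimal sketch, under the assumption that NaClFileTimeToMs simply
 * rescales the Windows FILETIME: FILETIME counts 100ns units since
 * Jan 1, 1601, so dividing by 10,000 yields milliseconds.  Named
 * ...Sketch because this is a reconstruction, not the actual helper.
 */
static uint64_t NaClFileTimeToMsSketch(FILETIME const *ftp) {
  uint64_t ticks = (((uint64_t) ftp->dwHighDateTime) << 32)
      | (uint64_t) ftp->dwLowDateTime;
  return ticks / 10000;  /* 100ns ticks per millisecond */
}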
Example #4
/*
 * Mark a selector as available for future reuse.
 */
void NaClLdtDeleteSelector(uint16_t selector) {
  int retval;
  union {
    struct LdtEntry entry;
    DWORD dwords[2];
  } u;
  retval = 0;
  /* Build a not-present entry so the selector can be reused later. */
  u.entry.base_00to15 = 0;
  u.entry.base_16to23 = 0;
  u.entry.base_24to31 = 0;
  u.entry.limit_00to15 = 0;
  u.entry.limit_16to19 = 0;
  u.entry.type = 0x10;
  u.entry.descriptor_privilege = 3;
  u.entry.present = 0;
  u.entry.available = 0;
  u.entry.code_64_bit = 0;
  u.entry.op_size_32 = 1;
  u.entry.granularity = 1;

  NaClXMutexLock(&nacl_ldt_mutex);
  if (NULL != set_ldt_entries) {
    retval = (*set_ldt_entries)(selector, u.dwords[0], u.dwords[1], 0, 0, 0);
  }

  if ((NULL == set_ldt_entries) || (0 != retval)) {
    struct LdtInfo info;
    info.byte_offset = selector & ~0x7;
    info.size = sizeof(struct LdtEntry);
    info.entries[0] = u.entry;
    /* 10 is presumably ProcessLdtInformation; 16 bytes presumably covers
     * the two-DWORD header plus one 8-byte LdtEntry. */
    retval = (*set_information_process)((HANDLE) -1, 10, (void *) &info, 16);
  }
  NaClXMutexUnlock(&nacl_ldt_mutex);
}
Example #5
int NaClNameServiceDeleteName(struct NaClNameService *nnsp,
                              char const             *name) {
  struct NaClNameServiceEntry **nnsepp;
  struct NaClNameServiceEntry *to_free = NULL;
  int                         status = NACL_NAME_SERVICE_NAME_NOT_FOUND;

  NaClXMutexLock(&nnsp->mu);
  nnsepp = NameServiceSearch(&nnsp->head, name);
  if (NULL != *nnsepp) {
    to_free = *nnsepp;
    *nnsepp = to_free->next;
    status = NACL_NAME_SERVICE_SUCCESS;
  }
  NaClXMutexUnlock(&nnsp->mu);

  /* do the free operations w/o holding the lock */
  if (NULL != to_free) {
    NaClDescSafeUnref(to_free->entry);
    if (NULL != to_free->factory) {
      (void) (*to_free->factory)(to_free->state,
                                 to_free->name,
                                 0,
                                 (struct NaClDesc **) NULL);
    }
    free((void *) to_free->name);
    free(to_free);
  }
  return status;
}
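/*
 * A hedged reconstruction of the NameServiceSearch helper used above:
 * walking with a pointer-to-pointer returns the address of the link
 * that refers to the matching entry (or to NULL), so deletion needs
 * no "previous node" special case.  Requires <string.h>.
 */
static struct NaClNameServiceEntry **NameServiceSearchSketch(
    struct NaClNameServiceEntry **hdp,
    char const                  *name) {
  while (NULL != *hdp && 0 != strcmp((*hdp)->name, name)) {
    hdp = &(*hdp)->next;
  }
  return hdp;
}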
struct NaClDescInvalid const *NaClDescInvalidMake(void) {
  NaClXMutexLock(mutex);
  if (NULL == singleton) {
    do {
      /* Allocate an instance. */
      singleton = (struct NaClDescInvalid *) malloc(sizeof(*singleton));
      if (NULL == singleton) {
        break;
      }
      /* Do the base class construction. */
      if (!NaClDescCtor(&(singleton->base))) {
        free(singleton);
        singleton = NULL;
        break;
      }
      /* Construct the derived class (simply set the vtbl). */
      singleton->base.base.vtbl =
          (struct NaClRefCountVtbl const *) &kNaClDescInvalidVtbl;
    } while (0);
  }
  NaClXMutexUnlock(mutex);
  /*
   * If we reached this point and still have NULL == singleton, there
   * was an error in allocation or construction.  Return NULL to
   * indicate the error.
   */
  if (NULL == singleton) {
    return NULL;
  }

  return (struct NaClDescInvalid *) NaClDescRef(&(singleton->base));
}
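/*
 * Usage sketch (hypothetical caller): the factory returns a new
 * reference to the singleton, so each successful call must be
 * balanced with NaClDescUnref.
 */
void UseInvalidDescSketch(void) {
  struct NaClDescInvalid const *invalid = NaClDescInvalidMake();
  if (NULL == invalid) {
    return;  /* allocation or construction failed */
  }
  /* ... pass the descriptor to code expecting a struct NaClDesc ... */
  NaClDescUnref((struct NaClDesc *) invalid);
}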
Example #7
uint32_t NaClGlobalSecureRngUint32(void) {
  uint32_t rv;
  NaClXMutexLock(&nacl_global_rng_mu);
  rv = (*nacl_grngp->base.vtbl->GenUint32)(&nacl_grngp->base);
  NaClXMutexUnlock(&nacl_global_rng_mu);
  return rv;
}
int NaClWaitForMainThreadToExit(struct NaClApp  *nap) {
  struct NaClClosure        *work;

  while (NULL != (work = NaClSyncQueueDequeue(&nap->work_queue))) {
    NaClLog(3, "NaClWaitForMainThreadToExit: got work %08"NACL_PRIxPTR"\n",
            (uintptr_t) work);
    NaClLog(3, " invoking Run fn %08"NACL_PRIxPTR"\n",
            (uintptr_t) work->vtbl->Run);

    (*work->vtbl->Run)(work);
    NaClLog(3, "... done\n");
  }

  NaClLog(3, " taking NaClApp lock\n");
  NaClXMutexLock(&nap->mu);
  NaClLog(3, " waiting for exit status\n");
  while (nap->running) {
    NaClCondVarWait(&nap->cv, &nap->mu);
    NaClLog(3, " wakeup, nap->running %d, nap->exit_status %d\n",
            nap->running, nap->exit_status);
  }
  NaClXMutexUnlock(&nap->mu);
  /*
   * Some thread invoked the exit (exit_group) syscall.
   */

  NaClDebugStop(nap->exit_status);

  return (nap->exit_status);
}
Example #9
int NaClClockGetTime(nacl_clockid_t           clk_id,
                     struct nacl_abi_timespec *tp) {
  int                     rv = -NACL_ABI_EINVAL;
  struct nacl_abi_timeval tv;
  uint64_t                t_mono_prev_us;
  uint64_t                t_mono_cur_us;

  if (!g_NaClClock_is_initialized) {
    NaClLog(LOG_FATAL,
            "NaClClockGetTime invoked without successful NaClClockInit\n");
  }
  switch (clk_id) {
    case NACL_CLOCK_REALTIME:
      rv = NaClGetTimeOfDay(&tv);
      if (0 == rv) {
        tp->tv_sec = tv.nacl_abi_tv_sec;
        tp->tv_nsec = tv.nacl_abi_tv_usec * 1000;
      }
      break;
    case NACL_CLOCK_MONOTONIC:
      /*
       * Get real time, compare with last monotonic time.  If later
       * than last monotonic time, set last monotonic time to real
       * time timestamp; otherwise we leave the last monotonic time
       * alone.  In either case, return last monotonic time.
       *
       * The interpretation used here is that "monotonic" means
       * monotonic non-decreasing, as opposed to monotonic increasing.
       * We don't assume that GetTimeOfDay only yields high-order bits
       * so we can replace low-order bits of the time value with a
       * counter to fake monotonicity.  We are dangerously close to
       * the resolution limit of 1ns imposed by the timespec structure
       * already -- it's only a few Moore's Law generations away where
       * we may have to return the same time stamp for repeated calls
       * to clock_gettime (if the CPU frequency clock continues to be
       * used to drive performance counters; fortunately, RDTSC is
       * moving to a fixed rate [constant_tsc]).
       */
      rv = NaClGetTimeOfDay(&tv);
      if (0 == rv) {
        NaClXMutexLock(&g_nacl_clock_mu);
        t_mono_prev_us = g_nacl_clock_tv.nacl_abi_tv_sec * 1000000
            + g_nacl_clock_tv.nacl_abi_tv_usec;
        t_mono_cur_us  = tv.nacl_abi_tv_sec * 1000000
            + tv.nacl_abi_tv_usec;
        if (t_mono_cur_us >= t_mono_prev_us) {
          g_nacl_clock_tv = tv;
        }
        tp->tv_sec = g_nacl_clock_tv.nacl_abi_tv_sec;
        tp->tv_nsec = g_nacl_clock_tv.nacl_abi_tv_usec * 1000;
        NaClXMutexUnlock(&g_nacl_clock_mu);
        rv = 0;
      }
      break;
    case NACL_CLOCK_PROCESS_CPUTIME_ID:
    case NACL_CLOCK_THREAD_CPUTIME_ID:
      break;
  }
  return rv;
}
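/*
 * Usage sketch (hypothetical test): because of the clamping above,
 * two successive MONOTONIC reads must never move backwards, even when
 * the underlying realtime clock is recalibrated in between.
 */
void CheckClockMonotonicSketch(void) {
  struct nacl_abi_timespec t1;
  struct nacl_abi_timespec t2;
  CHECK(0 == NaClClockGetTime(NACL_CLOCK_MONOTONIC, &t1));
  CHECK(0 == NaClClockGetTime(NACL_CLOCK_MONOTONIC, &t2));
  CHECK(t2.tv_sec > t1.tv_sec ||
        (t2.tv_sec == t1.tv_sec && t2.tv_nsec >= t1.tv_nsec));
}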
void TestAbsWait(void *arg) {
  uint64_t                  sleep_usec;
  struct nacl_abi_timeval   now;
  struct nacl_abi_timespec  t;

  sleep_usec = ((struct TestFunctorArg *) arg)->sleep_usec;
  (void) NaClGetTimeOfDay(&now);
  t.tv_sec = (nacl_abi_time_t) (now.nacl_abi_tv_sec + sleep_usec / kMicroXinX);
  t.tv_nsec = (long int) (kNanoXinMicroX * (now.nacl_abi_tv_usec
                                            + (sleep_usec % kMicroXinX)));
  while (t.tv_nsec >= kNanoXinX) {
    t.tv_nsec -= kNanoXinX;
    ++t.tv_sec;
  }
  if (gVerbosity > 1) {
    printf("TestAbsWait: locking\n");
  }
  NaClXMutexLock(&gMu);
  if (gVerbosity > 1) {
    printf("TestAbsWait: waiting\n");
  }
  NaClXCondVarTimedWaitAbsolute(&gCv, &gMu, &t);
  if (gVerbosity > 1) {
    printf("TestAbsWait: unlocking\n");
  }
  NaClXMutexUnlock(&gMu);
}
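/*
 * Hedged reconstruction of the unit constants and functor argument
 * TestAbsWait depends on; the names appear above and the values
 * follow from the arithmetic, but the actual definitions may differ.
 */
#define kMicroXinX     1000000     /* microseconds per second */
#define kNanoXinMicroX 1000        /* nanoseconds per microsecond */
#define kNanoXinX      1000000000  /* nanoseconds per second */

struct TestFunctorArg {
  uint64_t sleep_usec;  /* relative wait, converted to an absolute deadline */
};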
Example #11
void NaClManifestProxyConnectionRevHandleConnect(
    struct NaClManifestProxyConnection  *self,
    struct NaClDesc                     *rev) {
  NaClLog(4, "Entered NaClManifestProxyConnectionRevHandleConnect\n");
  NaClXMutexLock(&self->mu);
  if (self->channel_initialized) {
    NaClLog(LOG_FATAL,
            "NaClManifestProxyConnectionRevHandleConnect: double connect?\n");
  }
  /*
   * If NaClSrpcClientCtor proves to take too long, we should spin off
   * another thread to do the initialization so that the reverse
   * client can accept additional reverse channels.
   */
  NaClLog(4,
          "NaClManifestProxyConnectionRevHandleConnect: Creating SrpcClient\n");
  if (NaClSrpcClientCtor(&self->client_channel, rev)) {
    NaClLog(4,
            ("NaClManifestProxyConnectionRevHandleConnect: SrpcClientCtor"
             " succeded, announcing.\n"));
    self->channel_initialized = 1;
    NaClXCondVarBroadcast(&self->cv);
    /* ownership of rev taken */
  } else {
    NaClLog(4,
            ("NaClManifestProxyConnectionRevHandleConnect: NaClSrpcClientCtor"
             " failed\n"));
  }
  NaClXMutexUnlock(&self->mu);
  NaClLog(4, "Leaving NaClManifestProxyConnectionRevHandleConnect\n");
}
int NaClReverseHostInterfaceReportExitStatus(
    struct NaClRuntimeHostInterface *vself,
    int                             exit_status) {
  struct NaClReverseHostInterface *self =
      (struct NaClReverseHostInterface *) vself;
  NaClSrpcError           rpc_result;
  int                     status = 0;

  NaClLog(3,
          "NaClReverseHostInterfaceReportExitStatus:"
          " self 0x%08"NACL_PRIxPTR", exit_status 0x%x)\n",
          (uintptr_t) self, exit_status);

  NaClXMutexLock(&self->server->mu);
  if (NACL_REVERSE_CHANNEL_INITIALIZED ==
      self->server->reverse_channel_initialization_state) {
    rpc_result = NaClSrpcInvokeBySignature(&self->server->reverse_channel,
                                           NACL_REVERSE_CONTROL_REPORT_STATUS,
                                           exit_status);
    if (NACL_SRPC_RESULT_OK != rpc_result) {
      NaClLog(LOG_FATAL, "NaClReverseHostInterfaceReportExitStatus:"
              " RPC failed, result %d\n",
              rpc_result);
    }
  } else {
    NaClLog(4, "NaClReverseHostInterfaceReportExitStatus: no reverse channel"
            ", no plugin to talk to.\n");
    status = -NACL_ABI_ENODEV;
  }
  NaClXMutexUnlock(&self->server->mu);
  return status;
}
int NaClReverseHostInterfaceStartupInitializationComplete(
    struct NaClRuntimeHostInterface *vself) {
  struct NaClReverseHostInterface *self =
      (struct NaClReverseHostInterface *) vself;
  NaClSrpcError           rpc_result;
  int                     status = 0;

  NaClLog(3,
          ("NaClReverseHostInterfaceStartupInitializationComplete(0x%08"
           NACL_PRIxPTR")\n"),
          (uintptr_t) self);

  NaClXMutexLock(&self->server->mu);
  if (NACL_REVERSE_CHANNEL_INITIALIZED ==
      self->server->reverse_channel_initialization_state) {
    rpc_result = NaClSrpcInvokeBySignature(&self->server->reverse_channel,
                                           NACL_REVERSE_CONTROL_INIT_DONE);
    if (NACL_SRPC_RESULT_OK != rpc_result) {
      NaClLog(LOG_FATAL,
              "NaClReverseHostInterfaceStartupInitializationComplete:"
              " RPC failed, result %d\n",
              rpc_result);
    }
  } else {
    NaClLog(4, "NaClReverseHostInterfaceStartupInitializationComplete:"
            " no reverse channel, no plugin to talk to.\n");
    status = -NACL_ABI_ENODEV;
  }
  NaClXMutexUnlock(&self->server->mu);
  return status;
}
Example #14
static void NaClManifestProxyConnectionDtor(struct NaClRefCount *vself) {
  struct NaClManifestProxyConnection *self =
      (struct NaClManifestProxyConnection *) vself;
  NaClLog(4,
          "Entered NaClManifestProxyConnectionDtor: self 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) self);
  NaClXMutexLock(&self->mu);
  while (!self->channel_initialized) {
    NaClLog(4,
            "NaClManifestProxyConnectionDtor:"
            " waiting for connection initialization\n");
    NaClXCondVarWait(&self->cv, &self->mu);
  }
  NaClXMutexUnlock(&self->mu);

  NaClLog(4, "NaClManifestProxyConnectionDtor: dtoring\n");

  NaClCondVarDtor(&self->cv);
  NaClMutexDtor(&self->mu);

  NaClSrpcDtor(&self->client_channel);
  NACL_VTBL(NaClSimpleServiceConnection, self) =
      &kNaClSimpleServiceConnectionVtbl;
  (*NACL_VTBL(NaClRefCount, self)->Dtor)(vself);
}
Example #15
int NaClMinimumThreadGeneration(struct NaClApp *nap) {
  size_t index;
  int rv = INT_MAX;
  NaClXMutexLock(&nap->threads_mu);
  for (index = 0; index < nap->threads.num_entries; ++index) {
    struct NaClAppThread *thread = NaClGetThreadMu(nap, (int) index);
    if (thread != NULL) {
      NaClXMutexLock(&thread->mu);
      if (rv > thread->dynamic_delete_generation) {
        rv = thread->dynamic_delete_generation;
      }
      NaClXMutexUnlock(&thread->mu);
    }
  }
  NaClXMutexUnlock(&nap->threads_mu);
  return rv;
}
Example #16
static void PrintVmmap(struct NaClApp  *nap) {
  printf("In PrintVmmap\n");
  fflush(stdout);
  NaClXMutexLock(&nap->mu);
  NaClVmmapVisit(&nap->mem_map, VmentryPrinter, (void *) 0);

  NaClXMutexUnlock(&nap->mu);
}
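/*
 * A hedged sketch of the visitor callback handed to NaClVmmapVisit
 * above; the NaClVmmapEntry field names are assumptions.
 */
static void VmentryPrinterSketch(void *state, struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);
  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %d\n", (int) vmep->npages);
  fflush(stdout);
}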
/*
 * This spins until any previous NaClAppThread has exited to the point
 * where it is removed from the thread array, so that it will not be
 * encountered by a subsequent call to GetOnlyThread().  This is
 * necessary because the threads hosting NaClAppThreads are unjoined.
 */
static void WaitForThreadToExitFully(struct NaClApp *nap) {
  int done;
  do {
    NaClXMutexLock(&nap->threads_mu);
    done = (nap->num_threads == 0);
    NaClXMutexUnlock(&nap->threads_mu);
  } while (!done);
}
/* Windows implementation: ResumeThread() undoes the host-thread suspension. */
void NaClUntrustedThreadResume(struct NaClAppThread *natp) {
  if (natp->suspend_state == NACL_APP_THREAD_UNTRUSTED) {
    if (ResumeThread(GetHostThreadHandle(natp)) == (DWORD) -1) {
      NaClLog(LOG_FATAL, "NaClUntrustedThreadResume: "
              "ResumeThread() call failed\n");
    }
  }
  NaClXMutexUnlock(&natp->suspend_mu);
}
/* Mac OS X implementation of the same function, using Mach thread_resume(). */
void NaClUntrustedThreadResume(struct NaClAppThread *natp) {
  if (natp->suspend_state == NACL_APP_THREAD_UNTRUSTED) {
    kern_return_t result = thread_resume(GetHostThreadPort(natp));
    if (result != KERN_SUCCESS) {
      NaClLog(LOG_FATAL, "NaClUntrustedThreadResume: "
              "thread_resume() call failed: error %d\n", (int) result);
    }
  }
  NaClXMutexUnlock(&natp->suspend_mu);
}
/*
 * Reset the interruptible mutex, presumably after the condition
 * causing the interrupt has been cleared.  In our case, this would be
 * an E_MOVE_ADDRESS_SPACE induced address space move.
 *
 * This is safe to invoke only after all threads are known to be in a
 * quiescent state -- i.e., will no longer call
 * NaClIntrMutex{Try,}Lock on the interruptible mutex -- since there
 * is no guarantee that all the threads awakened by NaClIntrMutexIntr
 * have actually run yet.
 */
void NaClIntrMutexReset(struct NaClIntrMutex *mp) {
  NaClXMutexLock(&mp->mu);
  if (NACL_INTR_LOCK_INTERRUPTED != mp->lock_state) {
    NaClLog(LOG_FATAL,
            "NaClIntrMutexReset: lock at 0x%08"NACL_PRIxPTR" not interrupted\n",
            (uintptr_t) mp);
  }
  mp->lock_state = NACL_INTR_LOCK_FREE;
  NaClXMutexUnlock(&mp->mu);
}
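/*
 * Hedged reconstruction of the lock-state values NaClIntrMutexReset
 * checks; the constants appear in the code above, but the actual
 * enum definition may carry additional members.
 */
enum NaClIntrLockState {
  NACL_INTR_LOCK_FREE,        /* no holder; a Lock call may proceed */
  NACL_INTR_LOCK_HELD,        /* held; contending threads wait on mp->cv */
  NACL_INTR_LOCK_INTERRUPTED  /* poisoned until NaClIntrMutexReset runs */
};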
Example #21
struct NaClDesc *NaClDescRef(struct NaClDesc *ndp) {
  NaClLog(4, "NaClDescRef(0x%08"NACL_PRIxPTR").\n",
          (uintptr_t) ndp);
  NaClXMutexLock(&ndp->mu);
  if (0 == ++ndp->ref_count) {
    NaClLog(LOG_FATAL, "NaClDescRef integer overflow\n");
  }
  NaClXMutexUnlock(&ndp->mu);
  return ndp;
}
static ssize_t NaClStreamDirents(struct NaClHostDir *d,
                                 void               *buf,
                                 size_t             len) {
  ssize_t retval;
  size_t  xferred = 0;
  ssize_t entry_size;

  NaClXMutexLock(&d->mu);
  while (len > 0) {
    NaClLog(4, "NaClStreamDirents: loop, xferred = %"NACL_PRIuS"\n", xferred);
    entry_size = NaClCopyDirent(d, buf, len);
    if (0 == entry_size) {
      CHECK(d->cur_byte == d->nbytes);
      retval = getdents(d->fd,
                        (struct dirent *) d->dirent_buf,
                        sizeof d->dirent_buf);
      if (-1 == retval) {
        if (xferred > 0) {
          /* next time through, we'll pick up the error again */
          goto cleanup;
        } else {
          xferred = -NaClXlateErrno(errno);
          goto cleanup;
        }
      } else if (0 == retval) {
        goto cleanup;
      }
      d->cur_byte = 0;
      d->nbytes = retval;
    } else if (entry_size < 0) {
      /*
       * The only error return from NaClCopyDirent is NACL_ABI_EINVAL
       * due to the destination buffer being too small for the current entry.  If
       * we had copied some entries before, we were successful;
       * otherwise report that the buffer is too small for the next
       * directory entry.
       */
      if (xferred > 0) {
        goto cleanup;
      } else {
        xferred = entry_size;
        goto cleanup;
      }
    }
    /* entry_size > 0, maybe copy another */
    buf = (void *) ((char *) buf + entry_size);
    CHECK(len >= (size_t) entry_size);
    len -= entry_size;
    xferred += entry_size;
  }
  /* perfect fit! */
 cleanup:
  NaClXMutexUnlock(&d->mu);
  return xferred;
}
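/*
 * The return-value contract the loop above assumes for NaClCopyDirent,
 * reconstructed from the call sites (declaration only):
 *   > 0  -- size in bytes of the entry copied into buf;
 *   == 0 -- buffered entries drained (d->cur_byte == d->nbytes);
 *   < 0  -- -NACL_ABI_EINVAL, len too small for the next entry.
 */
static ssize_t NaClCopyDirent(struct NaClHostDir *d, void *buf, size_t len);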
Example #23
int NaClFaultInjectionFaultP(char const *site_name) {
  int                                 rv;
  struct NaClFaultInjectInfo const    *entry = NULL;
  size_t                              ix;
  struct NaClFaultInjectCallSiteCount *counter;
  struct NaClFaultExpr                *expr;

  for (ix = 0; ix < gNaClNumFaultInjectInfo; ++ix) {
    if (!strcmp(site_name, gNaClFaultInjectInfo[ix].call_site_name)) {
      NaClLog(6, "NaClFaultInject: found %s\n", site_name);
      break;
    }
  }
  if (ix == gNaClNumFaultInjectInfo) {
    return 0;
  }
  entry = &gNaClFaultInjectInfo[ix];
  if (entry->thread_specific_p) {
    NaClLog(6, "NaClFaultInject: thread-specific counter\n");
    counter = NaClFaultInjectFindThreadCounter(ix);
  } else {
    NaClLog(6, "NaClFaultInject: global counter\n");
    NaClXMutexLock(&gNaClFaultInjectMu[ix]);
    counter = &gNaClFaultInjectCallSites[ix];
  }
  /*
   * Check the counter against the entry.  If a fault should be
   * injected, record the value for NaClFaultInjectionValue and set
   * the return value to true; otherwise set the return value to
   * false.  In either case, bump the counter.
   */
  NaClLog(6, "NaClFaultInject: counter(%"NACL_PRIxS",%"NACL_PRIxS")\n",
          counter->expr_ix, counter->count_in_expr);
  if (counter->expr_ix >= entry->num_expr) {
    rv = 0;
  } else {
    expr = &entry->expr[counter->expr_ix];
    if (expr->pass) {
      rv = 0;
    } else {
      NaClLog(6, "NaClFaultInject: should fail, value %"NACL_PRIxPTR"\n",
              expr->fault_value);
      rv = 1;
      NaClFaultInjectionSetValue(expr->fault_value);
    }
    /* bump counter, possibly carry */
    if (++counter->count_in_expr >= expr->count) {
      counter->count_in_expr = 0;
      ++counter->expr_ix;
    }
  }
  if (!entry->thread_specific_p) {
    NaClXMutexUnlock(&gNaClFaultInjectMu[ix]);
  }
  return rv;
}
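/*
 * Usage sketch (hypothetical call site): guard a fallible operation so
 * a test configuration can force the failure path.  The site name and
 * the use of NaClFaultInjectionValue for the error code are assumed.
 */
ssize_t MySubsystemReadSketch(void) {
  if (NaClFaultInjectionFaultP("my_subsystem_read")) {
    return (ssize_t) -NaClFaultInjectionValue();  /* injected errno value */
  }
  /* ... normal read path ... */
  return 0;
}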
void NaClReverseServiceThreadCountIncr(
    struct NaClReverseService *self) {
  NaClLog(5, "NaClReverseServiceThreadCountIncr\n");
  NaClXMutexLock(&self->mu);
  if (0 == ++self->thread_count) {
    NaClLog(LOG_FATAL,
            "NaClReverseServiceThreadCountIncr: "
            "thread count overflow!\n");
  }
  NaClXMutexUnlock(&self->mu);
}
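/*
 * A hedged sketch of the matching decrement: the last service thread
 * to exit must broadcast so that the wait loop in
 * NaClReverseServiceWaitForServiceThreadsToExit (next example) wakes.
 */
void NaClReverseServiceThreadCountDecrSketch(
    struct NaClReverseService *self) {
  NaClXMutexLock(&self->mu);
  CHECK(self->thread_count > 0);
  if (0 == --self->thread_count) {
    NaClXCondVarBroadcast(&self->cv);
  }
  NaClXMutexUnlock(&self->mu);
}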
Example #25
void NaClReverseServiceWaitForServiceThreadsToExit(
    struct NaClReverseService *self) {
  NaClLog(4, "NaClReverseServiceWaitForServiceThreadsToExit\n");
  NaClXMutexLock(&self->mu);
  while (0 != self->thread_count) {
    NaClXCondVarWait(&self->cv, &self->mu);
    NaClLog(5, "NaClReverseServiceWaitForServiceThreadsToExit: woke up\n");
  }
  NaClXMutexUnlock(&self->mu);
  NaClLog(4, "NaClReverseServiceWaitForServiceThreadsToExit: all done\n");
}
void NaClIntrMutexIntr(struct NaClIntrMutex *mp) {
  NaClXMutexLock(&mp->mu);
  if (NACL_INTR_LOCK_HELD == mp->lock_state) {
    /* potentially there are threads waiting for this thread */
    mp->lock_state = NACL_INTR_LOCK_INTERRUPTED;
    NaClXCondVarBroadcast(&mp->cv);
  } else {
    mp->lock_state = NACL_INTR_LOCK_INTERRUPTED;
  }
  NaClXMutexUnlock(&mp->mu);
}
int NaClReportExitStatus(struct NaClApp *nap, int exit_status) {
  NaClXMutexLock(&nap->mu);
  /*
   * If several threads are exiting/reporting signals at once, we let
   * only one thread pass through.  This way we can use the exit code
   * without synchronization once we know that running == 0.
   */
  if (!nap->running) {
    NaClXMutexUnlock(&nap->mu);
    return 0;
  }

  nap->exit_status = exit_status;
  nap->running = 0;
  NaClXCondVarSignal(&nap->cv);

  NaClXMutexUnlock(&nap->mu);

  return 0;
}
Example #28
void NaClUntrustedThreadsResumeAll(struct NaClApp *nap) {
  size_t index;
  for (index = 0; index < nap->threads.num_entries; index++) {
    struct NaClAppThread *natp = NaClGetThreadMu(nap, (int) index);
    if (natp != NULL) {
      NaClUntrustedThreadResume(natp);
    }
  }

  NaClXMutexUnlock(&nap->threads_mu);
}
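/*
 * A hedged sketch of the matching suspend-all: it acquires
 * nap->threads_mu and deliberately leaves it held so the thread set
 * cannot change while suspended, which is why
 * NaClUntrustedThreadsResumeAll above unlocks without locking.  The
 * save_registers parameter is assumed.
 */
void NaClUntrustedThreadsSuspendAllSketch(struct NaClApp *nap,
                                          int save_registers) {
  size_t index;
  NaClXMutexLock(&nap->threads_mu);
  for (index = 0; index < nap->threads.num_entries; index++) {
    struct NaClAppThread *natp = NaClGetThreadMu(nap, (int) index);
    if (natp != NULL) {
      NaClUntrustedThreadSuspend(natp, save_registers);
    }
  }
  /* nap->threads_mu intentionally stays locked. */
}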
Example #29
void NaClTlsFree(struct NaClAppThread *natp) {
  uint32_t idx = NaClGetThreadIdx(natp);
  NaClLog(2, "NaClTlsFree: old idx %d\n", idx);

  NaClXMutexLock(&gNaClTlsMu);
  gNaClThreadIdxInUse[idx] = 0;
  NaClXMutexUnlock(&gNaClTlsMu);

  /* ARM-specific: clear the thread-index register copy and guard token. */
  natp->user.r9 = 0;
  natp->user.guard_token = 0;
}
Example #30
void NaClSetThreadGeneration(struct NaClAppThread *natp, int generation) {
  /*
   * The outer, unlocked check handles the fast common case (no
   * change).  This is safe because each thread only sets its own
   * generation.
   */
  if (natp->dynamic_delete_generation != generation) {
    NaClXMutexLock(&natp->mu);
    CHECK(natp->dynamic_delete_generation <= generation);
    natp->dynamic_delete_generation = generation;
    NaClXMutexUnlock(&natp->mu);
  }
}