/*
 * Worker thread body for the semaphore test.
 *
 * |personality| carries the thread's small integer id (cast through
 * uintptr_t).  The thread spins up to gNumTriesSufficient times on
 * NaClSemTryWait -- the semaphore starts at zero, so the first try is
 * expected to fail -- and then blocks in NaClSemWait until main()
 * posts.  Results are accumulated into gFailure / gNumThreadsDone
 * under gMu, and gCv is signaled so main() can observe progress.
 */
void WINAPI ThreadMain(void *personality) {
  int       thread_num = (int) (uintptr_t) personality;
  uint64_t  sleep_count;
  int       got_sem = 0;
  int       failed = 0;

  for (sleep_count = 0; sleep_count < gNumTriesSufficient; ++sleep_count) {
    /* the sem_trywait should not succeed the first time through */
    if (NACL_SYNC_BUSY != NaClSemTryWait(&gSem)) {
      got_sem = 1;
      break;
    }
    if (0 == sleep_count) {
      /* Announce to main() that this thread reached the trywait loop. */
      NaClMutexLock(&gMu);
      ++gNumThreadsTried;
      NaClMutexUnlock(&gMu);
      NaClCondVarSignal(&gCv);
    }
    PauseSpinningThread();
  }

  if (got_sem) {
    /*
     * BUGFIX: sleep_count is uint64_t, so the unsigned NACL_PRIu64
     * macro must be used (NACL_PRId64 was a signed/unsigned mismatch).
     */
    printf("Thread %d: NaClSemTryWait succeeded at %"NACL_PRIu64"\n",
           thread_num,
           sleep_count);
  } else {
    /* gNumThreadsTried == sleep_count */
    printf("Thread %d: NaClSemWait\n", thread_num);
    if (NACL_SYNC_OK != NaClSemWait(&gSem)) {
      printf("FAILED\n");
      printf("NaClSemWait failed!?!\n");
      failed = 1;
    }
  }

  /*
   * sleep_count == 0 means the very first trywait succeeded, i.e. the
   * thread never actually had to wait -- a test failure.
   */
  if (0 == sleep_count) {
    printf("FAILED\n");
    printf("Thread %d never actually waited at NaClSemTryWait\n", thread_num);
    failed = 1;
  } else {
    printf("OK -- thread %d\n", thread_num);
  }

  NaClMutexLock(&gMu);
  gFailure += failed;
  ++gNumThreadsDone;
  NaClMutexUnlock(&gMu);
  NaClCondVarSignal(&gCv);
}
/*
 * Mark a selector as available for future reuse.
 *
 * Builds an LDT descriptor with present == 0 (segment not present) and
 * base/limit cleared, then writes it over the slot addressed by
 * |selector|.  The write is first attempted through the
 * set_ldt_entries entry point; if that is unavailable or fails, it
 * falls back to set_information_process.  All LDT mutation is
 * serialized by nacl_ldt_mutex.
 */
void NaClLdtDeleteSelector(uint16_t selector) {
  int retval;
  /*
   * Union lets the bit-field descriptor be handed to the system call
   * as two raw DWORDs; relies on struct LdtEntry being exactly 8 bytes.
   */
  union {
    struct LdtEntry entry;
    DWORD dwords[2];
  } u;
  retval = 0;
  u.entry.base_00to15 = 0;
  u.entry.base_16to23 = 0;
  u.entry.base_24to31 = 0;
  u.entry.limit_00to15 = 0;
  u.entry.limit_16to19 = 0;
  u.entry.type = 0x10;              /* data, read-only descriptor type */
  u.entry.descriptor_privilege = 3; /* DPL 3: user-mode accessible */
  u.entry.present = 0;              /* not present => slot is free */
  u.entry.available = 0;
  u.entry.code_64_bit = 0;
  u.entry.op_size_32 = 1;
  u.entry.granularity = 1;

  NaClMutexLock(&nacl_ldt_mutex);
  if (NULL != set_ldt_entries) {
    retval = (*set_ldt_entries)(selector, u.dwords[0], u.dwords[1], 0, 0, 0);
  }

  if ((NULL == set_ldt_entries) || (0 != retval)) {
    LdtInfo info;
    /* Mask off the RPL/TI bits to get the byte offset of the entry. */
    info.byte_offset = selector & ~0x7;
    info.size = sizeof(struct LdtEntry);
    info.entries[0] = u.entry;
    /*
     * NOTE(review): 10 is presumably ProcessLdtInformation and 16 the
     * size of LdtInfo's fixed part plus one entry -- confirm against
     * the NtSetInformationProcess declaration used by this file.
     */
    retval = (*set_information_process)((HANDLE)-1, 10, (void*)&info, 16);
  }
  NaClMutexUnlock(&nacl_ldt_mutex);
}
/* Example #3 (extraction artifact -- not part of the original sources) */
/*
 * Lock |mp|, aborting the process (LOG_FATAL) if the lock operation
 * fails.  The "X" variants never report errors back to the caller.
 */
void NaClXMutexLock(struct NaClMutex *mp) {
  NaClSyncStatus status = NaClMutexLock(mp);

  if (NACL_SYNC_OK != status) {
    NaClLog(LOG_FATAL, "NaClMutexLock returned %d\n", status);
  }
}
/*
 * Fill |tv| with the current Unix-epoch time of day, using the
 * calibration state in |ntsp|.
 *
 * Windows' GetSystemTimeAsFileTime has coarse granularity, so the
 * low-resolution system time is combined with the millisecond counter
 * from timeGetTime(): the drift between the two is computed and, while
 * it stays within kMaxMillsecondDriftBeforeRecalibration, used to
 * refine the timestamp; otherwise the clock is recalibrated.  The
 * result is clamped to be monotonically non-decreasing via
 * ntsp->last_reported_time_ms.  Always returns 0.
 */
int NaClGetTimeOfDayIntern(struct nacl_abi_timeval *tv,
                           struct NaClTimeState    *ntsp) {
  FILETIME  ft_now;
  DWORD     ms_counter_now;
  uint64_t  t_ms;
  DWORD     ms_counter_at_ft_now;
  uint32_t  ms_counter_diff;
  uint64_t  unix_time_ms;

  GetSystemTimeAsFileTime(&ft_now);
  ms_counter_now = timeGetTime();
  t_ms = NaClFileTimeToMs(&ft_now);

  NaClMutexLock(&ntsp->mu);

  NaClLog(5, "ms_counter_now       %"NACL_PRIu32"\n",
          (uint32_t) ms_counter_now);
  /* BUGFIX: t_ms is uint64_t; use the unsigned format macro. */
  NaClLog(5, "t_ms                 %"NACL_PRIu64"\n", t_ms);
  NaClLog(5, "system_time_start_ms %"NACL_PRIu64"\n",
          ntsp->system_time_start_ms);

  /* Expected counter value if both clocks advanced in lockstep. */
  ms_counter_at_ft_now = (DWORD)
      (ntsp->ms_counter_start +
       (uint32_t) (t_ms - ntsp->system_time_start_ms));

  NaClLog(5, "ms_counter_at_ft_now %"NACL_PRIu32"\n",
          (uint32_t) ms_counter_at_ft_now);

  ms_counter_diff = ms_counter_now - (uint32_t) ms_counter_at_ft_now;

  NaClLog(5, "ms_counter_diff      %"NACL_PRIu32"\n", ms_counter_diff);

  if (ms_counter_diff <= kMaxMillsecondDriftBeforeRecalibration) {
    t_ms = t_ms + ms_counter_diff;
  } else {
    NaClCalibrateWindowsClockMu(ntsp);
    t_ms = ntsp->system_time_start_ms;
  }

  NaClLog(5, "adjusted t_ms =      %"NACL_PRIu64"\n", t_ms);

  unix_time_ms = t_ms - ntsp->epoch_start_ms;

  /*
   * Time is monotonically non-decreasing.
   */
  if (unix_time_ms < ntsp->last_reported_time_ms) {
    unix_time_ms = ntsp->last_reported_time_ms;
  } else {
    ntsp->last_reported_time_ms = unix_time_ms;
  }

  NaClMutexUnlock(&ntsp->mu);

  /* BUGFIX: unix_time_ms is uint64_t; use the unsigned format macro. */
  NaClLog(5, "unix_time_ms  =      %"NACL_PRIu64"\n", unix_time_ms);
  /*
   * Unix time is measured relative to a different epoch, Jan 1, 1970.
   * See the module initialization for epoch_start_ms.
   */

  tv->nacl_abi_tv_sec = (nacl_abi_time_t) (unix_time_ms / 1000);
  tv->nacl_abi_tv_usec = (nacl_abi_suseconds_t) ((unix_time_ms % 1000) * 1000);

  /* BUGFIX: log tags said "nacl_avi_..."; corrected to "nacl_abi_...". */
  NaClLog(5, "nacl_abi_tv_sec =    %"NACL_PRIdNACL_TIME"\n",
          tv->nacl_abi_tv_sec);
  NaClLog(5, "nacl_abi_tv_usec =   %"NACL_PRId32"\n", tv->nacl_abi_tv_usec);

  return 0;
}
/*
 * Semaphore stress-test driver.
 *
 * Spawns -n worker threads (default 16), waits on gCv until every
 * thread has entered its NaClSemTryWait loop, posts the semaphore once
 * per thread to release them, then waits for all workers to finish.
 *
 * Returns 0 on success, the accumulated gFailure count on test
 * failure, or -1 if setup failed.
 */
int main(int ac, char **av) {
  int exit_status = -1;
  int opt;
  size_t num_threads = 16;
  size_t n;
  struct NaClThread thr;

  /*
   * BUGFIX: optstring previously contained "s:" although no case 's'
   * existed, so "-s value" silently consumed an argument before
   * erroring out; the leftover option has been removed.
   */
  while (EOF != (opt = getopt(ac, av, "n:t:"))) {
    switch (opt) {
      case 'n':
        num_threads = strtoul(optarg, (char **) NULL, 0);
        break;
      case 't':
        gNumTriesSufficient = strtoul(optarg, (char **) NULL, 0);
        break;
      default:
        fprintf(stderr,
                "Usage: nacl_semaphore_test [args]\n"
                "  -n n   number of threads used to test semaphore\n"
                "  -t n   number of TryWait operations before blocking Try\n");
        goto cleanup0;
    }
  }

  NaClPlatformInit();

  if (!NaClSemCtor(&gSem, 0)) {
    fprintf(stderr, "nacl_semaphore_test: NaClSemCtor failed!\n");
    goto cleanup1;
  }
  if (!NaClMutexCtor(&gMu)) {
    fprintf(stderr, "nacl_semaphore_test: NaClMutexCtor failed!\n");
    goto cleanup2;
  }
  if (!NaClCondVarCtor(&gCv)) {
    fprintf(stderr, "nacl_semaphore_test: NaClCondVarCtor failed!\n");
    goto cleanup3;
  }

  for (n = 0; n < num_threads; ++n) {
    if (!NaClThreadCtor(&thr, ThreadMain, (void *) (uintptr_t) n,
                        STACK_SIZE_BYTES)) {
      fprintf(stderr,
              "nacl_semaphore_test: could not create thread %"NACL_PRIdS"\n",
              n);
      goto cleanup4;  /* osx leak semaphore otherwise */
    }
  }

  /* Wait until every worker has reached its trywait loop. */
  NaClMutexLock(&gMu);
  while (gNumThreadsTried != num_threads) {
    NaClCondVarWait(&gCv, &gMu);
  }
  NaClMutexUnlock(&gMu);

  for (n = 0; n < num_threads; ++n) {
    NaClSemPost(&gSem);  /* let a thread go */
  }

  NaClMutexLock(&gMu);
  while (gNumThreadsDone != num_threads) {
    NaClCondVarWait(&gCv, &gMu);
  }
  exit_status = gFailure;
  NaClMutexUnlock(&gMu);

  if (0 == exit_status) {
    printf("SUCCESS\n");
  }
 cleanup4:
  /* single exit with (ah hem) simulation of RAII via cleanup sled */
  NaClCondVarDtor(&gCv);
 cleanup3:
  NaClMutexDtor(&gMu);
 cleanup2:
  NaClSemDtor(&gSem);
 cleanup1:
  NaClPlatformFini();
 cleanup0:
  return exit_status;
}
/*
 * Find and allocate an available selector, inserting an LDT entry with the
 * appropriate permissions.
 *
 * entry_number: LDT slot to use, or -1 to pick the first free one.
 * size_is_in_pages: non-zero => limit granularity is 4K pages.
 * type: NACL_LDT_DESCRIPTOR_DATA or NACL_LDT_DESCRIPTOR_CODE.
 * read_exec_only: non-zero => read-only data / execute-only code.
 * base_addr: segment base; must be page aligned when size is in pages.
 * size_minus_one: segment limit (at most 2**20 - 1 units).
 *
 * Returns the selector (RPL 3, LDT table indicator) on success, or 0
 * on any failure.  All LDT mutation is serialized by nacl_ldt_mutex.
 */
uint16_t NaClLdtAllocateSelector(int entry_number,
                                 int size_is_in_pages,
                                 NaClLdtDescriptorType type,
                                 int read_exec_only,
                                 void* base_addr,
                                 uint32_t size_minus_one) {
  int retval;
  struct LdtEntry ldt;

  retval = 0;
  NaClMutexLock(&nacl_ldt_mutex);

  if (-1 == entry_number) {
    entry_number = NaClFindUnusedEntryNumber();
    if (-1 == entry_number) {
      /*
       * No free entries were available.
       */
      NaClMutexUnlock(&nacl_ldt_mutex);
      return 0;
    }
  }

  switch (type) {
   case NACL_LDT_DESCRIPTOR_DATA:
    if (read_exec_only) {
      ldt.type = 0x10;  /* Data read only */
    } else {
      ldt.type = 0x12;  /* Data read/write */
    }
    break;
   case NACL_LDT_DESCRIPTOR_CODE:
    if (read_exec_only) {
      ldt.type = 0x18;  /* Code execute */
    } else {
      ldt.type = 0x1a;  /* Code execute/read */
    }
    break;
   default:
    NaClMutexUnlock(&nacl_ldt_mutex);
    return 0;
  }
  ldt.descriptor_privilege = 3;  /* DPL 3: user-mode accessible */
  ldt.present = 1;
  ldt.available = 1;   /* TODO(dcs) */
  ldt.code_64_bit = 0;
  ldt.op_size_32 = 1;

  if (size_is_in_pages && ((unsigned long) base_addr & 0xfff)) {
    /*
     * The base address needs to be page aligned.
     */
    NaClMutexUnlock(&nacl_ldt_mutex);
    return 0;
  }  /* BUGFIX: removed stray ';' (empty statement) after this block. */
  ldt.base_00to15 = ((unsigned long) base_addr) & 0xffff;
  ldt.base_16to23 = (((unsigned long) base_addr) >> 16) & 0xff;
  ldt.base_24to31 = (((unsigned long) base_addr) >> 24) & 0xff;

  if (size_minus_one > 0xfffff) {
    /*
     * If the size is in pages, no more than 2**20 pages can be protected.
     * If the size is in bytes, no more than 2**20 bytes can be protected.
     */
    NaClMutexUnlock(&nacl_ldt_mutex);
    return 0;
  }
  ldt.limit_00to15 = size_minus_one & 0xffff;
  ldt.limit_16to19 = (size_minus_one >> 16) & 0xf;
  ldt.granularity = size_is_in_pages;

  /*
   * Install the LDT entry.
   */
  if (NULL != set_ldt_entries) {
    /*
     * Union lets the bit-field descriptor be passed to the system call
     * as two raw DWORDs; relies on struct LdtEntry being 8 bytes.
     */
    union {
      struct LdtEntry ldt;
      DWORD dwords[2];
    } u;
    u.ldt = ldt;

    /* (entry << 3) | 0x7: selector with table indicator = LDT, RPL 3. */
    retval = (*set_ldt_entries)((entry_number << 3) | 0x7,
                                u.dwords[0],
                                u.dwords[1],
                                0,
                                0,
                                0);
  }

  if ((NULL == set_ldt_entries) || (0 != retval)) {
    LdtInfo info;
    info.byte_offset = entry_number << 3;
    info.size = sizeof(struct LdtEntry);
    info.entries[0] = ldt;
    /*
     * NOTE(review): 10 is presumably ProcessLdtInformation and 16 the
     * info buffer size -- confirm against the NtSetInformationProcess
     * declaration used by this file.
     */
    retval = (*set_information_process)((HANDLE)-1, 10, (void*)&info, 16);
  }

  if (0 != retval) {
    NaClMutexUnlock(&nacl_ldt_mutex);
    return 0;
  }

  /*
   * Return an LDT selector with a requested privilege level of 3.
   */
  NaClMutexUnlock(&nacl_ldt_mutex);
  return (uint16_t)((entry_number << 3) | 0x7);
}
/*
 * Acquire the global logging lock and advance the per-message log tag
 * (NaClLogTagNext_mu must be called with log_mu held, per its _mu
 * suffix).  Callers must pair this with the corresponding unlock.
 */
void NaClLogLock(void) {
  NaClMutexLock(&log_mu);
  NaClLogTagNext_mu();
}
/*
 * Implementation of the brk() syscall for a NaCl app.
 *
 * Moves the app's break (end of the data segment) to |new_break| (an
 * untrusted user address).  Shrinking just lowers break_addr; growing
 * either bumps break_addr within the existing break segment or extends
 * that VM-map entry up to the page containing new_break, mprotecting
 * the newly covered pages read/write and zeroing the added range.
 *
 * Returns the (possibly unchanged) break address as an int32_t, or
 * -NACL_ABI-style error codes on failure paths that never set
 * break_addr.  Disabled entirely (-NACL_ABI_ENOSYS) under PNaCl.
 *
 * NOTE(review): the lock is taken with plain NaClMutexLock (checked)
 * but released with NaClXMutexUnlock -- inconsistent but harmless;
 * confirm against the file's locking conventions.
 */
int32_t NaClSysBrk(struct NaClAppThread *natp,
                   uintptr_t            new_break) {
  struct NaClApp        *nap = natp->nap;
  uintptr_t             break_addr;
  int32_t               rv = -NACL_ABI_EINVAL;
  struct NaClVmmapIter  iter;
  struct NaClVmmapEntry *ent;
  struct NaClVmmapEntry *next_ent;
  uintptr_t             sys_break;
  uintptr_t             sys_new_break;
  uintptr_t             usr_last_data_page;
  uintptr_t             usr_new_last_data_page;
  uintptr_t             last_internal_data_addr;
  uintptr_t             last_internal_page;
  uintptr_t             start_new_region;
  uintptr_t             region_size;

  /*
   * The sysbrk() IRT interface is deprecated and is not enabled for
   * ABI-stable PNaCl pexes, so for security hardening, disable the
   * syscall under PNaCl too.
   */
  if (nap->pnacl_mode)
    return -NACL_ABI_ENOSYS;

  break_addr = nap->break_addr;

  NaClLog(3, "Entered NaClSysBrk(new_break 0x%08"NACL_PRIxPTR")\n",
          new_break);

  sys_new_break = NaClUserToSysAddr(nap, new_break);
  NaClLog(3, "sys_new_break 0x%08"NACL_PRIxPTR"\n", sys_new_break);

  /* Reject addresses outside the untrusted address space. */
  if (kNaClBadAddress == sys_new_break) {
    goto cleanup_no_lock;
  }
  if (NACL_SYNC_OK != NaClMutexLock(&nap->mu)) {
    NaClLog(LOG_ERROR, "Could not get app lock for 0x%08"NACL_PRIxPTR"\n",
            (uintptr_t) nap);
    goto cleanup_no_lock;
  }
  /* The break may never drop below the end of the static data segment. */
  if (new_break < nap->data_end) {
    NaClLog(4, "new_break before data_end (0x%"NACL_PRIxPTR")\n",
            nap->data_end);
    goto cleanup;
  }
  if (new_break <= nap->break_addr) {
    /* freeing memory */
    NaClLog(4, "new_break before break (0x%"NACL_PRIxPTR"); freeing\n",
            nap->break_addr);
    nap->break_addr = new_break;
    break_addr = new_break;
  } else {
    /*
     * See if page containing new_break is in mem_map; if so, we are
     * essentially done -- just update break_addr.  Otherwise, we
     * extend the VM map entry from the page containing the current
     * break to the page containing new_break.
     */

    sys_break = NaClUserToSys(nap, nap->break_addr);

    usr_last_data_page = (nap->break_addr - 1) >> NACL_PAGESHIFT;

    usr_new_last_data_page = (new_break - 1) >> NACL_PAGESHIFT;

    last_internal_data_addr = NaClRoundAllocPage(new_break) - 1;
    last_internal_page = last_internal_data_addr >> NACL_PAGESHIFT;

    NaClLog(4, ("current break sys addr 0x%08"NACL_PRIxPTR", "
                "usr last data page 0x%"NACL_PRIxPTR"\n"),
            sys_break, usr_last_data_page);
    NaClLog(4, "new break usr last data page 0x%"NACL_PRIxPTR"\n",
            usr_new_last_data_page);
    NaClLog(4, "last internal data addr 0x%08"NACL_PRIxPTR"\n",
            last_internal_data_addr);

    /*
     * The current break must lie inside some mapped region; anything
     * else means the app's VM map is corrupt, which is fatal.
     */
    if (NULL == NaClVmmapFindPageIter(&nap->mem_map,
                                      usr_last_data_page,
                                      &iter)
        || NaClVmmapIterAtEnd(&iter)) {
      NaClLog(LOG_FATAL, ("current break (0x%08"NACL_PRIxPTR", "
                          "sys 0x%08"NACL_PRIxPTR") "
                          "not in address map\n"),
              nap->break_addr, sys_break);
    }
    ent = NaClVmmapIterStar(&iter);
    NaClLog(4, ("segment containing current break"
                ": page_num 0x%08"NACL_PRIxPTR", npages 0x%"NACL_PRIxS"\n"),
            ent->page_num, ent->npages);
    if (usr_new_last_data_page < ent->page_num + ent->npages) {
      NaClLog(4, "new break within break segment, just bumping addr\n");
      nap->break_addr = new_break;
      break_addr = new_break;
    } else {
      /* Growing past the current segment: check for a collision with
       * the next mapped region before extending. */
      NaClVmmapIterIncr(&iter);
      if (!NaClVmmapIterAtEnd(&iter)
          && ((next_ent = NaClVmmapIterStar(&iter))->page_num
              <= last_internal_page)) {
        /* ran into next segment! */
        NaClLog(4,
                ("new break request of usr address "
                 "0x%08"NACL_PRIxPTR" / usr page 0x%"NACL_PRIxPTR
                 " runs into next region, page_num 0x%"NACL_PRIxPTR", "
                 "npages 0x%"NACL_PRIxS"\n"),
                new_break, usr_new_last_data_page,
                next_ent->page_num, next_ent->npages);
        goto cleanup;
      }
      NaClLog(4,
              "extending segment: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      /* go ahead and extend ent to cover, and make pages accessible */
      start_new_region = (ent->page_num + ent->npages) << NACL_PAGESHIFT;
      ent->npages = (last_internal_page - ent->page_num + 1);
      region_size = (((last_internal_page + 1) << NACL_PAGESHIFT)
                     - start_new_region);
      if (0 != NaClMprotect((void *) NaClUserToSys(nap, start_new_region),
                            region_size,
                            PROT_READ | PROT_WRITE)) {
        NaClLog(LOG_FATAL,
                ("Could not mprotect(0x%08"NACL_PRIxPTR", "
                 "0x%08"NACL_PRIxPTR", "
                 "PROT_READ|PROT_WRITE)\n"),
                start_new_region,
                region_size);
      }
      NaClLog(4, "segment now: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      nap->break_addr = new_break;
      break_addr = new_break;
    }
    /*
     * Zero out memory between old break and new break.
     */
    ASSERT(sys_new_break > sys_break);
    memset((void *) sys_break, 0, sys_new_break - sys_break);
  }



cleanup:
  NaClXMutexUnlock(&nap->mu);
cleanup_no_lock:

  /*
   * This cast is safe because the incoming value (new_break) cannot
   * exceed the user address space--even though its type (uintptr_t)
   * theoretically allows larger values.
   */
  rv = (int32_t) break_addr;

  NaClLog(3, "NaClSysBrk: returning 0x%08"NACL_PRIx32"\n", rv);
  return rv;
}