Example #1
int32_t NaClSysGetTimeOfDay(struct NaClAppThread      *natp,
                            struct nacl_abi_timeval   *tv,
                            struct nacl_abi_timezone  *tz) {
  int32_t                 retval;
  uintptr_t               sysaddr;
  UNREFERENCED_PARAMETER(tz);

  NaClSysCommonThreadSyscallEnter(natp);

  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) tv, sizeof *tv);

  /*
   * tz is not supported in linux, nor is it supported by glibc, since
   * tzset(3) and the zoneinfo file should be used instead.
   *
   * TODO(bsy) Do we make the zoneinfo directory available to
   * applications?
   */

  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  retval = NaClGetTimeOfDay((struct nacl_abi_timeval *) sysaddr);
cleanup:
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #2
int32_t NaClSysSysconf(struct NaClAppThread *natp,
                       int32_t name,
                       int32_t *result) {
  int32_t         retval = -NACL_ABI_EINVAL;
  static int32_t  number_of_workers = -1;
  uintptr_t       sysaddr;

  NaClLog(3,
          ("Entered NaClSysSysconf(%08"NACL_PRIxPTR
           "x, %d, 0x%08"NACL_PRIxPTR")\n"),
          (uintptr_t) natp, name, (uintptr_t) result);

  NaClSysCommonThreadSyscallEnter(natp);

  sysaddr = NaClUserToSysAddrRange(natp->nap,
                                   (uintptr_t) result,
                                   sizeof(*result));
  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  switch (name) {
#ifdef _SC_NPROCESSORS_ONLN
    case NACL_ABI__SC_NPROCESSORS_ONLN: {
      if (-1 == number_of_workers) {
        number_of_workers = sysconf(_SC_NPROCESSORS_ONLN);
      }
      if (-1 == number_of_workers) {
        /* failed to get the number of processors */
        retval = -NACL_ABI_EINVAL;
        goto cleanup;
      }
      *(int32_t*)sysaddr = number_of_workers;
      break;
    }
#endif
    case NACL_ABI__SC_SENDMSG_MAX_SIZE: {
      /* TODO(sehr,bsy): this value needs to be determined at run time. */
      const int32_t kImcSendMsgMaxSize = 1 << 16;
      *(int32_t*)sysaddr = kImcSendMsgMaxSize;
      break;
    }
    default:
      retval = -NACL_ABI_EINVAL;
      goto cleanup;
  }
  retval = 0;
cleanup:
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #3
File: zmq_syscalls.c  Project: camuel/zvm
/* ###
 * Read a portion of data from the opened channel (a NaClDesc obtained
 * from the NaClAppThread's NaClApp) and copy it into the given memory
 * region.  Returns the count of bytes read on success, otherwise a NaCl
 * error code.
 */
int ZMQSysRead(struct NaClAppThread *natp, int d, void *buf, uint32_t count)
{
	ssize_t read_result;
	uintptr_t sysaddr;
	struct NaClDesc *ndp;

	/* ### log to remove. i only use it to debug this class */
	NaClLog(1, "int ZMQSysRead(struct NaClAppThread  *natp, int d, void *buf, size_t count) -- entered\n");

	ndp = NaClGetDesc(natp->nap, d);
	if (NULL == ndp)
		return -NACL_ABI_EINVAL;

	/* ###
	 * dummy code. just to test if class work proper
	 * delete it after zmq integration
	 */
	sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) buf, count);
	if (kNaClBadAddress == sysaddr)
	{
		NaClDescUnref(ndp);
		return -NACL_ABI_EFAULT;
	}

	/*
	 * The maximum length for read and write is INT32_MAX--anything larger and
	 * the return value would overflow. Passing larger values isn't an error--
	 * we'll just clamp the request size if it's too large.
	 */
	if (count > INT32_MAX)
	{
		count = INT32_MAX;
	}

	read_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->Read)
			(ndp, (void *) sysaddr,	count);
	if (read_result > 0)
	{
		NaClLog(4, "read returned %"NACL_PRIdS" bytes\n", read_result);
		NaClLog(8, "read result: %.*s\n", (int) read_result, (char *) sysaddr);
	}
	else
	{
		NaClLog(4, "read returned %"NACL_PRIdS"\n", read_result);
	}
	NaClDescUnref(ndp);

	/* This cast is safe because we clamped count above.*/
	return (int32_t) read_result;
}
Example #4
int32_t NaClSysGetTimeOfDay(struct NaClAppThread      *natp,
                            struct nacl_abi_timeval   *tv,
                            struct nacl_abi_timezone  *tz) {
  uintptr_t               sysaddr;
  int                     retval;
  struct nacl_abi_timeval now;

  UNREFERENCED_PARAMETER(tz);

  NaClLog(3,
          ("Entered NaClSysGetTimeOfDay(%08"NACL_PRIxPTR
           ", 0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxPTR")\n"),
          (uintptr_t) natp, (uintptr_t) tv, (uintptr_t) tz);
  NaClSysCommonThreadSyscallEnter(natp);

  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) tv, sizeof *tv);

  /*
   * tz is not supported in linux, nor is it supported by glibc, since
   * tzset(3) and the zoneinfo file should be used instead.
   *
   * TODO(bsy) Do we make the zoneinfo directory available to
   * applications?
   */

  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  retval = NaClGetTimeOfDay(&now);
  if (0 == retval) {
    /*
     * To make it harder to distinguish Linux platforms from Windows,
     * coarsen the time to the same level we get on Windows -
     * milliseconds, unless in "debug" mode.
     */
    if (!NaClHighResolutionTimerEnabled()) {
      now.nacl_abi_tv_usec = (now.nacl_abi_tv_usec / 1000) * 1000;
    }
    CHECK(now.nacl_abi_tv_usec >= 0);
    CHECK(now.nacl_abi_tv_usec < NACL_MICROS_PER_UNIT);
    *(struct nacl_abi_timeval *) sysaddr = now;
  }
cleanup:
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #5
int NaClCopyInFromUserAndDropLock(struct NaClApp *nap,
                                  void           *dst_sys_ptr,
                                  uintptr_t      src_usr_addr,
                                  size_t         num_bytes) {
  uintptr_t src_sys_addr;

  src_sys_addr = NaClUserToSysAddrRange(nap, src_usr_addr, num_bytes);
  if (kNaClBadAddress == src_sys_addr) {
    return 0;
  }

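  /*
   * As the name suggests, the caller is expected to enter holding
   * nap->mu; the lock is dropped only on the successful path, so on a
   * bad address range we return 0 with the lock still held.
   */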
  memcpy((void *) dst_sys_ptr, (void *) src_sys_addr, num_bytes);
  NaClXMutexUnlock(&nap->mu);

  return 1;
}
Example #6
int NaClCopyOutToUser(struct NaClApp  *nap,
                      uintptr_t       dst_usr_addr,
                      void            *src_sys_ptr,
                      size_t          num_bytes) {
  uintptr_t dst_sys_addr;

  dst_sys_addr = NaClUserToSysAddrRange(nap, dst_usr_addr, num_bytes);
  if (kNaClBadAddress == dst_sys_addr) {
    return 0;
  }
  NaClXMutexLock(&nap->mu);
  memcpy((void *) dst_sys_addr, src_sys_ptr, num_bytes);
  NaClXMutexUnlock(&nap->mu);

  return 1;
}
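
Note: Examples #1 and #4 write through the address returned by NaClUserToSysAddrRange themselves, whereas NaClCopyOutToUser above bundles the same range check with a locked memcpy. A minimal, hypothetical sketch of calling it from a handler follows; CopyOutTimeval is illustrative only and is not part of the NaCl source.

static int32_t CopyOutTimeval(struct NaClApp          *nap,
                              uint32_t                user_addr,
                              struct nacl_abi_timeval *now) {
  /* NaClCopyOutToUser performs the range check and copies under nap->mu. */
  if (!NaClCopyOutToUser(nap, (uintptr_t) user_addr, now, sizeof *now)) {
    return -NACL_ABI_EFAULT;  /* the user range was not valid */
  }
  return 0;
}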
Example #7
/* tls create */
int32_t NaClSysTls_Init(struct NaClApp *nap, void *thread_ptr)
{
  uintptr_t sys_tls;

  ZLOGS(LOG_DEBUG, "tls init with 0x%08lx", (uintptr_t)thread_ptr);

  /*
   * Verify that the address is in the app's range and translate it from
   * the NaCl module's address space to a service runtime address (a nop
   * on ARM).
   */
  sys_tls = NaClUserToSysAddrRange(nap, (uintptr_t) thread_ptr, 4);
  ZLOGS(LOG_INSANE, "thread_ptr 0x%p, sys_tls 0x%lx", thread_ptr, sys_tls);

  if(kNaClBadAddress == sys_tls) return -EFAULT;

  nap->sys_tls = sys_tls;
  return 0;
}
Example #8
/*
 * This syscall copies freshly-generated random data into the supplied
 * buffer.
 *
 * Ideally this operation would be provided via a NaClDesc rather than via
 * a dedicated syscall.  However, if random data is read using the read()
 * syscall, it is too easy for an innocent-but-buggy application to
 * accidentally close() a random-data-source file descriptor, and then
 * read() from a file that is subsequently opened with the same FD number.
 * If the application relies on random data being unguessable, this could
 * make the application vulnerable (e.g. see https://crbug.com/374383).
 * This could be addressed by having a NaClDesc operation that can't
 * accidentally be confused with a read(), but that would be more
 * complicated.
 *
 * Providing a dedicated syscall is simple and removes that risk.
 */
int32_t NaClSysGetRandomBytes(struct NaClAppThread *natp,
                              uint32_t buf_addr, uint32_t buf_size) {
  struct NaClApp *nap = natp->nap;

  uintptr_t sysaddr = NaClUserToSysAddrRange(nap, buf_addr, buf_size);
  if (sysaddr == kNaClBadAddress)
    return -NACL_ABI_EFAULT;

  /*
   * Since we don't use NaClCopyOutToUser() for writing the data into the
   * sandbox, we use NaClVmIoWillStart()/NaClVmIoHasEnded() to ensure that
   * no mmap hole is opened up while we write the data.
   */
  NaClVmIoWillStart(nap, buf_addr, buf_addr + buf_size - 1);
  NaClGlobalSecureRngGenerateBytes((uint8_t *) sysaddr, buf_size);
  NaClVmIoHasEnded(nap, buf_addr, buf_addr + buf_size - 1);

  return 0;
}
Example #9
int32_t NaClSysDyncodeCreate(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             src,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                   src_addr;
  uint8_t                     *code_copy;
  int32_t                     retval = -NACL_ABI_EINVAL;

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeCreate: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr) {
    NaClLog(1, "NaClSysDyncodeCreate: Source address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  /*
   * Make a private copy of the code, so that we can validate it
   * without a TOCTTOU race condition.
   */
  code_copy = malloc(size);
  if (NULL == code_copy) {
    return -NACL_ABI_ENOMEM;
  }
  memcpy(code_copy, (uint8_t*) src_addr, size);

  /* Unknown data source, no metadata. */
  retval = NaClTextDyncodeCreate(nap, dest, code_copy, size, NULL);

  free(code_copy);
  return retval;
}
Example #10
int32_t NaClSysNanosleep(struct NaClAppThread     *natp,
                         struct nacl_abi_timespec *req,
                         struct nacl_abi_timespec *rem) {
  uintptr_t                 sys_req;
  int                       retval = -NACL_ABI_EINVAL;
  struct nacl_abi_timespec  host_req;

  UNREFERENCED_PARAMETER(rem);

  NaClLog(4, "NaClSysNanosleep(%08x)\n", (uintptr_t) req);

  NaClSysCommonThreadSyscallEnter(natp);

  sys_req = NaClUserToSysAddrRange(natp->nap, (uintptr_t) req, sizeof *req);
  if (kNaClBadAddress == sys_req) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  /* copy once */
  host_req = *(struct nacl_abi_timespec *) sys_req;
  /*
   * We assume that we do not need to normalize the time request values.
   *
   * If bogus values can cause the underlying OS to get into trouble,
   * then we need more checking here.
   */

  NaClLog(4, "NaClSysNanosleep(time = %d.%09ld S)\n",
          host_req.tv_sec, host_req.tv_nsec);

  retval = NaClNanosleep(&host_req, NULL);

cleanup:
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #11
int32_t NaClSysSysconf(struct NaClAppThread *natp,
                       int32_t              name,
                       int32_t              *result) {
  int32_t         retval = -NACL_ABI_EINVAL;
  static int32_t  number_of_workers = 0;
  uintptr_t       sysaddr;

  NaClSysCommonThreadSyscallEnter(natp);

  sysaddr = NaClUserToSysAddrRange(natp->nap,
                                   (uintptr_t) result,
                                   sizeof(*result));
  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  switch (name) {
    case NACL_ABI__SC_NPROCESSORS_ONLN: {
      if (0 == number_of_workers) {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        number_of_workers = (int32_t)si.dwNumberOfProcessors;
      }
      *(int32_t*)sysaddr = number_of_workers;
      break;
    }
    default:
      retval = -NACL_ABI_EINVAL;
      goto cleanup;
  }
  retval = 0;
cleanup:
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #12
/*
 * preconditions:
 *  * argc is the length of the argv array
 *  * envv may be NULL (this happens on MacOS/Cocoa and in tests)
 *  * if envv is non-NULL it is 'consistent', null terminated etc.
 */
int NaClCreateMainThread(struct NaClApp     *nap,
                         int                argc,
                         char               **argv,
                         char const *const  *envv) {
  /*
   * Compute size of string tables for argv and envv
   */
  int                   retval;
  int                   envc;
  size_t                size;
  int                   auxv_entries;
  size_t                ptr_tbl_size;
  int                   i;
  uint32_t              *p;
  char                  *strp;
  size_t                *argv_len;
  size_t                *envv_len;
  uintptr_t             stack_ptr;

  retval = 0;  /* fail */
  CHECK(argc >= 0);
  CHECK(NULL != argv || 0 == argc);

  envc = 0;
  if (NULL != envv) {
    char const *const *pp;
    for (pp = envv; NULL != *pp; ++pp) {
      ++envc;
    }
  }
  envv_len = 0;
  argv_len = malloc(argc * sizeof argv_len[0]);
  envv_len = malloc(envc * sizeof envv_len[0]);
  if (NULL == argv_len) {
    goto cleanup;
  }
  if (NULL == envv_len && 0 != envc) {
    goto cleanup;
  }

  size = 0;

  /*
   * The following two loops cannot overflow.  The reason for this is
   * that they are counting the number of bytes used to hold the
   * NUL-terminated strings that comprise the argv and envv tables.
   * If the entire address space consisted of just those strings, then
   * the size variable would overflow; however, since there's the code
   * space required to hold the code below (and we are not targeting
   * Harvard architecture machines), at least one page holds code, not
   * data.  We are assuming that the caller is non-adversarial and the
   * code does not look like string data....
   */
  for (i = 0; i < argc; ++i) {
    argv_len[i] = strlen(argv[i]) + 1;
    size += argv_len[i];
  }
  for (i = 0; i < envc; ++i) {
    envv_len[i] = strlen(envv[i]) + 1;
    size += envv_len[i];
  }

  /*
   * NaCl modules are ILP32, so the argv, envv pointers, as well as
   * the terminating NULL pointers at the end of the argv/envv tables,
   * are 32-bit values.  We also have the auxv to take into account.
   *
   * The argv and envv pointer tables came from trusted code and are
   * part of memory.  Thus, by the same argument above, adding in
   * "ptr_tbl_size" cannot possibly overflow the "size" variable since
   * it is a size_t object.  However, the extra pointers for auxv and
   * the space for argv could cause an overflow.  The fact that we
   * used stack to get here etc means that ptr_tbl_size could not have
   * overflowed.
   *
   * NB: the underlying OS would have limited the amount of space used
   * for argv and envv -- on linux, it is ARG_MAX, or 128KB -- and
   * hence the overflow check is for obvious auditability rather than
   * for correctness.
   */
  auxv_entries = 1;
  if (0 != nap->user_entry_pt) {
    auxv_entries++;
  }
  if (0 != nap->dynamic_text_start) {
    auxv_entries++;
  }
  ptr_tbl_size = (((NACL_STACK_GETS_ARG ? 1 : 0) +
                   (3 + argc + 1 + envc + 1 + auxv_entries * 2)) *
                  sizeof(uint32_t));
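  /*
   * ptr_tbl_size covers: the optional stack-passed argument word
   * (NACL_STACK_GETS_ARG), 3 words for the cleanup function pointer,
   * envc, and argc, the argc argv pointers plus their NULL terminator,
   * the envc envv pointers plus their NULL terminator, and two words
   * (key, value) per auxv entry, matching the writes below.
   */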

  if (SIZE_T_MAX - size < ptr_tbl_size) {
    NaClLog(LOG_WARNING,
            "NaClCreateMainThread: ptr_tbl_size cause size of"
            " argv / environment copy to overflow!?!\n");
    retval = 0;
    goto cleanup;
  }
  size += ptr_tbl_size;

  size = (size + NACL_STACK_ALIGN_MASK) & ~NACL_STACK_ALIGN_MASK;

  if (size > nap->stack_size) {
    retval = 0;
    goto cleanup;
  }

  /*
   * Write strings and char * arrays to stack.
   */
  stack_ptr = NaClUserToSysAddrRange(nap, NaClGetInitialStackTop(nap) - size,
                                     size);
  if (stack_ptr == kNaClBadAddress) {
    retval = 0;
    goto cleanup;
  }

  NaClLog(2, "setting stack to : %016"NACL_PRIxPTR"\n", stack_ptr);

  VCHECK(0 == (stack_ptr & NACL_STACK_ALIGN_MASK),
         ("stack_ptr not aligned: %016"NACL_PRIxPTR"\n", stack_ptr));

  p = (uint32_t *) stack_ptr;
  strp = (char *) stack_ptr + ptr_tbl_size;
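  /*
   * The pointer table occupies [stack_ptr, stack_ptr + ptr_tbl_size); the
   * argv/envv string data is packed immediately after it, and the CHECK
   * further below verifies the table writes end exactly at that boundary.
   */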

  /*
   * For x86-32, we push an initial argument that is the address of
   * the main argument block.  For other machines, this is passed
   * in a register and that's set in NaClStartThreadInApp.
   */
  if (NACL_STACK_GETS_ARG) {
    uint32_t *argloc = p++;
    *argloc = (uint32_t) NaClSysToUser(nap, (uintptr_t) p);
  }

  *p++ = 0;  /* Cleanup function pointer, always NULL.  */
  *p++ = envc;
  *p++ = argc;

  for (i = 0; i < argc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying arg %d  %p -> %p\n",
            i, argv[i], strp);
    strcpy(strp, argv[i]);
    strp += argv_len[i];
  }
  *p++ = 0;  /* argv[argc] is NULL.  */

  for (i = 0; i < envc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying env %d  %p -> %p\n",
            i, envv[i], strp);
    strcpy(strp, envv[i]);
    strp += envv_len[i];
  }
  *p++ = 0;  /* envp[envc] is NULL.  */

  /* Push an auxv */
  if (0 != nap->user_entry_pt) {
    *p++ = AT_ENTRY;
    *p++ = (uint32_t) nap->user_entry_pt;
  }
  if (0 != nap->dynamic_text_start) {
    *p++ = AT_BASE;
    *p++ = (uint32_t) nap->dynamic_text_start;
  }
  *p++ = AT_NULL;
  *p++ = 0;

  CHECK((char *) p == (char *) stack_ptr + ptr_tbl_size);

  /* now actually spawn the thread */
  NaClXMutexLock(&nap->mu);
  /*
   * Unreference the main nexe and irt at this point if no debug stub callbacks
   * have been registered, as these references to the main nexe and irt
   * descriptors are only used when providing file access to the debugger.
   * In the debug case, let shutdown take care of cleanup.
   */
  if (NULL == nap->debug_stub_callbacks) {
    if (NULL != nap->main_nexe_desc) {
      NaClDescUnref(nap->main_nexe_desc);
      nap->main_nexe_desc = NULL;
    }
    if (NULL != nap->irt_nexe_desc) {
      NaClDescUnref(nap->irt_nexe_desc);
      nap->irt_nexe_desc = NULL;
    }
  }
  nap->running = 1;
  NaClXMutexUnlock(&nap->mu);

  NaClVmHoleWaitToStartThread(nap);

  /*
   * For x86, we adjust the stack pointer down to push a dummy return
   * address.  This happens after the stack pointer alignment.
   * We avoid the otherwise harmless call for the zero case because
   * _FORTIFY_SOURCE memset can warn about zero-length calls.
   */
  if (NACL_STACK_PAD_BELOW_ALIGN != 0) {
    stack_ptr -= NACL_STACK_PAD_BELOW_ALIGN;
    memset((void *) stack_ptr, 0, NACL_STACK_PAD_BELOW_ALIGN);
  }

  NaClLog(2, "system stack ptr : %016"NACL_PRIxPTR"\n", stack_ptr);
  NaClLog(2, "  user stack ptr : %016"NACL_PRIxPTR"\n",
          NaClSysToUserStackAddr(nap, stack_ptr));

  /* e_entry is user addr */
  retval = NaClAppThreadSpawn(nap,
                              nap->initial_entry_pt,
                              NaClSysToUserStackAddr(nap, stack_ptr),
                              /* user_tls1= */ (uint32_t) nap->break_addr,
                              /* user_tls2= */ 0);

cleanup:
  free(argv_len);
  free(envv_len);

  return retval;
}
Example #13
int32_t NaClSysDyncodeDelete(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                    dest_addr;
  uint8_t                     *mapped_addr;
  int32_t                     retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion    *region;

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeDelete: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeDelete: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to delete.  Just update our generation. */
    int gen;
    /* fetch current generation */
    NaClXMutexLock(&nap->dynamic_load_mutex);
    gen = nap->dynamic_delete_generation;
    NaClXMutexUnlock(&nap->dynamic_load_mutex);
    /* set our generation */
    NaClSetThreadGeneration(natp, gen);
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeDelete: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * this check ensures the to-be-deleted region is identical to a
   * previously inserted region, so no need to check for alignment/bounds/etc
   */
  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start != dest_addr ||
      region->size != size ||
      region->is_mmap) {
    NaClLog(1, "NaClSysDyncodeDelete: Can't find region to delete\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }


  if (region->delete_generation < 0) {
    /* first deletion request */

    if (nap->dynamic_delete_generation == INT32_MAX) {
      NaClLog(1, "NaClSysDyncodeDelete:"
                 "Overflow, can only delete INT32_MAX regions\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_unlock;
    }

    if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }

    /* make it so no new threads can enter target region */
    ReplaceBundleHeadsWithHalts(mapped_addr, size, nap->bundle_size);

    /*
     * Flush the instruction cache.  In principle this is needed for
     * security on ARM so that, when new code is loaded, it is not
     * possible for it to jump to stale code that remains in the
     * icache.
     */
    NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

    NaClTextMapClearCacheIfNeeded(nap, dest, size);

    /* increment and record the generation deletion was requested */
    region->delete_generation = ++nap->dynamic_delete_generation;
  }

  /* update our own generation */
  NaClSetThreadGeneration(natp, nap->dynamic_delete_generation);

  if (region->delete_generation <= NaClMinimumThreadGeneration(nap)) {
    /*
     * All threads have checked in since we marked region for deletion.
     * It is safe to remove the region.
     *
     * No need to memset the region to hlt since bundle heads are hlt
     * and thus the bodies are unreachable.
     */
    NaClDynamicRegionDelete(nap, region);
    retval = 0;
  } else {
    /*
     * Still waiting for some threads to report in...
     */
    retval = -NACL_ABI_EAGAIN;
  }

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
Example #14
int32_t NaClSysDyncodeModify(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             src,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                   dest_addr;
  uintptr_t                   src_addr;
  uintptr_t                   beginbundle;
  uintptr_t                   endbundle;
  uintptr_t                   offset;
  uint8_t                     *mapped_addr;
  uint8_t                     *code_copy = NULL;
  uint8_t                     code_copy_buf[NACL_INSTR_BLOCK_SIZE];
  int                         validator_result;
  int32_t                     retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion    *region;

  if (!nap->validator->code_replacement) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: "
            "Dynamic code modification is not supported\n");
    return -NACL_ABI_ENOSYS;
  }

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeModify: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to modify.  Succeed trivially. */
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr || kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeModify: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start > dest_addr ||
      region->start + region->size < dest_addr + size ||
      region->is_mmap) {
    /*
     * target not a subregion of region or region is null, or came from a file.
     */
    NaClLog(1, "NaClSysDyncodeModify: Can't find region to modify\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  beginbundle = dest_addr & ~(nap->bundle_size - 1);
  endbundle   = (dest_addr + size - 1 + nap->bundle_size)
                  & ~(nap->bundle_size - 1);
  offset      = dest_addr &  (nap->bundle_size - 1);
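  /*
   * beginbundle/endbundle widen [dest_addr, dest_addr + size) to whole
   * bundle boundaries, and offset is dest_addr's position within its
   * first bundle; validation and copying operate on whole bundles.
   */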
  if (endbundle-beginbundle <= sizeof code_copy_buf) {
    /* usually patches are a single bundle, so stack allocate */
    code_copy = code_copy_buf;
  } else {
    /* in general case heap allocate */
    code_copy = malloc(endbundle-beginbundle);
    if (NULL == code_copy) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }
  }

  /* copy the bundles from already-inserted code */
  memcpy(code_copy, (uint8_t*) beginbundle, endbundle - beginbundle);

  /*
   * make the requested change in a temporary location;
   * this avoids a TOCTTOU race
   */
  memcpy(code_copy + offset, (uint8_t*) src_addr, size);

  /* update dest/size to refer to entire bundles */
  dest      &= ~(nap->bundle_size - 1);
  dest_addr &= ~((uintptr_t)nap->bundle_size - 1);
  /* since both are in sandbox memory this check should succeed */
  CHECK(endbundle-beginbundle < UINT32_MAX);
  size = (uint32_t)(endbundle - beginbundle);

  /* validate this code as a replacement */
  validator_result = NaClValidateCodeReplacement(nap,
                                                 dest,
                                                 (uint8_t*) dest_addr,
                                                 code_copy,
                                                 size);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
                       "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClSysDyncodeModify: Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  if (LOAD_OK != NaClCopyCode(nap, dest, mapped_addr, code_copy, size)) {
    NaClLog(1, "NaClSysDyncodeModify: Copying of replacement code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }
  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);

  if (code_copy != code_copy_buf) {
    free(code_copy);
  }

  return retval;
}
Example #15
int32_t NaClTextDyncodeCreate(struct NaClApp *nap,
                              uint32_t       dest,
                              void           *code_copy,
                              uint32_t       size,
                              const struct NaClValidationMetadata *metadata) {
  uintptr_t                   dest_addr;
  uint8_t                     *mapped_addr;
  int32_t                     retval = -NACL_ABI_EINVAL;
  int                         validator_result;
  struct NaClPerfCounter      time_dyncode_create;
  NaClPerfCounterCtor(&time_dyncode_create, "NaClTextDyncodeCreate");

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextDyncodeCreate: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }
  if (0 != (dest & (nap->bundle_size - 1)) ||
      0 != (size & (nap->bundle_size - 1))) {
    NaClLog(1, "NaClTextDyncodeCreate: Non-bundle-aligned address or size\n");
    return -NACL_ABI_EINVAL;
  }
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextDyncodeCreate: Dest address out of range\n");
    return -NACL_ABI_EFAULT;
  }
  if (dest < nap->dynamic_text_start) {
    NaClLog(1, "NaClTextDyncodeCreate: Below dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  /*
   * We ensure that the final HLTs of the dynamic code region cannot
   * be overwritten, just in case of CPU bugs.
   */
  if (dest + size > nap->dynamic_text_end - NACL_HALT_SLED_SIZE) {
    NaClLog(1, "NaClTextDyncodeCreate: Above dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  if (0 == size) {
    /* Nothing to load.  Succeed trivially. */
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * Validate the code before trying to create the region.  This avoids the need
   * to delete the region if validation fails.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  if (!nap->skip_validator) {
    validator_result = NaClValidateCode(nap, dest, code_copy, size, metadata);
  } else {
    NaClLog(LOG_ERROR, "VALIDATION SKIPPED.\n");
    validator_result = LOAD_OK;
  }

  NaClPerfCounterMark(&time_dyncode_create,
                      NACL_PERF_IMPORTANT_PREFIX "DynRegionValidate");
  NaClPerfCounterIntervalLast(&time_dyncode_create);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextDyncodeCreate: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (NaClDynamicRegionCreate(nap, dest_addr, size, 0) != 1) {
    /* target addr is in use */
    NaClLog(1, "NaClTextDyncodeCreate: Code range already allocated\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  CopyCodeSafelyInitial(mapped_addr, code_copy, size, nap->bundle_size);
  /*
   * Flush the processor's instruction cache.  This is not necessary
   * for security, because any old cached instructions will just be
   * safe halt instructions.  It is only necessary to ensure that
   * untrusted code runs correctly when it tries to execute the
   * dynamically-loaded code.
   */
  NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
Example #16
int32_t NaClSysMunmap(struct NaClAppThread  *natp,
                      void                  *start,
                      size_t                length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  uintptr_t addr;
  uintptr_t endaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  NaClLog(3, "NaClSysMunmap(0x%08x, 0x%08x, 0x%x)\n",
          natp, start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux munmap of zero length yields a failure, but windows code
     * would just iterate through and do nothing, so does not yield a
     * failure.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%x\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);
  holding_app_lock = 1;

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  endaddr = sysaddr + length;
  for (addr = sysaddr; addr < endaddr; addr += NACL_MAP_PAGESIZE) {
    struct NaClVmmapEntry const *entry;

    entry = NaClVmmapFindPage(&natp->nap->mem_map,
                              NaClSysToUser(natp->nap, addr)
                              >> NACL_PAGESHIFT);
    if (NULL == entry) {
      NaClLog(LOG_FATAL,
              "NaClSysMunmap: could not find VM map entry for addr 0x%08x\n",
              addr);
    }
    NaClLog(3,
            ("NaClSysMunmap: addr 0x%08x, nmop 0x%08x\n"),
            addr, entry->nmop);
    if (NULL == entry->nmop) {
      /* anonymous memory; we just decommit it and thus make it inaccessible */
      if (!VirtualFree((void *) addr,
                       NACL_MAP_PAGESIZE,
                       MEM_DECOMMIT)) {
        int error = GetLastError();
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not VirtualFree MEM_DECOMMIT"
                 " addr 0x%08x, error %d (0x%x)\n"),
                addr, error, error);
      }
    } else {
      /*
       * This should invoke a "safe" version of unmap that fills the
       * memory hole as quickly as possible, and may return
       * -NACL_ABI_E_MOVE_ADDRESS_SPACE.  The "safe" version just
       * minimizes the size of the timing hole for any racers, plus
       * the size of the memory window is only 64KB, rather than
       * whatever size the user is unmapping.
       */
      retval = (*entry->nmop->ndp->vtbl->Unmap)(entry->nmop->ndp,
                                                natp->effp,
                                                (void*) addr,
                                                NACL_MAP_PAGESIZE);
      if (0 != retval) {
        NaClLog(LOG_FATAL,
                ("NaClSysMunmap: Could not unmap via ndp->Unmap 0x%08x"
                 " and cannot handle address space move\n"),
                addr);
      }
    }
    NaClVmmapUpdate(&natp->nap->mem_map,
                    (NaClSysToUser(natp->nap, (uintptr_t) addr)
                     >> NACL_PAGESHIFT),
                    NACL_PAGES_PER_MAP,
                    0,  /* prot */
                    (struct NaClMemObj *) NULL,
                    1);  /* delete */
  }
  retval = 0;
cleanup:
  if (holding_app_lock) {
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #17
int32_t NaClSysGetdents(struct NaClAppThread *natp,
                        int                  d,
                        uint32_t             dirp,
                        size_t               count) {
  struct NaClApp  *nap = natp->nap;
  int32_t         retval = -NACL_ABI_EINVAL;
  ssize_t         getdents_ret;
  uintptr_t       sysaddr;
  struct NaClDesc *ndp;

  NaClLog(3,
          ("Entered NaClSysGetdents(0x%08"NACL_PRIxPTR", "
           "%d, 0x%08"NACL_PRIx32", "
           "%"NACL_PRIuS"[0x%"NACL_PRIxS"])\n"),
          (uintptr_t) natp, d, dirp, count, count);

  if (!NaClFileAccessEnabled()) {
    /*
     * Filesystem access is disabled, so disable the getdents() syscall.
     * We do this for security hardening, though it should be redundant,
     * because untrusted code should not be able to open any directory
     * descriptors (i.e. descriptors with a non-trivial Getdents()
     * implementation).
     */
    return -NACL_ABI_EACCES;
  }

  ndp = NaClAppGetDesc(nap, d);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup;
  }

  /*
   * Generic NaClCopyOutToUser is not sufficient, since buffer size
   * |count| is arbitrary and we wouldn't want to have to allocate
   * memory in trusted address space to match.
   */
  sysaddr = NaClUserToSysAddrRange(nap, dirp, count);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, " illegal address for directory data\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unref;
  }

  /*
   * Clamp count to INT32_MAX to avoid the possibility of Getdents returning
   * a value that is outside the range of an int32.
   */
  if (count > INT32_MAX) {
    count = INT32_MAX;
  }
  /*
   * Grab addr space lock; getdents should not normally block, though
   * if the directory is on a networked filesystem this could, and
   * cause mmap to be slower on Windows.
   */
  NaClXMutexLock(&nap->mu);
  getdents_ret = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Getdents)(ndp,
                            (void *) sysaddr,
                            count);
  NaClXMutexUnlock(&nap->mu);
  /* drop addr space lock */
  if ((getdents_ret < INT32_MIN && !NaClSSizeIsNegErrno(&getdents_ret))
      || INT32_MAX < getdents_ret) {
    /* This should never happen, because we already clamped the input count */
    NaClLog(LOG_FATAL, "Overflow in Getdents: return value is %"NACL_PRIxS,
            (size_t) getdents_ret);
  } else {
    retval = (int32_t) getdents_ret;
  }
  if (retval > 0) {
    NaClLog(4, "getdents returned %d bytes\n", retval);
    NaClLog(8, "getdents result: %.*s\n", retval, (char *) sysaddr);
  } else {
    NaClLog(4, "getdents returned %d\n", retval);
  }

cleanup_unref:
  NaClDescUnref(ndp);

cleanup:
  return retval;
}
Example #18
int32_t NaClSysMunmap(struct NaClAppThread  *natp,
                      void                  *start,
                      size_t                length) {
  int32_t   retval = -NACL_ABI_EINVAL;
  uintptr_t sysaddr;
  int       holding_app_lock = 0;
  size_t    alloc_rounded_length;

  NaClLog(3, "Entered NaClSysMunmap(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n",
          (uintptr_t) natp, (uintptr_t) start, length);

  NaClSysCommonThreadSyscallEnter(natp);

  if (!NaClIsAllocPageMultiple((uintptr_t) start)) {
    NaClLog(4, "start addr not allocation multiple\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  if (0 == length) {
    /*
     * linux munmap of zero length yields a failure, but osx does not, leading
     * to a NaClVmmapUpdate of zero pages, which should not occur.
     */
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    length = alloc_rounded_length;
    NaClLog(LOG_WARNING,
            "munmap: rounded length to 0x%"NACL_PRIxS"\n", length);
  }
  sysaddr = NaClUserToSysAddrRange(natp->nap, (uintptr_t) start, length);
  if (kNaClBadAddress == sysaddr) {
    NaClLog(4, "region not user addresses\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  NaClXMutexLock(&natp->nap->mu);

  while (0 != natp->nap->threads_launching) {
    NaClXCondVarWait(&natp->nap->cv, &natp->nap->mu);
  }
  natp->nap->vm_hole_may_exist = 1;

  holding_app_lock = 1;
  /*
   * NB: windows (or generic) version would use Munmap virtual
   * function from the backing NaClDesc object obtained by iterating
   * through the address map for the region, and those Munmap virtual
   * functions may return -NACL_ABI_E_MOVE_ADDRESS_SPACE.
   *
   * We should hold the application lock while doing this iteration
   * and unmapping, so that the address space is consistent for other
   * threads.
   */

  /*
   * User should be unable to unmap any executable pages.  We check here.
   */
  if (NaClSysCommonAddrRangeContainsExecutablePages_mu(natp->nap,
                                                       (uintptr_t) start,
                                                       length)) {
    NaClLog(2, "NaClSysMunmap: region contains executable pages\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Overwrite current mapping with inaccessible, anonymous
   * zero-filled pages, which should be copy-on-write and thus
   * relatively cheap.  Do not open up an address space hole.
   */
  NaClLog(4,
          ("NaClSysMunmap: mmap(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS","
           " 0x%x, 0x%x, -1, 0)\n"),
          sysaddr, length, PROT_NONE,
          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED);
  if (MAP_FAILED == mmap((void *) sysaddr,
                         length,
                         PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         -1,
                         (off_t) 0)) {
    NaClLog(4, "mmap to put in anonymous memory failed, errno = %d\n", errno);
    retval = -NaClXlateErrno(errno);
    goto cleanup;
  }
  NaClVmmapUpdate(&natp->nap->mem_map,
                  (NaClSysToUser(natp->nap, (uintptr_t) sysaddr)
                   >> NACL_PAGESHIFT),
                  length >> NACL_PAGESHIFT,
                  0,  /* prot */
                  (struct NaClMemObj *) NULL,
                  1);  /* Delete mapping */
  retval = 0;
cleanup:
  if (holding_app_lock) {
    natp->nap->vm_hole_may_exist = 0;
    NaClXCondVarBroadcast(&natp->nap->cv);
    NaClXMutexUnlock(&natp->nap->mu);
  }
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}
Example #19
int32_t NaClSysImcRecvmsg(struct NaClAppThread *natp,
                          int                  d,
                          uint32_t             nanimhp,
                          int                  flags) {
    struct NaClApp                        *nap = natp->nap;
    int32_t                               retval = -NACL_ABI_EINVAL;
    ssize_t                               ssize_retval;
    uintptr_t                             sysaddr;
    size_t                                i;
    struct NaClDesc                       *ndp;
    struct NaClAbiNaClImcMsgHdr           kern_nanimh;
    struct NaClAbiNaClImcMsgIoVec         kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
    struct NaClImcMsgIoVec                kern_iov[NACL_ABI_IMC_IOVEC_MAX];
    int32_t                               usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
    struct NaClImcTypedMsgHdr             recv_hdr;
    struct NaClDesc                       *new_desc[NACL_ABI_IMC_DESC_MAX];
    nacl_abi_size_t                       num_user_desc;
    struct NaClDesc                       *invalid_desc = NULL;

    NaClLog(3,
            ("Entered NaClSysImcRecvMsg(0x%08"NACL_PRIxPTR", %d,"
             " 0x%08"NACL_PRIx32")\n"),
            (uintptr_t) natp, d, nanimhp);

    /*
     * First, we validate user-supplied message headers before
     * allocating a receive buffer.
     */
    if (!NaClCopyInFromUser(nap, &kern_nanimh, nanimhp, sizeof kern_nanimh)) {
        NaClLog(4, "NaClImcMsgHdr not in user address space\n");
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
    }
    /* copy before validating */

    if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
        NaClLog(4, "gather/scatter array too large: %"NACL_PRIdNACL_SIZE"\n",
                kern_nanimh.iov_length);
        retval = -NACL_ABI_EINVAL;
        goto cleanup_leave;
    }
    if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
        NaClLog(4, "handle vector too long: %"NACL_PRIdNACL_SIZE"\n",
                kern_nanimh.desc_length);
        retval = -NACL_ABI_EINVAL;
        goto cleanup_leave;
    }

    if (kern_nanimh.iov_length > 0) {
        /*
         * Copy IOV array into kernel space.  Validate this snapshot and do
         * user->kernel address conversions on this snapshot.
         */
        if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                                (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
            NaClLog(4, "gather/scatter array not in user address space\n");
            retval = -NACL_ABI_EFAULT;
            goto cleanup_leave;
        }
        /*
         * Convert every IOV base from user to system address, validate
         * range of bytes are really in user address space.
         */

        for (i = 0; i < kern_nanimh.iov_length; ++i) {
            sysaddr = NaClUserToSysAddrRange(nap,
                                             (uintptr_t) kern_naiov[i].base,
                                             kern_naiov[i].length);
            if (kNaClBadAddress == sysaddr) {
                NaClLog(4, "iov number %"NACL_PRIuS" not entirely in user space\n", i);
                retval = -NACL_ABI_EFAULT;
                goto cleanup_leave;
            }
            kern_iov[i].base = (void *) sysaddr;
            kern_iov[i].length = kern_naiov[i].length;
        }
    }

    if (kern_nanimh.desc_length > 0) {
        sysaddr = NaClUserToSysAddrRange(nap,
                                         (uintptr_t) kern_nanimh.descv,
                                         kern_nanimh.desc_length * sizeof(int32_t));
        if (kNaClBadAddress == sysaddr) {
            retval = -NACL_ABI_EFAULT;
            goto cleanup_leave;
        }
    }

    ndp = NaClAppGetDesc(nap, d);
    if (NULL == ndp) {
        NaClLog(4, "receiving descriptor invalid\n");
        retval = -NACL_ABI_EBADF;
        goto cleanup_leave;
    }

    recv_hdr.iov = kern_iov;
    recv_hdr.iov_length = kern_nanimh.iov_length;

    recv_hdr.ndescv = new_desc;
    recv_hdr.ndesc_length = NACL_ARRAY_SIZE(new_desc);
    memset(new_desc, 0, sizeof new_desc);

    recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

    /* lock user memory ranges in kern_naiov */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
        NaClVmIoWillStart(nap,
                          kern_naiov[i].base,
                          kern_naiov[i].base + kern_naiov[i].length - 1);
    }
    ssize_retval = NACL_VTBL(NaClDesc, ndp)->RecvMsg(ndp, &recv_hdr, flags);
    /* unlock user memory ranges in kern_naiov */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
        NaClVmIoHasEnded(nap,
                         kern_naiov[i].base,
                         kern_naiov[i].base + kern_naiov[i].length - 1);
    }
    /*
     * retval is number of user payload bytes received and excludes the
     * header bytes.
     */
    NaClLog(3, "NaClSysImcRecvMsg: RecvMsg() returned %"NACL_PRIdS"\n",
            ssize_retval);
    if (NaClSSizeIsNegErrno(&ssize_retval)) {
        /* negative error numbers all have valid 32-bit representations,
         * so this cast is safe. */
        retval = (int32_t) ssize_retval;
        goto cleanup;
    } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
        retval = -NACL_ABI_EOVERFLOW;
        goto cleanup;
    } else {
        /* cast is safe due to range check above */
        retval = (int32_t) ssize_retval;
    }

    /*
     * NB: recv_hdr.flags may contain NACL_ABI_MESSAGE_TRUNCATED and/or
     * NACL_ABI_HANDLES_TRUNCATED.
     */

    kern_nanimh.flags = recv_hdr.flags;

    /*
     * Now internalize the NaClHandles as NaClDesc objects.
     */
    num_user_desc = recv_hdr.ndesc_length;

    if (kern_nanimh.desc_length < num_user_desc) {
        kern_nanimh.flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
        for (i = kern_nanimh.desc_length; i < num_user_desc; ++i) {
            NaClDescUnref(new_desc[i]);
            new_desc[i] = NULL;
        }
        num_user_desc = kern_nanimh.desc_length;
    }

    invalid_desc = (struct NaClDesc *) NaClDescInvalidMake();
    /* prepare to write out to user space the descriptor numbers */
    for (i = 0; i < num_user_desc; ++i) {
        if (invalid_desc == new_desc[i]) {
            usr_desc[i] = kKnownInvalidDescNumber;
            NaClDescUnref(new_desc[i]);
        } else {
            usr_desc[i] = NaClAppSetDescAvail(nap, new_desc[i]);
        }
        new_desc[i] = NULL;
    }
    if (0 != num_user_desc &&
            !NaClCopyOutToUser(nap, (uintptr_t) kern_nanimh.descv, usr_desc,
                               num_user_desc * sizeof usr_desc[0])) {
        NaClLog(LOG_FATAL,
                ("NaClSysImcRecvMsg: in/out ptr (descv %"NACL_PRIxPTR
                 ") became invalid at copyout?\n"),
                (uintptr_t) kern_nanimh.descv);
    }

    kern_nanimh.desc_length = num_user_desc;
    if (!NaClCopyOutToUser(nap, nanimhp, &kern_nanimh, sizeof kern_nanimh)) {
        NaClLog(LOG_FATAL,
                "NaClSysImcRecvMsg: in/out ptr (iov) became"
                " invalid at copyout?\n");
    }
    /* copy out updated desc count, flags */
cleanup:
    if (retval < 0) {
        for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
            if (NULL != new_desc[i]) {
                NaClDescUnref(new_desc[i]);
                new_desc[i] = NULL;
            }
        }
    }
    NaClDescUnref(ndp);
    NaClDescSafeUnref(invalid_desc);
    NaClLog(3, "NaClSysImcRecvMsg: returning %d\n", retval);
cleanup_leave:
    return retval;
}
Example #20
/*
 * This function converts addresses from user addresses to system
 * addresses, copying into kernel space as needed to avoid TOCvTOU
 * races, then invokes the descriptor's SendMsg() method.
 */
int32_t NaClSysImcSendmsg(struct NaClAppThread *natp,
                          int                  d,
                          uint32_t             nanimhp,
                          int                  flags) {
    struct NaClApp                *nap = natp->nap;
    int32_t                       retval = -NACL_ABI_EINVAL;
    ssize_t                       ssize_retval;
    uintptr_t                     sysaddr;
    /* copy of user-space data for validation */
    struct NaClAbiNaClImcMsgHdr   kern_nanimh;
    struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
    struct NaClImcMsgIoVec        kern_iov[NACL_ABI_IMC_IOVEC_MAX];
    int32_t                       usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
    /* kernel-side representation of descriptors */
    struct NaClDesc               *kern_desc[NACL_ABI_IMC_USER_DESC_MAX];
    struct NaClImcTypedMsgHdr     kern_msg_hdr;
    struct NaClDesc               *ndp;
    size_t                        i;

    NaClLog(3,
            ("Entered NaClSysImcSendmsg(0x%08"NACL_PRIxPTR", %d,"
             " 0x%08"NACL_PRIx32", 0x%x)\n"),
            (uintptr_t) natp, d, nanimhp, flags);

    if (!NaClCopyInFromUser(nap, &kern_nanimh, nanimhp, sizeof kern_nanimh)) {
        NaClLog(4, "NaClImcMsgHdr not in user address space\n");
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
    }
    /* copy before validating contents */

    /*
     * Some of these checks duplicate checks that will be done in the
     * nrd xfer library, but it is better to check before doing the
     * address translation of memory/descriptor vectors if those vectors
     * might be too long.  Plus, we need to copy and validate vectors
     * for TOCvTOU race protection, and we must prevent overflows.  The
     * nrd xfer library's checks should never fire when called from the
     * service runtime, but the nrd xfer library might be called from
     * other code.
     */
    if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
        NaClLog(4, "gather/scatter array too large\n");
        retval = -NACL_ABI_EINVAL;
        goto cleanup_leave;
    }
    if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
        NaClLog(4, "handle vector too long\n");
        retval = -NACL_ABI_EINVAL;
        goto cleanup_leave;
    }

    if (kern_nanimh.iov_length > 0) {
        if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                                (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
            NaClLog(4, "gather/scatter array not in user address space\n");
            retval = -NACL_ABI_EFAULT;
            goto cleanup_leave;
        }

        for (i = 0; i < kern_nanimh.iov_length; ++i) {
            sysaddr = NaClUserToSysAddrRange(nap,
                                             (uintptr_t) kern_naiov[i].base,
                                             kern_naiov[i].length);
            if (kNaClBadAddress == sysaddr) {
                retval = -NACL_ABI_EFAULT;
                goto cleanup_leave;
            }
            kern_iov[i].base = (void *) sysaddr;
            kern_iov[i].length = kern_naiov[i].length;
        }
    }

    ndp = NaClAppGetDesc(nap, d);
    if (NULL == ndp) {
        retval = -NACL_ABI_EBADF;
        goto cleanup_leave;
    }

    /*
     * make things easier for cleanup exit processing
     */
    memset(kern_desc, 0, sizeof kern_desc);
    retval = -NACL_ABI_EINVAL;

    kern_msg_hdr.iov = kern_iov;
    kern_msg_hdr.iov_length = kern_nanimh.iov_length;

    if (0 == kern_nanimh.desc_length) {
        kern_msg_hdr.ndescv = 0;
        kern_msg_hdr.ndesc_length = 0;
    } else {
        if (!NaClCopyInFromUser(nap, usr_desc, kern_nanimh.descv,
                                kern_nanimh.desc_length * sizeof usr_desc[0])) {
            retval = -NACL_ABI_EFAULT;
            goto cleanup;
        }

        for (i = 0; i < kern_nanimh.desc_length; ++i) {
            if (kKnownInvalidDescNumber == usr_desc[i]) {
                kern_desc[i] = (struct NaClDesc *) NaClDescInvalidMake();
            } else {
                /* NaCl modules are ILP32, so this works on ILP32 and LP64 systems */
                kern_desc[i] = NaClAppGetDesc(nap, usr_desc[i]);
            }
            if (NULL == kern_desc[i]) {
                retval = -NACL_ABI_EBADF;
                goto cleanup;
            }
        }
        kern_msg_hdr.ndescv = kern_desc;
        kern_msg_hdr.ndesc_length = kern_nanimh.desc_length;
    }
    kern_msg_hdr.flags = kern_nanimh.flags;

    /* lock user memory ranges in kern_naiov */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
        NaClVmIoWillStart(nap,
                          kern_naiov[i].base,
                          kern_naiov[i].base + kern_naiov[i].length - 1);
    }
    ssize_retval = NACL_VTBL(NaClDesc, ndp)->SendMsg(ndp, &kern_msg_hdr, flags);
    /* unlock user memory ranges in kern_naiov */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
        NaClVmIoHasEnded(nap,
                         kern_naiov[i].base,
                         kern_naiov[i].base + kern_naiov[i].length - 1);
    }

    if (NaClSSizeIsNegErrno(&ssize_retval)) {
        /*
         * NaClWouldBlock uses TSD (for both the errno-based and
         * GetLastError()-based implementations), so this is threadsafe.
         */
        if (0 != (flags & NACL_DONT_WAIT) && NaClWouldBlock()) {
            retval = -NACL_ABI_EAGAIN;
        } else if (-NACL_ABI_EMSGSIZE == ssize_retval) {
            /*
             * Allow the caller to handle the case when imc_sendmsg fails because
             * the message is too large for the system to send in one piece.
             */
            retval = -NACL_ABI_EMSGSIZE;
        } else {
            /*
             * TODO(bsy): the else case is some mysterious internal error.
             * Should we destroy the ndp or otherwise mark it as bad?  Was
             * the failure atomic?  Did it send some partial data?  Linux
             * implementation appears okay.
             */
            retval = -NACL_ABI_EIO;
        }
    } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
        retval = -NACL_ABI_EOVERFLOW;
    } else {
        /* cast is safe due to range checks above */
        retval = (int32_t)ssize_retval;
    }

cleanup:
    for (i = 0; i < kern_nanimh.desc_length; ++i) {
        if (NULL != kern_desc[i]) {
            NaClDescUnref(kern_desc[i]);
            kern_desc[i] = NULL;
        }
    }
    NaClDescUnref(ndp);
cleanup_leave:
    NaClLog(3, "NaClSysImcSendmsg: returning %d\n", retval);
    return retval;
}
Example #21
int32_t NaClSysWrite(struct NaClAppThread *natp,
                     int                  d,
                     uint32_t             buf,
                     uint32_t             count) {
  struct NaClApp  *nap = natp->nap;
  int32_t         retval = -NACL_ABI_EINVAL;
  ssize_t         write_result = -NACL_ABI_EINVAL;
  uintptr_t       sysaddr;
  char const      *ellipsis = "";
  struct NaClDesc *ndp;
  size_t          log_bytes;

  NaClLog(3,
          "Entered NaClSysWrite(0x%08"NACL_PRIxPTR", "
          "%d, 0x%08"NACL_PRIx32", "
          "%"NACL_PRIu32"[0x%"NACL_PRIx32"])\n",
          (uintptr_t) natp, d, buf, count, count);

  ndp = NaClAppGetDesc(nap, d);
  NaClLog(4, " ndp = %"NACL_PRIxPTR"\n", (uintptr_t) ndp);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup;
  }

  sysaddr = NaClUserToSysAddrRange(nap, buf, count);
  if (kNaClBadAddress == sysaddr) {
    NaClDescUnref(ndp);
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }

  log_bytes = count;
  if (log_bytes > INT32_MAX) {
    log_bytes = INT32_MAX;
    ellipsis = "...";
  }
  if (NaClLogGetVerbosity() < 10) {
    if (log_bytes > kdefault_io_buffer_bytes_to_log) {
      log_bytes = kdefault_io_buffer_bytes_to_log;
      ellipsis = "...";
    }
  }
  NaClLog(8, "In NaClSysWrite(%d, %.*s%s, %"NACL_PRIu32")\n",
          d, (int) log_bytes, (char *) sysaddr, ellipsis, count);

  /*
   * The maximum length for read and write is INT32_MAX--anything larger and
   * the return value would overflow. Passing larger values isn't an error--
   * we'll just clamp the request size if it's too large.
   */
  if (count > INT32_MAX) {
    count = INT32_MAX;
  }

  NaClVmIoWillStart(nap, buf, buf + count - 1);
  write_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Write)(ndp, (void *) sysaddr, count);
  NaClVmIoHasEnded(nap, buf, buf + count - 1);

  NaClDescUnref(ndp);

  /* This cast is safe because we clamped count above.*/
  retval = (int32_t) write_result;

cleanup:
  return retval;
}
Example #22
int32_t NaClSysNanosleep(struct NaClAppThread     *natp,
                         struct nacl_abi_timespec *req,
                         struct nacl_abi_timespec *rem) {
  uintptr_t                 sys_req;
  uintptr_t                 sys_rem;
  struct nacl_abi_timespec  t_sleep;
  struct nacl_abi_timespec  t_rem;
  struct nacl_abi_timespec  *remptr;
  int                       retval = -NACL_ABI_EINVAL;

  NaClLog(3,
          ("Entered NaClSysNanosleep(0x%08"NACL_PRIxPTR
           ", 0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxPTR"x)\n"),
          (uintptr_t) natp, (uintptr_t) req, (uintptr_t) rem);

  NaClSysCommonThreadSyscallEnter(natp);

  sys_req = NaClUserToSysAddrRange(natp->nap, (uintptr_t) req, sizeof *req);
  if (kNaClBadAddress == sys_req) {
    retval = -NACL_ABI_EFAULT;
    goto cleanup;
  }
  if (NULL == rem) {
    sys_rem = 0;
  } else {
    sys_rem = NaClUserToSysAddrRange(natp->nap, (uintptr_t) rem, sizeof *rem);
    if (kNaClBadAddress == sys_rem) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup;
    }
  }
  /*
   * post-condition: if sys_rem is non-NULL, it's safe to write to
   * (modulo thread races) and the user code wants the remaining time
   * written there.
   */

  NaClLog(4, " copying timespec from %08"NACL_PRIxPTR"x\n", sys_req);
  /* copy once */
  t_sleep = *(struct nacl_abi_timespec *) sys_req;

  remptr = (0 == sys_rem) ? NULL : &t_rem;
  /* NULL != remptr \equiv NULL != rem */

  /*
   * We assume that we do not need to normalize the time request values.
   *
   * If bogus values can cause the underlying OS to get into trouble,
   * then we need more checking here.
   */
  NaClLog(4, "NaClSysNanosleep(time = %"NACL_PRId64".%09"NACL_PRId64" S)\n",
          (int64_t) t_sleep.tv_sec, (int64_t) t_sleep.tv_nsec);
  retval = NaClNanosleep(&t_sleep, remptr);
  NaClLog(4, "NaClNanosleep returned %d\n", retval);

  if (-EINTR == retval && NULL != rem) {
    /* definitely different types, and shape may actually differ too. */
    rem = (struct nacl_abi_timespec *) sys_rem;
    rem->tv_sec = remptr->tv_sec;
    rem->tv_nsec = remptr->tv_nsec;
  }

cleanup:
  NaClLog(4, "nanosleep done.\n");
  NaClSysCommonThreadSyscallLeave(natp);
  return retval;
}