Example #1
NaClErrorCode NaClAllocateSpaceAslr(void **mem, size_t addrsp_size,
                                    enum NaClAslrMode aslr_mode) {
  /* 40G guard on each side */
  size_t        mem_sz = (NACL_ADDRSPACE_LOWER_GUARD_SIZE + FOURGIG +
                          NACL_ADDRSPACE_UPPER_GUARD_SIZE);
  size_t        log_align = ALIGN_BITS;
  void          *mem_ptr;
#if NACL_LINUX
  struct rlimit rlim;
#endif

  NaClLog(4, "NaClAllocateSpace(*, 0x%016"NACL_PRIxS" bytes).\n",
          addrsp_size);

  CHECK(addrsp_size == FOURGIG);

  if (NACL_X86_64_ZERO_BASED_SANDBOX) {
    mem_sz = 11 * FOURGIG;
    if (getenv("NACL_ENABLE_INSECURE_ZERO_BASED_SANDBOX") != NULL) {
      /*
       * For the zero-based 64-bit sandbox, we want to reserve 44GB of address
       * space: 4GB for the program plus 40GB of guard pages.  Due to a binutils
       * bug (see http://sourceware.org/bugzilla/show_bug.cgi?id=13400), the
       * amount of address space that the linker can pre-reserve is capped
       * at 4GB. For proper reservation, GNU ld version 2.22 or higher
       * needs to be used.
       *
       * Without the bug fix, trying to reserve 44GB will result in
       * pre-reserving the entire capped space of 4GB.  This tricks the run-time
       * into thinking that we can mmap up to 44GB.  This is unsafe as it can
       * overwrite the run-time program itself and/or other programs.
       *
       * For now, we allow a 4GB address space as a proof-of-concept insecure
       * sandboxing model.
       *
       * TODO(arbenson): remove this if block once the binutils bug is fixed
       */
      mem_sz = FOURGIG;
    }

    NaClAddrSpaceBeforeAlloc(mem_sz);
    if (NaClFindPrereservedSandboxMemory(mem, mem_sz)) {
      int result;
      void *tmp_mem = (void *) NACL_TRAMPOLINE_START;
      CHECK(*mem == 0);
      mem_sz -= NACL_TRAMPOLINE_START;
      result = NaClPageAllocAtAddr(&tmp_mem, mem_sz);
      if (0 != result) {
        NaClLog(2,
                "NaClAllocateSpace: NaClPageAlloc 0x%08"NACL_PRIxPTR
                " failed\n",
                (uintptr_t) *mem);
        return LOAD_NO_MEMORY;
      }
      NaClLog(4, "NaClAllocateSpace: %"NACL_PRIxPTR", %"NACL_PRIxS"\n",
              (uintptr_t) *mem,
              mem_sz);
      return LOAD_OK;
    }
    NaClLog(LOG_ERROR, "Failed to find prereserved memory\n");
    return LOAD_NO_MEMORY;
  }

  NaClAddrSpaceBeforeAlloc(mem_sz);

  errno = 0;
  mem_ptr = NaClAllocatePow2AlignedMemory(mem_sz, log_align, aslr_mode);
  if (NULL == mem_ptr) {
    if (0 != errno) {
      perror("NaClAllocatePow2AlignedMemory");
    }
    NaClLog(LOG_WARNING, "Memory allocation failed\n");
#if NACL_LINUX
    /*
     * Check with getrlimit whether RLIMIT_AS was likely to be the
     * problem with an allocation failure.  If so, generate a log
     * message.  Since this is a debugging aid and we don't know about
     * the memory requirement of the code that is embedding native
     * client, there is some slop.
     */
    if (0 != getrlimit(RLIMIT_AS, &rlim)) {
      perror("NaClAllocatePow2AlignedMemory::getrlimit");
    } else {
      if (rlim.rlim_cur < mem_sz) {
        /*
         * Developer hint/warning; this will show up in the crash log
         * and must be brief.
         */
        NaClLog(LOG_INFO,
                "Please run \"ulimit -v unlimited\" (bash)"
                " or \"limit vmemoryuse unlimited\" (tcsh)\n");
        NaClLog(LOG_INFO,
                "and restart the app.  NaCl requires at least %"NACL_PRIdS""
                " kilobytes of virtual\n",
                mem_sz / 1024);
        NaClLog(LOG_INFO,
                "address space. NB: Raising the hard limit requires"
                " root access.\n");
      }
    }
#elif NACL_OSX
    /*
     * In OSX, RLIMIT_AS and RLIMIT_RSS have the same value; i.e., OSX
     * conflates the notion of virtual address space used with the
     * resident set size.  In particular, the way NaCl uses virtual
     * address space is to allocate guard pages so that certain
     * addressing modes will not need to be explicitly masked; the
     * guard pages are allocated but inaccessible, never faulted so
     * not even zero-filled on demand, so they should not count
     * against the resident set -- which is supposed to be only the
     * frequently accessed pages in the first place.
     */
#endif

    return LOAD_NO_MEMORY;
  }
  /*
   * The module lives in the middle FOURGIG of the allocated region --
   * we skip over an initial 40G guard.
   */
  *mem = (void *) (((char *) mem_ptr) + NACL_ADDRSPACE_LOWER_GUARD_SIZE);
  NaClLog(4,
          "NaClAllocateSpace: addr space at 0x%016"NACL_PRIxPTR"\n",
          (uintptr_t) *mem);

  return LOAD_OK;
}
Example #2
void NaClDescDtorNotImplemented(struct NaClDesc  *vself) {
    UNREFERENCED_PARAMETER(vself);

    NaClLog(LOG_FATAL, "Must implement a destructor!\n");
}
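A brief aside on the pattern: NaClDescDtorNotImplemented looks like the placeholder installed in a base vtable's destructor slot, so a subclass that forgets to supply its own destructor dies loudly at the first destruction instead of leaking or corrupting memory. Below is a minimal sketch of that idiom with hypothetical MyObj/MyObjVtbl names (not the real NaClDesc types); as in the surrounding examples, NaClLog and LOG_FATAL are assumed to come from NaCl's logging header.

struct MyObj;

struct MyObjVtbl {
  void (*Dtor)(struct MyObj *self);
};

struct MyObj {
  struct MyObjVtbl const *vtbl;
};

/* Aborting placeholder, analogous to NaClDescDtorNotImplemented above. */
static void MyObjDtorNotImplemented(struct MyObj *self) {
  (void) self;  /* unreferenced, like UNREFERENCED_PARAMETER(vself) */
  NaClLog(LOG_FATAL, "Must implement a destructor!\n");
}

static struct MyObjVtbl const kMyObjBaseVtbl = {
  MyObjDtorNotImplemented,
};

/* The base ctor installs the placeholder; subclasses are expected to
 * repoint vtbl at a table with a real Dtor. */
static void MyObjCtor(struct MyObj *obj) {
  obj->vtbl = &kMyObjBaseVtbl;
}

/* Destruction dispatches through the vtable, so a missing override is
 * caught immediately. */
static void MyObjDtor(struct MyObj *obj) {
  (*obj->vtbl->Dtor)(obj);
}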
Example #3
void NaClXMutexCtor(struct NaClMutex *mp) {
  if (!NaClMutexCtor(mp)) {
    NaClLog(LOG_FATAL, "NaClXMutexCtor failed\n");
  }
}
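The NaClX prefix in these examples marks check-or-die wrappers: the plain constructor reports failure through its return value, and the X variant turns any failure into LOG_FATAL so callers never have to check. A minimal sketch of the same convention applied to a hypothetical table type (MyTable and its functions are illustrative, not NaCl APIs; NaClLog/LOG_FATAL are assumed available as above).

#include <stdlib.h>

struct MyTable {
  int *slots;
};

/* Fallible constructor: returns non-zero on success, zero on failure. */
static int MyTableCtor(struct MyTable *t) {
  t->slots = calloc(16, sizeof *t->slots);
  return NULL != t->slots;
}

/* "X" variant: either the table is usable afterwards, or we never return. */
static void MyXTableCtor(struct MyTable *t) {
  if (!MyTableCtor(t)) {
    NaClLog(LOG_FATAL, "MyXTableCtor failed\n");
  }
}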
Example #4
void NaClLogAddressSpaceLayout(struct NaClApp *nap) {
  NaClLog(2, "NaClApp addr space layout:\n");
  NaClLog(2, "nap->static_text_end    = 0x%016"NACL_PRIxPTR"\n",
          nap->static_text_end);
  NaClLog(2, "nap->dynamic_text_start = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_start);
  NaClLog(2, "nap->dynamic_text_end   = 0x%016"NACL_PRIxPTR"\n",
          nap->dynamic_text_end);
  NaClLog(2, "nap->rodata_start       = 0x%016"NACL_PRIxPTR"\n",
          nap->rodata_start);
  NaClLog(2, "nap->data_start         = 0x%016"NACL_PRIxPTR"\n",
          nap->data_start);
  NaClLog(2, "nap->data_end           = 0x%016"NACL_PRIxPTR"\n",
          nap->data_end);
  NaClLog(2, "nap->break_addr         = 0x%016"NACL_PRIxPTR"\n",
          nap->break_addr);
  NaClLog(2, "nap->initial_entry_pt   = 0x%016"NACL_PRIxPTR"\n",
          nap->initial_entry_pt);
  NaClLog(2, "nap->user_entry_pt      = 0x%016"NACL_PRIxPTR"\n",
          nap->user_entry_pt);
  NaClLog(2, "nap->bundle_size        = 0x%x\n", nap->bundle_size);
}
Example #5
int NaClAppLaunchServiceThreads(struct NaClApp *nap) {
  struct NaClManifestProxy                    *manifest_proxy = NULL;
  struct NaClKernelService                    *kernel_service = NULL;
  int                                         rv = 0;
  enum NaClReverseChannelInitializationState  init_state;

  NaClNameServiceLaunch(nap->name_service);

  kernel_service = (struct NaClKernelService *) malloc(sizeof *kernel_service);
  if (NULL == kernel_service) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: No memory for kern service\n");
    goto done;
  }

  if (!NaClKernelServiceCtor(kernel_service,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernServiceCtor failed\n");
    free(kernel_service);
    kernel_service = NULL;
    goto done;
  }

  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           kernel_service)) {
    NaClLog(LOG_ERROR,
            "NaClAppLaunchServiceThreads: KernService start service failed\n");
    goto done;
  }
  /*
   * NB: StartServiceThread grabbed another reference to kernel_service,
   * used by the service thread.  Closing the connection capability
   * should cause the service thread to shut down and in turn release
   * that reference.
   */

  /*
   * The locking here isn't really needed.  Here is why:
   * reverse_channel_initialized is written in the reverse_setup RPC
   * handler on the secure command channel RPC handler thread, and
   * the RPC order requires that the plugin invoke reverse_setup prior
   * to invoking start_module, so there will have been plenty of other
   * synchronization operations to force cache coherency
   * (module_may_start, for example, is set in the cache of the secure
   * channel RPC handler (in start_module) and read by the main
   * thread, and the synchronization operations needed to propagate
   * its value properly suffices to propagate
   * reverse_channel_initialized as well).  However, reading it while
   * holding a lock is more obviously correct for tools like tsan.
   * Due to the RPC order, it is impossible for
   * reverse_channel_initialized to get set after the unlock and
   * before the if test.
   */
  NaClXMutexLock(&nap->mu);
  /*
   * If no reverse_setup RPC was made, then we do not set up a
   * manifest proxy.  Otherwise, we make sure that the reverse channel
   * setup is done, so that the application can actually use
   * reverse-channel-based services such as the manifest proxy.
   */
  if (NACL_REVERSE_CHANNEL_UNINITIALIZED !=
      (init_state = nap->reverse_channel_initialization_state)) {
    while (NACL_REVERSE_CHANNEL_INITIALIZED !=
      (init_state = nap->reverse_channel_initialization_state)) {
      NaClXCondVarWait(&nap->cv, &nap->mu);
    }
  }
  NaClXMutexUnlock(&nap->mu);
  if (NACL_REVERSE_CHANNEL_INITIALIZED != init_state) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " launched kernel services.\n"));
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: no reverse channel;"
             " NOT launching manifest proxy.\n"));
    nap->kernel_service = kernel_service;
    kernel_service = NULL;

    rv = 1;
    goto done;
  }

  /*
   * Allocate/construct the manifest proxy without grabbing global
   * locks.
   */
  NaClLog(3, "NaClAppLaunchServiceThreads: launching manifest proxy\n");

  /*
   * The ReverseClientSetup RPC should be done via the command channel
   * prior to the load_module / start_module RPCs, and this code runs
   * after that, so checking
   * nap->reverse_client suffices for determining whether the proxy is
   * exporting reverse services.
   */
  manifest_proxy = (struct NaClManifestProxy *) malloc(sizeof *manifest_proxy);
  if (NULL == manifest_proxy) {
    NaClLog(LOG_ERROR, "No memory for manifest proxy\n");
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }
  if (!NaClManifestProxyCtor(manifest_proxy,
                             NaClAddrSpSquattingThreadIfFactoryFunction,
                             (void *) nap,
                             nap)) {
    NaClLog(LOG_ERROR, "ManifestProxyCtor failed\n");
    /* do not leave a non-NULL pointer to a not-fully constructed object */
    free(manifest_proxy);
    manifest_proxy = NULL;
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  /*
   * NaClSimpleServiceStartServiceThread requires the nap->mu lock.
   */
  if (!NaClSimpleServiceStartServiceThread((struct NaClSimpleService *)
                                           manifest_proxy)) {
    NaClLog(LOG_ERROR, "ManifestProxy start service failed\n");
    NaClDescUnref(kernel_service->base.bound_and_cap[1]);
    goto done;
  }

  NaClXMutexLock(&nap->mu);
  CHECK(NULL == nap->manifest_proxy);
  CHECK(NULL == nap->kernel_service);

  nap->manifest_proxy = manifest_proxy;
  manifest_proxy = NULL;
  nap->kernel_service = kernel_service;
  kernel_service = NULL;
  NaClXMutexUnlock(&nap->mu);
  rv = 1;

done:
  NaClXMutexLock(&nap->mu);
  if (NULL != nap->manifest_proxy) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding manifest proxy to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "ManifestNameService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->manifest_proxy->base.bound_and_cap[1]));
  }
  if (NULL != nap->kernel_service) {
    NaClLog(3,
            ("NaClAppLaunchServiceThreads: adding kernel service to"
             " name service\n"));
    (*NACL_VTBL(NaClNameService, nap->name_service)->
     CreateDescEntry)(nap->name_service,
                      "KernelService", NACL_ABI_O_RDWR,
                      NaClDescRef(nap->kernel_service->base.bound_and_cap[1]));
  }

  NaClXMutexUnlock(&nap->mu);

  /*
   * Single exit path.
   *
   * Error cleanup invariant.  No service thread should be running
   * (modulo asynchronous shutdown).  Automatic variables refer to
   * fully constructed objects if non-NULL, and when ownership is
   * transferred to the NaClApp object the corresponding automatic
   * variable is set to NULL.
   */
  NaClRefCountSafeUnref((struct NaClRefCount *) manifest_proxy);
  NaClRefCountSafeUnref((struct NaClRefCount *) kernel_service);
  return rv;
}
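The closing comment spells out the cleanup invariant this function relies on: locals only ever refer to fully constructed objects, transferring ownership into the NaClApp NULLs the local, and the single exit path safe-unrefs whatever is still non-NULL. Below is a compilable sketch of that shape with a toy refcounted type (Widget, WidgetNew, WidgetSafeUnref and Owner are hypothetical stand-ins, not NaCl APIs).

#include <stdlib.h>

struct Widget {
  int refcount;
};

static struct Widget *WidgetNew(void) {
  struct Widget *w = malloc(sizeof *w);
  if (NULL != w) {
    w->refcount = 1;
  }
  return w;
}

/* Tolerates NULL, in the spirit of NaClRefCountSafeUnref. */
static void WidgetSafeUnref(struct Widget *w) {
  if (NULL != w && 0 == --w->refcount) {
    free(w);
  }
}

struct Owner {
  struct Widget *widget;
};

static int OwnerAttachWidget(struct Owner *owner) {
  int rv = 0;
  struct Widget *widget = WidgetNew();

  if (NULL == widget) {
    goto done;              /* nothing owned yet */
  }
  /* ... further setup steps, each of which may goto done on failure ... */

  owner->widget = widget;   /* ownership transferred to owner ... */
  widget = NULL;            /* ... so the exit path must not release it */
  rv = 1;

done:
  WidgetSafeUnref(widget);  /* releases only on the error paths */
  return rv;
}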
Example #6
static void nacl_flip_buffers() {
  NaClLog(LOG_INFO, "nacl_flip_buffers\n");
  VideoBuffers.current_buffer ^= 1;
}
Example #7
static void nacl_getsize(int* w, int* h) {
  NaClLog(LOG_INFO, "nacl_getsize %d %d\n", GetWidth(), GetHeight());
  *w = GetWidth();
  *h = GetHeight();
}
Example #8
int main(int ac, char **av) {
  int                         opt;
  char const                  *message = NULL;
  char                        *conn_addr = NULL;
  ssize_t                     rv;
  struct NaClDesc             *channel;
  struct NaClNrdXferEffector  eff;
  struct NaClDescEffector     *effp;
  struct NaClDesc             *pair[2];
  struct NaClSocketAddress    nsa;
  struct NaClDescConnCap      ndcc;
  struct NaClImcTypedMsgHdr   msg_hdr;
  struct NaClImcMsgIoVec      iov[1];
  struct NaClDesc             *desc_buffer[NACL_ABI_IMC_USER_DESC_MAX];
  char                        data_buffer[4096];
  size_t                      i;
  char                        *transfer_file = NULL;

  printf("Hello world\n");

  NaClNrdAllModulesInit();

  printf("Learning to walk... (parsing command line)\n");

  while (EOF != (opt = getopt(ac, av, "c:m:st:v"))) {
    switch (opt) {
      case 'c':
        conn_addr = optarg;
        break;
      case 'm':
        message = optarg;
        break;
      case 's':
        server = 1;
        break;
      case 't':
        transfer_file = optarg;
        break;
      case 'v':
        NaClLogIncrVerbosity();
        break;
      default:
        fprintf(stderr,
                "Usage: nrd_xfer_test [-sv] [-c connect-addr] [-m message]\n"
                "         [-t transfer_file_name]\n"
                "\n");
        fprintf(stderr,
                 "    -s run in server mode (prints NaCl sock addr)\n"
                 "    -v increases verbosity in the NRD xfer library\n"
                 "    -c run in client mode, with server NaCl sock addr as\n"
                 "       parameter\n"
                 "    -m message to be sent to peer (client sends message\n"
                 "       as payload data in IMC datagram; and if -t was\n"
                 "       specifed in the client to transfer a file\n"
                 "       descriptor to the server, the server will write its\n"
                 "       message into the file via the transferred\n"
                 "       descriptor\n");
        return 1;
    }
  }

  printf("Learning to talk... (setting up channels)\n");

  if (NULL == message) {
    if (server) {
      message = "\"Hello world!\", from server\n";
    } else {
      message = "\"Goodbye cruel world!\", from client\n";
    }
  }

  if (0 != (rv = NaClCommonDescMakeBoundSock(pair))) {
    fprintf(stderr, "make bound sock returned %"NACL_PRIdS"\n", rv);
    return 2;
  }

  if (!NaClNrdXferEffectorCtor(&eff, pair[0])) {
    fprintf(stderr, "EffectorCtor failed\n");
    return 3;
  }
  effp = (struct NaClDescEffector *) &eff;
  memset(desc_buffer, 0, sizeof desc_buffer);
  memset(data_buffer, 0, sizeof data_buffer);

  if (server) {
    /*
     * print out our sockaddr, accept a connection, then receive a message,
     * and print it out
     */

    /* not opaque type */
    printf("Server socket address:\n%.*s\n",
           NACL_PATH_MAX,
           ((struct NaClDescConnCap *) pair[1])->cap.path);
    fflush(stdout);

    if (0 != (rv = (*pair[0]->vtbl->AcceptConn)(pair[0], effp))) {
      fprintf(stderr, "AcceptConn returned %"NACL_PRIdS"\n", rv);
      return 4;
    }

    channel = NaClNrdXferEffectorTakeDesc(&eff);
    if (NULL == channel) {
      fprintf(stderr, "Could not take descriptor from accept\n");
      return 5;
    }

    iov[0].base = data_buffer;
    iov[0].length = sizeof data_buffer;

    msg_hdr.iov = iov;
    msg_hdr.iov_length = NACL_ARRAY_SIZE(iov);
    msg_hdr.ndescv = desc_buffer;
    msg_hdr.ndesc_length = NACL_ARRAY_SIZE(desc_buffer);

    rv = NaClImcRecvTypedMessage(channel, effp, &msg_hdr, 0);

    printf("Receive returned %"NACL_PRIdS"\n", rv);

    if (!NaClIsNegErrno(rv)) {
      /* Sanity check: make sure the return value is within range.
       * This is a panic check because NaClImcRecvTypedMessage should
       * never return more than the amount of data we asked for, and
       * that should never be more than INT_MAX.
       */
      if (((size_t) rv > sizeof data_buffer) || (rv > INT_MAX)) {
        NaClLog(LOG_FATAL, "Buffer overflow in NaClImcRecvTypedMessage. "
                "Requested %"NACL_PRIdS" bytes, received %"NACL_PRIdS".",
                sizeof data_buffer, rv);
      }
      /* Casting rv to int here because otherwise the pedantic Mac compiler
       * will complain. Cast is safe due to the range check above.
       */
      printf("Data bytes: %.*s\n", (int)rv, data_buffer);
      printf("Got %"NACL_PRIdNACL_SIZE" NaCl descriptors\n",
             msg_hdr.ndesc_length);

      for (i = 0; i < msg_hdr.ndesc_length; ++i) {
        struct NaClDesc *ndp;
        size_t msglen = strlen(message);
        ssize_t write_result;
        /*
         * TODO(bsy): a bit gross; we should expose type tags and RTTI
         * in a better way, e.g, downcast functions.  (Though exposing
         * type tags allows users to use a switch statement on the
         * type tag, rather than linearly trying to downcast to all
         * subclasses.)
         */
        ndp = msg_hdr.ndescv[i];
        printf(" type %d\n", ndp->vtbl->typeTag);

        write_result = (*ndp->vtbl->Write)(ndp,
                                           effp,
                                           (void *) message,
                                           msglen);
        if (-1 == write_result || msglen != (size_t) write_result) {
          printf("Write failed: got %"NACL_PRIdS", expected %"NACL_PRIuS"\n",
                 write_result, msglen);
        }

        NaClDescUnref(ndp);
      }
    }

    NaClDescUnref(channel);
    NaClDescUnref(pair[0]);
    NaClDescUnref(pair[1]);

  } else {
    if (NULL == conn_addr) {
      fprintf(stderr,
              "Client needs server socket address to which to connect\n");
      return 100;
    }

    memset(&nsa, 0, sizeof nsa);
    strncpy(nsa.path, conn_addr, sizeof nsa.path);  /* not nec'y NUL term'd */

    if (!NaClDescConnCapCtor(&ndcc, &nsa)) {
      fprintf(stderr,
              "Client conn cap initialization failed\n");
      return 101;
    }

    rv = (*ndcc.base.vtbl->ConnectAddr)((struct NaClDesc *) &ndcc, effp);

    printf("Connect returned %"NACL_PRIdS"\n", rv);

    if (0 != rv) {
      fprintf(stderr, "Client could not connect\n");
      return 102;
    }

    channel = NaClNrdXferEffectorTakeDesc(&eff);
    if (NULL == channel) {
      fprintf(stderr, "Could not take descriptor from connect\n");
      return 103;
    }

    strncpy(data_buffer, message, sizeof data_buffer);
    iov[0].base = data_buffer;
    iov[0].length = strlen(data_buffer);

    msg_hdr.iov = iov;
    msg_hdr.iov_length = NACL_ARRAY_SIZE(iov);
    msg_hdr.ndesc_length = 0;
    msg_hdr.ndescv = desc_buffer;

    if (NULL != transfer_file) {
      int                 xfer_fd;
      struct NaClHostDesc *nhdp = malloc(sizeof *nhdp);

      xfer_fd = OPEN(transfer_file, O_CREAT | O_WRONLY | O_TRUNC, 0777);
      if (-1 == xfer_fd) {
        fprintf(stderr, "Could not open file \"%s\" to transfer descriptor.\n",
                transfer_file);
        return 104;
      }
      NaClHostDescPosixTake(nhdp, xfer_fd, O_RDWR);
      desc_buffer[0] = (struct NaClDesc *) NaClDescIoDescMake(nhdp);
      msg_hdr.ndesc_length = 1;
    }

    rv = NaClImcSendTypedMessage(channel, effp, &msg_hdr, 0);

    if (NULL != desc_buffer[0]) {
      NaClDescUnref(desc_buffer[0]);
      desc_buffer[0] = NULL;
    }

    printf("Send returned %"NACL_PRIdS"\n", rv);
  }

  (*effp->vtbl->Dtor)(effp);

  NaClNrdAllModulesFini();
  return 0;
}
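The iov/msg_hdr setup above is plain scatter/gather: each entry names a base pointer and a length, and the transport treats the array as one logical message. For comparison only, here is the same idea with POSIX writev(2) rather than the NaCl IMC API, as a standalone program.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void) {
  char          header[] = "len=6 ";
  char          payload[] = "hello\n";
  struct iovec  iov[2];
  ssize_t       rv;

  iov[0].iov_base = header;
  iov[0].iov_len = strlen(header);
  iov[1].iov_base = payload;
  iov[1].iov_len = strlen(payload);

  /* Gather both buffers into a single write on stdout. */
  rv = writev(STDOUT_FILENO, iov, 2);
  if (rv < 0) {
    perror("writev");
    return 1;
  }
  return 0;
}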
Example #9
void hello_world(void) {
  NaClLog(LOG_INFO, "Hello, World!\n");
}
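Taken together, the examples in this collection use two kinds of first argument to NaClLog: small positive integers (2, 3, 4) appear to be verbosity levels that only print once verbosity has been raised (e.g. via NaClLogIncrVerbosity in Example #8), while the LOG_* constants are severities, with LOG_FATAL also aborting. A small illustrative wrapper (not a NaCl API) showing both in one place:

static void LogStartup(int port) {
  /* Detail that shows up only when the embedder raises verbosity to >= 4. */
  NaClLog(4, "LogStartup: binding debug port %d\n", port);

  /* Severities print regardless of verbosity. */
  NaClLog(LOG_INFO, "LogStartup: ready\n");
  if (port < 0) {
    NaClLog(LOG_FATAL, "LogStartup: invalid port\n");  /* does not return */
  }
}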
Example #10
/* set *out_desc to struct NaClDescIo * output */
int NaClDescIoInternalize(struct NaClDesc               **out_desc,
                          struct NaClDescXferState      *xfer,
                          struct NaClDescQuotaInterface *quota_interface) {
  int                   rv;
  NaClHandle            h;
  int                   d;
  int                   flags;
  struct NaClHostDesc   *nhdp;
  struct NaClDescIoDesc *ndidp;

  UNREFERENCED_PARAMETER(quota_interface);
  rv = -NACL_ABI_EIO;  /* catch-all */
  h = NACL_INVALID_HANDLE;
  nhdp = NULL;
  ndidp = NULL;

  nhdp = malloc(sizeof *nhdp);
  if (NULL == nhdp) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  ndidp = malloc(sizeof *ndidp);
  if (!ndidp) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (!NaClDescInternalizeCtor((struct NaClDesc *) ndidp, xfer)) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup;
  }
  if (xfer->next_handle == xfer->handle_buffer_end ||
      xfer->next_byte + sizeof ndidp->hd->flags > xfer->byte_buffer_end) {
    rv = -NACL_ABI_EIO;
    goto cleanup_ndidp_dtor;
  }

  NACL_COMPILE_TIME_ASSERT(sizeof flags == sizeof(ndidp->hd->flags));
  memcpy(&flags, xfer->next_byte, sizeof flags);
  xfer->next_byte += sizeof flags;

  h = *xfer->next_handle;
  *xfer->next_handle++ = NACL_INVALID_HANDLE;
#if NACL_WINDOWS
  if (-1 == (d = _open_osfhandle((intptr_t) h, _O_RDWR | _O_BINARY))) {
    rv = -NACL_ABI_EIO;
    goto cleanup_ndidp_dtor;
  }
#else
  d = h;
#endif
  /*
   * We mark it as read/write, but don't really know for sure until we
   * try to make those syscalls (in which case we'd get EBADF).
   */
  if ((rv = NaClHostDescPosixTake(nhdp, d, flags)) < 0) {
    goto cleanup_ndidp_dtor;
  }
  h = NACL_INVALID_HANDLE;  /* nhdp took ownership of h */

  if (!NaClDescIoDescSubclassCtor(ndidp, nhdp)) {
    rv = -NACL_ABI_ENOMEM;
    goto cleanup_nhdp_dtor;
  }
  /*
   * ndidp took ownership of nhdp, now give ownership of ndidp to caller.
   */
  *out_desc = (struct NaClDesc *) ndidp;
  rv = 0;
 cleanup_nhdp_dtor:
  if (rv < 0) {
    if (0 != NaClHostDescClose(nhdp)) {
      NaClLog(LOG_FATAL, "NaClDescIoInternalize: NaClHostDescClose failed\n");
    }
  }
 cleanup_ndidp_dtor:
  if (rv < 0) {
    NaClDescSafeUnref((struct NaClDesc *) ndidp);
    ndidp = NULL;
  }
 cleanup:
  if (rv < 0) {
    free(nhdp);
    free(ndidp);
    if (NACL_INVALID_HANDLE != h) {
      (void) NaClClose(h);
    }
  }
  return rv;
}
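The cleanup_nhdp_dtor / cleanup_ndidp_dtor / cleanup labels form a layered goto-cleanup ladder: each label releases exactly what had been acquired by the time of the corresponding failure, and the rv < 0 guard lets the success path fall straight through the labels without releasing anything. A self-contained sketch of the same ladder with ordinary resources (MakePair and its resources are hypothetical).

#include <stdio.h>
#include <stdlib.h>

int MakePair(FILE **out_f, char **out_buf) {
  int   rv = -1;             /* catch-all failure */
  FILE  *f = NULL;
  char  *buf = NULL;

  f = fopen("/dev/null", "w");
  if (NULL == f) {
    goto cleanup;
  }
  buf = malloc(4096);
  if (NULL == buf) {
    goto cleanup_file;
  }

  *out_f = f;                /* ownership passes to the caller */
  *out_buf = buf;
  rv = 0;

 cleanup_file:
  if (rv < 0) {
    fclose(f);
  }
 cleanup:
  if (rv < 0) {
    free(buf);               /* free(NULL) is a no-op */
  }
  return rv;
}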
Example #11
static INLINE void EmitObsoleteValidatorWarning(void) {
  NaClLog(LOG_WARNING, "USING OBSOLETE NON-DFA-BASED VALIDATOR!\n");
}
Example #12
static int LoadApp(struct NaClApp *nap, struct NaClChromeMainArgs *args) {
  NaClErrorCode errcode = LOAD_OK;
  int has_bootstrap_channel = args->imc_bootstrap_handle != NACL_INVALID_HANDLE;

  CHECK(g_initialized);

  /* Allow or disallow dyncode API based on args. */
  nap->enable_dyncode_syscalls = args->enable_dyncode_syscalls;
  nap->initial_nexe_max_code_bytes = args->initial_nexe_max_code_bytes;
  nap->pnacl_mode = args->pnacl_mode;

#if NACL_LINUX
  g_prereserved_sandbox_size = args->prereserved_sandbox_size;
#endif
#if NACL_LINUX || NACL_OSX
  /*
   * Overwrite value of sc_nprocessors_onln set in NaClAppCtor.  In
   * the Chrome embedding, the outer sandbox was already enabled when
   * the NaClApp Ctor was invoked, so a bogus value was written in
   * sc_nprocessors_onln.
   */
  if (-1 != args->number_of_cores) {
    nap->sc_nprocessors_onln = args->number_of_cores;
  }
#endif

  if (args->create_memory_object_func != NULL)
    NaClSetCreateMemoryObjectFunc(args->create_memory_object_func);

  /* Inject the validation caching interface, if it exists. */
  nap->validation_cache = args->validation_cache;

#if NACL_WINDOWS
  if (args->broker_duplicate_handle_func != NULL)
    NaClSetBrokerDuplicateHandleFunc(args->broker_duplicate_handle_func);
#endif

  NaClAppInitialDescriptorHookup(nap);

  /*
   * NACL_SERVICE_PORT_DESCRIPTOR and NACL_SERVICE_ADDRESS_DESCRIPTOR
   * are 3 and 4.
   */

  /*
   * in order to report load error to the browser plugin through the
   * secure command channel, we do not immediately jump to cleanup code
   * on error.  rather, we continue processing (assuming earlier
   * errors do not make it inappropriate) until the secure command
   * channel is set up, and then bail out.
   */

  /*
   * Ensure this operating system platform is supported.
   */
  if (args->skip_qualification) {
    fprintf(stderr, "PLATFORM QUALIFICATION DISABLED - "
        "Native Client's sandbox will be unreliable!\n");
  } else {
    errcode = NACL_FI_VAL("pq", NaClErrorCode,
                          NaClRunSelQualificationTests());
    if (LOAD_OK != errcode) {
      nap->module_load_status = errcode;
      fprintf(stderr, "Error while loading in SelMain: %s\n",
              NaClErrorString(errcode));
    }
  }

  /*
   * Patch the Windows exception dispatcher to be safe in the case
   * of faults inside x86-64 sandboxed code.  The sandbox is not
   * secure on 64-bit Windows without this.
   */
#if (NACL_WINDOWS && NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && \
     NACL_BUILD_SUBARCH == 64)
  NaClPatchWindowsExceptionDispatcher();
#endif
  NaClSignalTestCrashOnStartup();

  nap->enable_exception_handling = args->enable_exception_handling;

  if (args->enable_exception_handling || args->enable_debug_stub) {
#if NACL_LINUX
    /* NaCl's signal handler is always enabled on Linux. */
#elif NACL_OSX
    if (!NaClInterceptMachExceptions()) {
      NaClLog(LOG_FATAL, "LoadApp: Failed to set up Mach exception handler\n");
    }
#elif NACL_WINDOWS
    nap->attach_debug_exception_handler_func =
        args->attach_debug_exception_handler_func;
#else
# error Unknown host OS
#endif
  }
#if NACL_LINUX
  NaClSignalHandlerInit();
#endif

  /* Give debuggers a well known point at which xlate_base is known.  */
  NaClGdbHook(nap);

  if (has_bootstrap_channel) {
    NaClCreateServiceSocket(nap);
    /*
     * LOG_FATAL errors that occur before NaClSetUpBootstrapChannel will
     * not be reported via the crash log mechanism (for Chromium
     * embedding of NaCl, shown in the JavaScript console).
     *
     * Some errors, such as due to NaClRunSelQualificationTests, do not
     * trigger a LOG_FATAL but instead set module_load_status to be sent
     * in the start_module RPC reply.  Log messages associated with such
     * errors would be seen, since NaClSetUpBootstrapChannel will get
     * called.
     */
    NaClSetUpBootstrapChannel(nap, args->imc_bootstrap_handle);
  }

  CHECK(args->nexe_desc != NULL);
  NaClAppLoadModule(nap, args->nexe_desc, NULL, NULL);
  NaClDescUnref(args->nexe_desc);
  args->nexe_desc = NULL;

  if (has_bootstrap_channel) {
    NACL_FI_FATAL("BeforeSecureCommandChannel");
    /*
     * Spawns a thread that uses the command channel.
     * Hereafter any changes to nap should be done while holding locks.
     */
    NaClSecureCommandChannel(nap);

    NaClLog(4, "NaClSecureCommandChannel has spawned channel\n");

    NaClLog(4, "secure service = %"NACL_PRIxPTR"\n",
            (uintptr_t) nap->secure_service);

  }

  NACL_FI_FATAL("BeforeLoadIrt");

  /*
   * error reporting done; can quit now if there was an error earlier.
   */
  if (LOAD_OK == errcode) {
    errcode = NaClGetLoadStatus(nap);
  }
  if (LOAD_OK != errcode) {
    goto done;
  }

  /*
   * Load the integrated runtime (IRT) library.
   * Skip if irt_load_optional and the nexe doesn't have the usual 256MB
   * segment gap. PNaCl's disabling of the segment gap doesn't actually
   * disable the segment gap. It only reduces it drastically.
   */
  if (args->irt_load_optional && nap->dynamic_text_end < 0x10000000) {
    NaClLog(1,
            "Skipped NaClLoadIrt, irt_load_optional with dynamic_text_end: %"
            NACL_PRIxPTR"\n", nap->dynamic_text_end);
  } else {
    if (args->irt_fd != -1) {
      CHECK(args->irt_desc == NULL);
      args->irt_desc = IrtDescFromFd(args->irt_fd);
      args->irt_fd = -1;
    }
    if (args->irt_desc != NULL) {
      NaClLoadIrt(nap, args->irt_desc);
      NaClDescUnref(args->irt_desc);
      args->irt_desc = NULL;
    }
  }

  if (args->enable_debug_stub) {
#if NACL_LINUX || NACL_OSX
    if (args->debug_stub_server_bound_socket_fd != NACL_INVALID_SOCKET) {
      NaClDebugSetBoundSocket(args->debug_stub_server_bound_socket_fd);
    }
#endif
    if (!NaClDebugInit(nap)) {
      goto done;
    }
#if NACL_WINDOWS
    if (NULL != args->debug_stub_server_port_selected_handler_func) {
      args->debug_stub_server_port_selected_handler_func(nap->debug_stub_port);
    }
#endif
  }

  if (args->load_status_handler_func != NULL) {
    args->load_status_handler_func(LOAD_OK);
  }
  return LOAD_OK;

done:
  fflush(stdout);

  /*
   * If there is a load status callback, call that now and transfer logs
   * in preparation for process exit.
   */
  if (args->load_status_handler_func != NULL) {
    /* Don't return LOAD_OK if we had some failure loading. */
    if (LOAD_OK == errcode) {
      errcode = LOAD_INTERNAL;
    }
    args->load_status_handler_func(errcode);
    NaClLog(LOG_ERROR, "NaCl LoadApp failed. Transferring logs before exit.\n");
    NaClLogRunAbortBehavior();
    /*
     * Fall through and run NaClBlockIfCommandChannelExists.
     * TODO(jvoung): remove NaClBlockIfCommandChannelExists() and use the
     * callback to indicate the load_status after Chromium no longer calls
     * start_module. We also need to change Chromium so that it does not
     * attempt to set up the command channel if there is a known load error.
     * Otherwise there is a race between this process's exit / load error
     * reporting, and the command channel setup on the Chromium side (plus
     * the associated reporting). Thus this could end up with two different
     * load errors being reported (1) the real load error from here, and
     * (2) the command channel setup failure because the process exited in
     * the middle of setting up the command channel.
     */
  }
  /*
   * If there is a secure command channel, we sent an RPC reply with
   * the reason that the nexe was rejected.  If we exit now, that
   * reply may still be in-flight and the various channel closure (esp
   * reverse channel) may be detected first.  This would result in a
   * crash being reported, rather than the error in the RPC reply.
   * Instead, we wait for the hard-shutdown on the command channel.
   */
  if (LOAD_OK != errcode) {
    NaClBlockIfCommandChannelExists(nap);
  } else {
    /*
     * Don't return LOAD_OK if we had some failure loading.
     */
    errcode = LOAD_INTERNAL;
  }
  return errcode;
}
Example #13
static void NaClDescEffLdrUnmapMemory(struct NaClDescEffector  *vself,
                                      uintptr_t                sysaddr,
                                      size_t                   nbytes) {
  struct NaClDescEffectorLdr  *self = (struct NaClDescEffectorLdr *) vself;
  uintptr_t                   addr;
  uintptr_t                   endaddr;
  uintptr_t                   usraddr;
  struct NaClVmmapEntry const *map_region;

  NaClLog(4,
          ("NaClDescEffLdrUnmapMemory(0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS")\n"),
          (uintptr_t) vself, (uintptr_t) sysaddr, nbytes);

  for (addr = sysaddr, endaddr = sysaddr + nbytes;
       addr < endaddr;
       addr += NACL_MAP_PAGESIZE) {
    usraddr = NaClSysToUser(self->nap, addr);

    map_region = NaClVmmapFindPage(&self->nap->mem_map,
                                   usraddr >> NACL_PAGESHIFT);
    /*
     * When mapping beyond the end of file, the mapping will be rounded to
     * the 64k page boundary and the remaining space will be marked as
     * inaccessible by marking the pages as MEM_RESERVE.
     *
     * When unmapping the memory region, we use the file size, recorded in
     * the VmmapEntry to prevent a race condition when file size changes
     * after it was mmapped, together with the page num and offset to check
     * whether the page is the one backed by the file, in which case we
     * need to unmap it, or whether it's one of the tail pages backed by the
     * virtual memory in which case we need to release it.
     */
    if (NULL != map_region &&
        NULL != map_region->desc &&
        (map_region->offset + (usraddr -
            (map_region->page_num << NACL_PAGESHIFT))
         < (uintptr_t) map_region->file_size)) {
      if (!UnmapViewOfFile((void *) addr)) {
        NaClLog(LOG_FATAL,
                ("NaClDescEffLdrUnmapMemory: UnmapViewOfFile failed at"
                 " user addr 0x%08"NACL_PRIxPTR" (sys 0x%08"NACL_PRIxPTR")"
                 " error %d\n"),
                usraddr, addr, GetLastError());
      }
    } else {
      /*
       * No memory in address space, and we have only MEM_RESERVE'd
       * the address space; or memory is in address space, but not
       * backed by a file.
       */
      if (!VirtualFree((void *) addr, 0, MEM_RELEASE)) {
        NaClLog(LOG_FATAL,
                ("NaClDescEffLdrUnmapMemory: VirtualFree at user addr"
                 " 0x%08"NACL_PRIxPTR" (sys 0x%08"NACL_PRIxPTR") failed:"
                 " error %d\n"),
                usraddr, addr, GetLastError());
      }
    }
  }
}
Example #14
/*
 * NaClAllocatePow2AlignedMemory is for allocating a large amount of
 * memory of mem_sz bytes that must be address aligned, so that
 * log_alignment low-order address bits must be zero.
 *
 * Returns the aligned region on success, or NULL on failure.
 */
static void *NaClAllocatePow2AlignedMemory(size_t mem_sz,
                                           size_t log_alignment,
                                           enum NaClAslrMode aslr_mode) {
  uintptr_t pow2align;
  size_t request_size;
  uintptr_t unrounded_addr;
  uintptr_t rounded_addr;
  size_t extra;
  int found_memory;

  pow2align = ((uintptr_t) 1) << log_alignment;
  request_size = mem_sz + pow2align;

  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxS"\n",
          " Ask:",
          request_size);
  if (NACL_ENABLE_ASLR == aslr_mode) {
    found_memory = NaClFindAddressSpaceRandomized(
        &unrounded_addr,
        request_size,
        MAX_ADDRESS_RANDOMIZATION_ATTEMPTS);
  } else {
    found_memory = NaClFindAddressSpace(&unrounded_addr, request_size);
  }
  if (!found_memory) {
    NaClLog(LOG_FATAL,
            "NaClAllocatePow2AlignedMemory: Failed to reserve %"NACL_PRIxS
            " bytes of address space\n",
            request_size);
  }

  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n",
          "orig memory at",
          unrounded_addr);

  rounded_addr = (unrounded_addr + (pow2align - 1)) & ~(pow2align - 1);
  extra = rounded_addr - unrounded_addr;

  if (0 != extra) {
    NaClLog(4,
            "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n",
            "Freeing front:",
            unrounded_addr,
            extra);
    if (-1 == munmap((void *) unrounded_addr, extra)) {
      perror("munmap (front)");
      NaClLog(LOG_FATAL,
              "NaClAllocatePow2AlignedMemory: munmap front failed\n");
    }
  }

  extra = pow2align - extra;
  if (0 != extra) {
    NaClLog(4,
            "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n",
            "Freeing tail:",
            rounded_addr + mem_sz,
            extra);
    if (-1 == munmap((void *) (rounded_addr + mem_sz),
                     extra)) {
      perror("munmap (end)");
      NaClLog(LOG_FATAL,
              "NaClAllocatePow2AlignedMemory: munmap tail failed\n");
    }
  }
  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n",
          "Aligned memory:",
          rounded_addr);

  /*
   * we could also mmap again at rounded_addr w/o MAP_NORESERVE etc to
   * ensure that we have the memory, but that's better done in another
   * utility function.  the semantics here is no paging space
   * reserved, as in Windows MEM_RESERVE without MEM_COMMIT.
   */

  return (void *) rounded_addr;
}
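The (addr + (pow2align - 1)) & ~(pow2align - 1) step rounds the raw reservation address up to the requested power-of-two boundary, and the two munmap calls trim the unaligned front and the leftover tail. A worked instance of just the rounding arithmetic, using an arbitrary illustrative address and a 4GB (2^32) alignment chosen for the demo:

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t pow2align = (uint64_t) 1 << 32;   /* 4GB alignment, demo value */
  uint64_t unrounded = 0x7f3a12345678ULL;    /* arbitrary mmap-style address */
  uint64_t rounded = (unrounded + (pow2align - 1)) & ~(pow2align - 1);

  assert(rounded == 0x7f3b00000000ULL);      /* next 4GB boundary above */
  assert(0 == (rounded & (pow2align - 1)));  /* low 32 bits are clear */
  assert(rounded - unrounded < pow2align);   /* "extra" trimmed off the front */
  return 0;
}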
Example #15
static void nacl_print(int x, int y, CONST char* text) {
  if (text[0]) {
    NaClLog(LOG_INFO, "nacl_print [%s]\n", text);
  }
}
Example #16
static void nacl_getmouse(int* x, int* y, int* b) {
  NaClLog(LOG_INFO, "nacl_getmouse\n");
}
Example #17
static void nacl_display() {
  NaClLog(LOG_INFO, "nacl_display\n");
  nacl_flush();
}
Example #18
static void nacl_mousetype(int type) {
  NaClLog(LOG_INFO, "nacl_mousetype\n");
}
Example #19
static void nacl_free_buffers(char* b1, char* b2) {
  NaClLog(LOG_INFO, "nacl_free_buffers\n");
}
Example #20
static int nacl_init() {
  NaClLog(LOG_INFO, "nacl_init\n");
  return 1; /*1 for success 0 for fail */
}
Example #21
/*
 * NaClAllocatePow2AlignedMemory is for allocating a large amount of
 * memory of mem_sz bytes that must be address aligned, so that
 * log_alignment low-order address bits must be zero.
 *
 * Returns the aligned region on success, or NULL on failure.
 */
static void *NaClAllocatePow2AlignedMemory(size_t mem_sz,
                                           size_t log_alignment) {
  uintptr_t pow2align;
  size_t    request_sz;
  void      *mem_ptr;
  uintptr_t orig_addr;
  uintptr_t rounded_addr;
  size_t    extra;

  pow2align = ((uintptr_t) 1) << log_alignment;

  request_sz = mem_sz + pow2align;

  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxS"\n",
          " Ask:",
          request_sz);

  mem_ptr = mmap((void *) 0,
                 request_sz,
                 PROT_NONE,
                 MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE,
                 -1,
                 (off_t) 0);
  if (MAP_FAILED == mem_ptr) {
    return NULL;
  }
  orig_addr = (uintptr_t) mem_ptr;

  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n",
          "orig memory at",
          orig_addr);

  rounded_addr = (orig_addr + (pow2align - 1)) & ~(pow2align - 1);
  extra = rounded_addr - orig_addr;

  if (0 != extra) {
    NaClLog(4,
            "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n",
            "Freeing front:",
            orig_addr,
            extra);
    if (-1 == munmap((void *) orig_addr, extra)) {
      perror("munmap (front)");
      NaClLog(LOG_FATAL,
              "NaClAllocatePow2AlignedMemory: munmap front failed\n");
    }
  }

  extra = pow2align - extra;
  if (0 != extra) {
    NaClLog(4,
            "%"MSGWIDTH"s %016"NACL_PRIxPTR", %016"NACL_PRIxS"\n",
            "Freeing tail:",
            rounded_addr + mem_sz,
            extra);
    if (-1 == munmap((void *) (rounded_addr + mem_sz),
                     extra)) {
      perror("munmap (end)");
      NaClLog(LOG_FATAL,
              "NaClAllocatePow2AlignedMemory: munmap tail failed\n");
    }
  }
  NaClLog(4,
          "%"MSGWIDTH"s %016"NACL_PRIxPTR"\n",
          "Aligned memory:",
          rounded_addr);

  /*
   * we could also mmap again at rounded_addr w/o MAP_NORESERVE etc to
   * ensure that we have the memory, but that's better done in another
   * utility function.  the semantics here is no paging space
   * reserved, as in Windows MEM_RESERVE without MEM_COMMIT.
   */

  return (void *) rounded_addr;
}
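This mmap-based variant is self-contained enough to demo directly: over-reserve by the alignment with PROT_NONE (a reservation, not a commitment), round up, then munmap the unaligned front and the tail so only an aligned window remains reserved. A minimal Linux sketch of the same sequence with small demo sizes (1MB alignment, a 64KB region), error handling reduced to perror:

#define _DEFAULT_SOURCE  /* MAP_ANONYMOUS / MAP_NORESERVE on glibc */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  size_t    mem_sz = 16 * 4096;             /* region we actually want */
  size_t    log_alignment = 20;             /* 1MB alignment for the demo */
  uintptr_t pow2align = ((uintptr_t) 1) << log_alignment;
  size_t    request_sz = mem_sz + pow2align;
  void      *mem_ptr;
  uintptr_t orig_addr;
  uintptr_t rounded_addr;
  size_t    front;
  size_t    tail;

  mem_ptr = mmap(NULL, request_sz, PROT_NONE,
                 MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE, -1, (off_t) 0);
  if (MAP_FAILED == mem_ptr) {
    perror("mmap");
    return 1;
  }
  orig_addr = (uintptr_t) mem_ptr;
  rounded_addr = (orig_addr + (pow2align - 1)) & ~(pow2align - 1);
  front = rounded_addr - orig_addr;
  tail = pow2align - front;

  if (0 != front && -1 == munmap((void *) orig_addr, front)) {
    perror("munmap (front)");
    return 1;
  }
  if (0 != tail && -1 == munmap((void *) (rounded_addr + mem_sz), tail)) {
    perror("munmap (tail)");
    return 1;
  }
  printf("aligned reservation at 0x%016lx\n", (unsigned long) rounded_addr);
  return 0;
}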
Example #22
static void nacl_uninitialise() {
  NaClLog(LOG_INFO, "nacl_uninitialise\n");
}
Example #23
NaClErrorCode NaClAppLoadFileAslr(struct Gio        *gp,
                                  struct NaClApp    *nap,
                                  enum NaClAslrMode aslr_mode) {
  NaClErrorCode       ret = LOAD_INTERNAL;
  NaClErrorCode       subret = LOAD_INTERNAL;
  uintptr_t           rodata_end;
  uintptr_t           data_end;
  uintptr_t           max_vaddr;
  struct NaClElfImage *image = NULL;
  struct NaClPerfCounter  time_load_file;
  struct NaClElfImageInfo info;

  NaClPerfCounterCtor(&time_load_file, "NaClAppLoadFile");

  /* NACL_MAX_ADDR_BITS < 32 */
  if (nap->addr_bits > NACL_MAX_ADDR_BITS) {
    ret = LOAD_ADDR_SPACE_TOO_BIG;
    goto done;
  }

  nap->stack_size = NaClRoundAllocPage(nap->stack_size);

  /* temporary object will be deleted at end of function */
  image = NaClElfImageNew(gp, &subret);
  if (NULL == image || LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  subret = NaClElfImageValidateProgramHeaders(image,
                                              nap->addr_bits,
                                              &info);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  nap->static_text_end = info.static_text_end;
  nap->rodata_start = info.rodata_start;
  rodata_end = info.rodata_end;
  nap->data_start = info.data_start;
  data_end = info.data_end;
  max_vaddr = info.max_vaddr;

  if (0 == nap->data_start) {
    if (0 == nap->rodata_start) {
      if (NaClRoundAllocPage(max_vaddr) - max_vaddr < NACL_HALT_SLED_SIZE) {
        /*
         * if no rodata and no data, we make sure that there is space for
         * the halt sled.
         */
        max_vaddr += NACL_MAP_PAGESIZE;
      }
    } else {
      /*
       * no data, but there is rodata.  this means max_vaddr is just
       * where rodata ends.  this might not be at an allocation
       * boundary, and in this case the page would not be writable.  round
       * max_vaddr up to the next allocation boundary so that bss will
       * be at the next writable region.
       */
      ;
    }
    max_vaddr = NaClRoundAllocPage(max_vaddr);
  }
  /*
   * max_vaddr -- the break or the boundary between data (initialized
   * and bss) and the address space hole -- does not have to be at a
   * page boundary.
   *
   * Memory allocation will use NaClRoundPage(nap->break_addr), but
   * the system notion of break is always an exact address.  Even
   * though we must allocate and make accessible multiples of pages,
   * the linux-style brk system call (which returns current break on
   * failure) permits a non-aligned address as argument.
   */
  nap->break_addr = max_vaddr;
  nap->data_end = max_vaddr;

  NaClLog(4, "Values from NaClElfImageValidateProgramHeaders:\n");
  NaClLog(4, "rodata_start = 0x%08"NACL_PRIxPTR"\n", nap->rodata_start);
  NaClLog(4, "rodata_end   = 0x%08"NACL_PRIxPTR"\n", rodata_end);
  NaClLog(4, "data_start   = 0x%08"NACL_PRIxPTR"\n", nap->data_start);
  NaClLog(4, "data_end     = 0x%08"NACL_PRIxPTR"\n", data_end);
  NaClLog(4, "max_vaddr    = 0x%08"NACL_PRIxPTR"\n", max_vaddr);

  /* We now support only one bundle size.  */
  nap->bundle_size = NACL_INSTR_BLOCK_SIZE;

  nap->initial_entry_pt = NaClElfImageGetEntryPoint(image);
  NaClLogAddressSpaceLayout(nap);

  if (!NaClAddrIsValidEntryPt(nap, nap->initial_entry_pt)) {
    ret = LOAD_BAD_ENTRY;
    goto done;
  }

  subret = NaClCheckAddressSpaceLayoutSanity(nap, rodata_end, data_end,
                                             max_vaddr);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Allocating address space\n");
  NaClPerfCounterMark(&time_load_file, "PreAllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  subret = NaClAllocAddrSpaceAslr(nap, aslr_mode);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "AllocAddrSpace");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * Make sure the static image pages are marked writable before we try
   * to write them.
   */
  NaClLog(2, "Loading into memory\n");
  ret = NaClMprotect((void *) (nap->mem_start + NACL_TRAMPOLINE_START),
                     NaClRoundAllocPage(nap->data_end) - NACL_TRAMPOLINE_START,
                     PROT_READ | PROT_WRITE);
  if (0 != ret) {
    NaClLog(LOG_FATAL,
            "NaClAppLoadFile: Failed to make image pages writable. "
            "Error code 0x%x\n",
            ret);
  }
  subret = NaClElfImageLoad(image, gp, nap->addr_bits, nap->mem_start);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NB: mem_map object has been initialized, but is empty.
   * NaClMakeDynamicTextShared does not touch it.
   *
   * NaClMakeDynamicTextShared also fills the dynamic memory region
   * with the architecture-specific halt instruction.  If/when we use
   * memory mapping to save paging space for the dynamic region and
   * lazily halt fill the memory as the pages become
   * readable/executable, we must make sure that the *last*
   * NACL_MAP_PAGESIZE chunk is nonetheless mapped and written with
   * halts.
   */
  NaClLog(2,
          ("Replacing gap between static text and"
           " (ro)data with shareable memory\n"));
  subret = NaClMakeDynamicTextShared(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "MakeDynText");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  /*
   * NaClFillEndOfTextRegion will fill with halt instructions the
   * padding space after the static text region.
   *
   * Shm-backed dynamic text space was filled with halt instructions
   * in NaClMakeDynamicTextShared.  This extends to the rodata.  For
   * non-shm-backed text space, this extend to the next page (and not
   * allocation page).  static_text_end is updated to include the
   * padding.
   */
  NaClFillEndOfTextRegion(nap);

  NaClLog(2, "Validating image\n");
  subret = NaClValidateImage(nap);
  NaClPerfCounterMark(&time_load_file,
                      NACL_PERF_IMPORTANT_PREFIX "ValidateImg");
  NaClPerfCounterIntervalLast(&time_load_file);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "Initializing arch switcher\n");
  NaClInitSwitchToApp(nap);

  NaClLog(2, "Installing trampoline\n");
  NaClLoadTrampoline(nap);

  NaClLog(2, "Installing springboard\n");
  NaClLoadSpringboard(nap);

  /*
   * NaClMemoryProtect also initializes the mem_map w/ information
   * about the memory pages and their current protection value.
   *
   * The contents of the dynamic text region will get remapped as
   * non-writable.
   */
  NaClLog(2, "Applying memory protection\n");
  subret = NaClMemoryProtection(nap);
  if (LOAD_OK != subret) {
    ret = subret;
    goto done;
  }

  NaClLog(2, "NaClAppLoadFile done; ");
  NaClLogAddressSpaceLayout(nap);
  ret = LOAD_OK;
done:
  NaClElfImageDelete(image);

  NaClPerfCounterMark(&time_load_file, "EndLoadFile");
  NaClPerfCounterIntervalTotal(&time_load_file);
  return ret;
}
Example #24
char* getcwd(char* buf, size_t size) {
  NaClLog(LOG_INFO, "getcwd\n");
  if (size < sizeof "/tmp") {
    return NULL;  /* caller's buffer cannot hold the stub path */
  }
  strcpy(buf, "/tmp");
  return buf;
}
Example #25
/*
 * preconditions:
 *  * argc is the length of the argv array
 *  * envv may be NULL (this happens on MacOS/Cocoa and in tests)
 *  * if envv is non-NULL it is 'consistent', null terminated etc.
 */
int NaClCreateMainThread(struct NaClApp     *nap,
                         int                argc,
                         char               **argv,
                         char const *const  *envv) {
  /*
   * Compute size of string tables for argv and envv
   */
  int                   retval;
  int                   envc;
  size_t                size;
  int                   auxv_entries;
  size_t                ptr_tbl_size;
  int                   i;
  uint32_t              *p;
  char                  *strp;
  size_t                *argv_len;
  size_t                *envv_len;
  uintptr_t             stack_ptr;

  retval = 0;  /* fail */
  CHECK(argc >= 0);
  CHECK(NULL != argv || 0 == argc);

  envc = 0;
  if (NULL != envv) {
    char const *const *pp;
    for (pp = envv; NULL != *pp; ++pp) {
      ++envc;
    }
  }
  envv_len = 0;
  argv_len = malloc(argc * sizeof argv_len[0]);
  envv_len = malloc(envc * sizeof envv_len[0]);
  if (NULL == argv_len) {
    goto cleanup;
  }
  if (NULL == envv_len && 0 != envc) {
    goto cleanup;
  }

  size = 0;

  /*
   * The following two loops cannot overflow.  The reason for this is
   * that they are counting the number of bytes used to hold the
   * NUL-terminated strings that comprise the argv and envv tables.
   * If the entire address space consisted of just those strings, then
   * the size variable would overflow; however, since there's the code
   * space required to hold the code below (and we are not targeting
   * Harvard architecture machines), at least one page holds code, not
   * data.  We are assuming that the caller is non-adversarial and the
   * code does not look like string data....
   */
  for (i = 0; i < argc; ++i) {
    argv_len[i] = strlen(argv[i]) + 1;
    size += argv_len[i];
  }
  for (i = 0; i < envc; ++i) {
    envv_len[i] = strlen(envv[i]) + 1;
    size += envv_len[i];
  }

  /*
   * NaCl modules are ILP32, so the argv, envv pointers, as well as
   * the terminating NULL pointers at the end of the argv/envv tables,
   * are 32-bit values.  We also have the auxv to take into account.
   *
   * The argv and envv pointer tables came from trusted code and are
   * part of memory.  Thus, by the same argument above, adding in
   * "ptr_tbl_size" cannot possibly overflow the "size" variable since
   * it is a size_t object.  However, the extra pointers for auxv and
   * the space for argv could cause an overflow.  The fact that we
   * used the stack to get here, etc., means that ptr_tbl_size could
   * not have overflowed.
   *
   * NB: the underlying OS would have limited the amount of space used
   * for argv and envv -- on linux, it is ARG_MAX, or 128KB -- and
   * hence the overflow check is for obvious auditability rather than
   * for correctness.
   */
  auxv_entries = 1;
  if (0 != nap->user_entry_pt) {
    auxv_entries++;
  }
  ptr_tbl_size = (((NACL_STACK_GETS_ARG ? 1 : 0) +
                   (3 + argc + 1 + envc + 1 + auxv_entries * 2)) *
                  sizeof(uint32_t));

  if (SIZE_T_MAX - size < ptr_tbl_size) {
    NaClLog(LOG_WARNING,
            "NaClCreateMainThread: ptr_tbl_size cause size of"
            " argv / environment copy to overflow!?!\n");
    retval = 0;
    goto cleanup;
  }
  size += ptr_tbl_size;

  size = (size + NACL_STACK_ALIGN_MASK) & ~NACL_STACK_ALIGN_MASK;

  if (size > nap->stack_size) {
    retval = 0;
    goto cleanup;
  }

  /*
   * Write strings and char * arrays to stack.
   */
  stack_ptr = NaClUserToSysAddrRange(nap, NaClGetInitialStackTop(nap) - size,
                                     size);
  if (stack_ptr == kNaClBadAddress) {
    retval = 0;
    goto cleanup;
  }

  NaClLog(2, "setting stack to : %016"NACL_PRIxPTR"\n", stack_ptr);

  VCHECK(0 == (stack_ptr & NACL_STACK_ALIGN_MASK),
         ("stack_ptr not aligned: %016"NACL_PRIxPTR"\n", stack_ptr));

  p = (uint32_t *) stack_ptr;
  strp = (char *) stack_ptr + ptr_tbl_size;

  /*
   * For x86-32, we push an initial argument that is the address of
   * the main argument block.  For other machines, this is passed
   * in a register and that's set in NaClStartThreadInApp.
   */
  if (NACL_STACK_GETS_ARG) {
    uint32_t *argloc = p++;
    *argloc = (uint32_t) NaClSysToUser(nap, (uintptr_t) p);
  }

  *p++ = 0;  /* Cleanup function pointer, always NULL.  */
  *p++ = envc;
  *p++ = argc;

  for (i = 0; i < argc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying arg %d  %p -> %p\n",
            i, argv[i], strp);
    strcpy(strp, argv[i]);
    strp += argv_len[i];
  }
  *p++ = 0;  /* argv[argc] is NULL.  */

  for (i = 0; i < envc; ++i) {
    *p++ = (uint32_t) NaClSysToUser(nap, (uintptr_t) strp);
    NaClLog(2, "copying env %d  %p -> %p\n",
            i, envv[i], strp);
    strcpy(strp, envv[i]);
    strp += envv_len[i];
  }
  *p++ = 0;  /* envp[envc] is NULL.  */

  /* Push an auxv */
  if (0 != nap->user_entry_pt) {
    *p++ = AT_ENTRY;
    *p++ = (uint32_t) nap->user_entry_pt;
  }
  *p++ = AT_NULL;
  *p++ = 0;

  CHECK((char *) p == (char *) stack_ptr + ptr_tbl_size);

  /* now actually spawn the thread */
  NaClXMutexLock(&nap->mu);
  nap->running = 1;
  NaClXMutexUnlock(&nap->mu);

  NaClVmHoleWaitToStartThread(nap);

  /*
   * For x86, we adjust the stack pointer down to push a dummy return
   * address.  This happens after the stack pointer alignment.
   * We avoid the otherwise harmless call for the zero case because
   * _FORTIFY_SOURCE memset can warn about zero-length calls.
   */
  if (NACL_STACK_PAD_BELOW_ALIGN != 0) {
    stack_ptr -= NACL_STACK_PAD_BELOW_ALIGN;
    memset((void *) stack_ptr, 0, NACL_STACK_PAD_BELOW_ALIGN);
  }

  NaClLog(2, "system stack ptr : %016"NACL_PRIxPTR"\n", stack_ptr);
  NaClLog(2, "  user stack ptr : %016"NACL_PRIxPTR"\n",
          NaClSysToUserStackAddr(nap, stack_ptr));

  /* e_entry is user addr */
  retval = NaClAppThreadSpawn(nap,
                              nap->initial_entry_pt,
                              NaClSysToUserStackAddr(nap, stack_ptr),
                              /* user_tls1= */ (uint32_t) nap->break_addr,
                              /* user_tls2= */ 0);

cleanup:
  free(argv_len);
  free(envv_len);

  return retval;
}
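The ptr_tbl_size expression above sizes the initial stack words: an optional argument word when NACL_STACK_GETS_ARG is set (x86-32), the 3 fixed words (cleanup-function pointer, envc, argc), the argv and envv pointer arrays with their NULL terminators, and two 32-bit words per auxv entry. A worked instance for a tiny command line, assuming NACL_STACK_GETS_ARG == 1 as on x86-32 and user_entry_pt set so that auxv_entries == 2:

#include <assert.h>
#include <stdint.h>

int main(void) {
  int     argc = 2, envc = 1, auxv_entries = 2;
  int     stack_gets_arg = 1;                  /* x86-32 assumption */
  size_t  ptr_tbl_size =
      ((stack_gets_arg ? 1 : 0) +
       (3 + argc + 1 + envc + 1 + auxv_entries * 2)) * sizeof(uint32_t);

  /* 1 + (3 + 2 + 1 + 1 + 1 + 4) = 13 words of 4 bytes each. */
  assert(ptr_tbl_size == 13 * sizeof(uint32_t));
  return 0;
}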
Example #26
int kill() {
  NaClLog(LOG_INFO, "kill\n");
  return -1;
}
Example #27
int NaClDescMapDescriptor(struct NaClDesc         *desc,
                          struct NaClDescEffector *effector,
                          void                    **addr,
                          size_t                  *size) {
    struct nacl_abi_stat  st;
    size_t                rounded_size = 0;
    const int             kMaxTries = 10;
    int                   tries = 0;
    void                  *map_addr = NULL;
    int                   rval;
    uintptr_t             rval_ptr;

    *addr = NULL;
    *size = 0;

    rval = (*desc->vtbl->Fstat)(desc,
                                effector,
                                &st);
    if (0 != rval) {
        /* Failed to get the size - return failure. */
        return rval;
    }

    /*
     * on sane systems, sizeof(size_t) <= sizeof(nacl_abi_off_t) must hold.
     */
    if (st.nacl_abi_st_size < 0) {
        return -NACL_ABI_ENOMEM;
    }
    if (sizeof(size_t) < sizeof(nacl_abi_off_t)) {
        if ((nacl_abi_off_t) SIZE_T_MAX < st.nacl_abi_st_size) {
            return -NACL_ABI_ENOMEM;
        }
    }
    /*
     * size_t and uintptr_t and void * should have the same number of
     * bits (well, void * could be smaller than uintptr_t, and on weird
     * architectures one could imagine the maximum size is smaller than
     * all addr bits, but we're talking sane architectures...).
     */

    /*
     * When probing by VirtualAlloc/mmap, use the same granularity
     * as the Map virtual function (64KB).
     */
    rounded_size = NaClRoundAllocPage(st.nacl_abi_st_size);

    /* Find an address range to map the object into. */
    do {
        ++tries;
#if NACL_WINDOWS
        map_addr = VirtualAlloc(NULL, rounded_size, MEM_RESERVE, PAGE_READWRITE);
        if (NULL == map_addr || !VirtualFree(map_addr, 0, MEM_RELEASE)) {
            continue;
        }
#else
        map_addr = mmap(NULL,
                        rounded_size,
                        PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_ANONYMOUS,
                        0,
                        0);
        if (MAP_FAILED == map_addr || munmap(map_addr, rounded_size)) {
            map_addr = NULL;
            continue;
        }
#endif
        NaClLog(4,
                "NaClDescMapDescriptor: mapping to address %"NACL_PRIxPTR"\n",
                (uintptr_t) map_addr);
        rval_ptr = (*desc->vtbl->Map)(desc,
                                      effector,
                                      map_addr,
                                      rounded_size,
                                      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                                      NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                                      0);
        NaClLog(4,
                "NaClDescMapDescriptor: result is %"NACL_PRIxPTR"\n",
                rval_ptr);
        if (NaClIsNegErrno(rval_ptr)) {
            /*
             * A nonzero return from NaClIsNegErrno
             * indicates that the value is within the range
             * reserved for errors, which is representable
             * with 32 bits.
             */
            rval = (int) rval_ptr;
        } else {
            /*
             * Map() did not return an error, so set our
             * return code to 0 (success)
             */
            rval = 0;
            map_addr = (void*) rval_ptr;
            break;
        }

    } while (NULL == map_addr && tries < kMaxTries);

    if (NULL == map_addr) {
        return rval;
    }

    *addr = map_addr;
    *size = rounded_size;
    return 0;
}
Example #28
static void nacl_setpalette(ui_palette pal, int start, int end) {
  NaClLog(LOG_INFO, "nacl_setpalette\n");
}
Example #29
static uintptr_t NaClDescImcShmMap(struct NaClDesc         *vself,
                                   struct NaClDescEffector *effp,
                                   void                    *start_addr,
                                   size_t                  len,
                                   int                     prot,
                                   int                     flags,
                                   nacl_off64_t            offset) {
  struct NaClDescImcShm  *self = (struct NaClDescImcShm *) vself;

  int           nacl_imc_prot;
  int           nacl_imc_flags;
  uintptr_t     addr;
  void          *result;
  nacl_off64_t  tmp_off64;

  /*
   * shm must have NACL_ABI_MAP_SHARED in flags, and all calls through
   * this API must supply a start_addr, so NACL_ABI_MAP_FIXED is
   * assumed.
   */
  if (NACL_ABI_MAP_SHARED != (flags & NACL_ABI_MAP_SHARING_MASK)) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping not NACL_ABI_MAP_SHARED,"
             " flags 0x%x\n"),
            flags);
    return -NACL_ABI_EINVAL;
  }
  if (0 != (NACL_ABI_MAP_FIXED & flags) && NULL == start_addr) {
    NaClLog(LOG_INFO,
            ("NaClDescImcShmMap: Mapping NACL_ABI_MAP_FIXED"
             " but start_addr is NULL\n"));
  }
  /* post-condition: if NULL == start_addr, then NACL_ABI_MAP_FIXED not set */

  /*
   * prot must not contain bits other than PROT_{READ|WRITE|EXEC}.
   */
  if (0 != (~(NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE | NACL_ABI_PROT_EXEC)
            & prot)) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: prot has other bits than"
            " PROT_{READ|WRITE|EXEC}\n");
    return -NACL_ABI_EINVAL;
  }
  /*
   * Map from NACL_ABI_ prot and flags bits to IMC library flags,
   * which will later map back into posix-style prot/flags on *x
   * boxen, and to MapViewOfFileEx arguments on Windows.
   */
  nacl_imc_prot = 0;
  if (NACL_ABI_PROT_READ & prot) {
    nacl_imc_prot |= NACL_PROT_READ;
  }
  if (NACL_ABI_PROT_WRITE & prot) {
    nacl_imc_prot |= NACL_PROT_WRITE;
  }
  if (NACL_ABI_PROT_EXEC & prot) {
    nacl_imc_prot |= NACL_PROT_EXEC;
  }
  nacl_imc_flags = NACL_MAP_SHARED;
  if (0 == (NACL_ABI_MAP_FIXED & flags)) {
    /* start_addr is a hint, and we just ignore the hint... */
    if (!NaClFindAddressSpace(&addr, len)) {
      NaClLog(1, "NaClDescImcShmMap: no address space?!?\n");
      return -NACL_ABI_ENOMEM;
    }
    start_addr = (void *) addr;
  }
  nacl_imc_flags |= NACL_MAP_FIXED;

  tmp_off64 = offset + len;
  /* just NaClRoundAllocPage, but in 64 bits */
  tmp_off64 = ((tmp_off64 + NACL_MAP_PAGESIZE - 1)
             & ~(uint64_t) (NACL_MAP_PAGESIZE - 1));
  if (tmp_off64 > INT32_MAX) {
    NaClLog(LOG_INFO,
            "NaClDescImcShmMap: total offset exceeds 32-bits\n");
    return -NACL_ABI_EOVERFLOW;
  }

  result = NaClMap(effp,
                   (void *) start_addr,
                   len,
                   nacl_imc_prot,
                   nacl_imc_flags,
                   self->h,
                   (off_t) offset);
  if (NACL_MAP_FAILED == result) {
    return -NACL_ABI_E_MOVE_ADDRESS_SPACE;
  }
  if (0 != (NACL_ABI_MAP_FIXED & flags) && result != (void *) start_addr) {
    NaClLog(LOG_FATAL,
            ("NaClDescImcShmMap: NACL_MAP_FIXED but got %p instead of %p\n"),
            result, start_addr);
  }
  return (uintptr_t) start_addr;
}
Example #30
int32_t NaClSysOpen(struct NaClAppThread  *natp,
                    char                  *pathname,
                    int                   flags,
                    int                   mode) {
  struct NaClApp       *nap = natp->nap;
  uint32_t             retval = -NACL_ABI_EINVAL;
  char                 path[NACL_CONFIG_PATH_MAX];
  nacl_host_stat_t     stbuf;
  int                  allowed_flags;

  NaClLog(3, "NaClSysOpen(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", 0x%x, 0x%x)\n",
          (uintptr_t) natp, (uintptr_t) pathname, flags, mode);

  retval = CopyPathFromUser(nap, path, sizeof path, (uintptr_t) pathname);
  if (0 != retval)
    goto cleanup;

  allowed_flags = (NACL_ABI_O_ACCMODE | NACL_ABI_O_CREAT
                   | NACL_ABI_O_TRUNC | NACL_ABI_O_APPEND);
  if (0 != (flags & ~allowed_flags)) {
    NaClLog(LOG_WARNING, "Invalid open flags 0%o, ignoring extraneous bits\n",
            flags);
    flags &= allowed_flags;
  }
  if (0 != (mode & ~0600)) {
    NaClLog(1, "IGNORING Invalid access mode bits 0%o\n", mode);
    mode &= 0600;
  }

  retval = NaClOpenAclCheck(nap, path, flags, mode);
  if (0 != retval) {
    NaClLog(3, "Open ACL check rejected \"%s\".\n", path);
    goto cleanup;
  }

  /*
   * Perform a stat to determine whether the file is a directory.
   *
   * NB: it is okay for the stat to fail, since the request may be to
   * create a new file.
   *
   * There is a race condition here: between the stat and the
   * open-as-a-file and open-as-a-dir, the type of the object that the
   * path refers to can change.
   */
  retval = NaClHostDescStat(path, &stbuf);

  /* Windows does not have S_ISDIR(m) macro */
  if (0 == retval && S_IFDIR == (S_IFDIR & stbuf.st_mode)) {
    struct NaClHostDir  *hd;

    hd = malloc(sizeof *hd);
    if (NULL == hd) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup;
    }
    retval = NaClHostDirOpen(hd, path);
    NaClLog(1, "NaClHostDirOpen(0x%08"NACL_PRIxPTR", %s) returned %d\n",
            (uintptr_t) hd, path, retval);
    if (0 == retval) {
      retval = NaClSetAvail(nap,
                            ((struct NaClDesc *) NaClDescDirDescMake(hd)));
      NaClLog(1, "Entered directory into open file table at %d\n",
              retval);
    }
  } else {
    struct NaClHostDesc  *hd;

    hd = malloc(sizeof *hd);
    if (NULL == hd) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup;
    }
    retval = NaClHostDescOpen(hd, path, flags, mode);
    NaClLog(1,
            "NaClHostDescOpen(0x%08"NACL_PRIxPTR", %s, 0%o, 0%o) returned %d\n",
            (uintptr_t) hd, path, flags, mode, retval);
    if (0 == retval) {
      struct NaClDesc *desc = (struct NaClDesc *) NaClDescIoDescMake(hd);
      if ((flags & NACL_ABI_O_ACCMODE) == NACL_ABI_O_RDONLY) {
        /*
         * Let any read-only open be used for PROT_EXEC mmap
         * calls.  Under -a, the user informally warrants that
         * files' code segments won't be changed after open.
         */
        NaClDescSetFlags(desc,
                         NaClDescGetFlags(desc) | NACL_DESC_FLAGS_MMAP_EXEC_OK);
      }
      retval = NaClSetAvail(nap, desc);
      NaClLog(1, "Entered into open file table at %d\n", retval);
    }
  }
cleanup:
  return retval;
}