Example #1
static int osquery_close(dev_t dev, int flag, int fmt, struct proc *p) {
    lck_mtx_lock(osquery.mtx);
    // Only tear down shared state when the last (single) client disconnects.
    if (osquery.open_count == 1) {
        // Stop publishing kernel events and release the shared buffer
        // before dropping the open reference.
        unsubscribe_all_events();
        cleanup_user_kernel_buffer();
        osquery.open_count--;
    }
    lck_mtx_unlock(osquery.mtx);

    return 0;
}
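The close handler above assumes a matching open handler that takes the same lock and enforces a single client. A minimal sketch of that counterpart follows; the signature mirrors osquery_close, but the single-client policy and the choice of error code are assumptions, not the verbatim driver code.

// Hypothetical counterpart to osquery_close(): admit one client at a time
// and track it under the same mutex. The error code choice is assumed.
static int osquery_open(dev_t dev, int flag, int fmt, struct proc *p) {
    int err = 0;

    lck_mtx_lock(osquery.mtx);
    if (osquery.open_count == 0) {
        osquery.open_count++;  // First and only client takes the device.
    } else {
        err = -EACCES;         // Refuse a second concurrent opener.
    }
    lck_mtx_unlock(osquery.mtx);

    return err;
}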
Example #2
static int allocate_user_kernel_buffer(size_t size, void **buf) {
  int err = 0;

  // The user space daemon is requesting a new circular queue.
  // Make sure the requested size is within sane size bounds.
  if (size > MAX_KMEM || size < MIN_KMEM) {
    err = -EINVAL;
    goto error_exit;
  }

  // Record the requested buffer size.
  osquery.buf_size = size;
  // Allocate a contiguous region of memory.
  osquery.buffer = IOMallocAligned(osquery.buf_size, PAGE_SIZE);
  // Cannot proceed if no memory to back the circular queue is available.
  if (osquery.buffer == NULL) {
    err = -EINVAL;
    goto error_exit;
  }

  // Zero memory for safety, this memory will be shared with user space.
  bzero(osquery.buffer, osquery.buf_size);

  // This buffer will be shared, create a descriptor.
  osquery.md =
      IOMemoryDescriptor::withAddressRange((mach_vm_address_t)osquery.buffer,
                                           osquery.buf_size,
                                           kIODirectionInOut,
                                           kernel_task);
  if (osquery.md == NULL) {
    err = -EINVAL;
    goto error_exit;
  }

  // Now map the buffer into the user space process as read only.
  osquery.mm = osquery.md->createMappingInTask(
      current_task(), NULL, kIOMapAnywhere | kIOMapReadOnly);
  if (osquery.mm == NULL) {
    err = -EINVAL;
    goto error_exit;
  }

  // The virtual address will be shared back to the user space queue manager.
  *buf = (void *)osquery.mm->getAddress();
  // Initialize the kernel space queue manager with the new buffer.
  osquery_cqueue_init(&osquery.cqueue, osquery.buffer, osquery.buf_size);

  return 0;
error_exit:
  // A drop-through error handler will clean up any intermediate allocations.
  cleanup_user_kernel_buffer();

  return err;
}
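The error path above relies on cleanup_user_kernel_buffer() being safe to call with only some of the resources allocated, so it must test each one before releasing it and release in reverse order of allocation. The following is a hedged sketch of that kind of teardown, reusing the field names from the example; the real helper may also destroy the circular queue, and its exact body is an assumption.

// Sketch of a teardown helper that tolerates a partially-completed setup.
// Resources are released in reverse order of allocation: the user mapping,
// then the memory descriptor, then the backing kernel buffer.
static void cleanup_user_kernel_buffer() {
  if (osquery.mm != NULL) {
    osquery.mm->release();  // Unmap the buffer from the user space task.
    osquery.mm = NULL;
  }
  if (osquery.md != NULL) {
    osquery.md->release();  // Drop the IOMemoryDescriptor.
    osquery.md = NULL;
  }
  if (osquery.buffer != NULL) {
    IOFreeAligned(osquery.buffer, osquery.buf_size);  // Free the kernel pages.
    osquery.buffer = NULL;
    osquery.buf_size = 0;
  }
}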
Example #3
static int allocate_user_kernel_buffer(size_t size, void **buf) {
    int err = 0;

    if (size > MAX_KMEM || size < MIN_KMEM) {
        err = -EINVAL;
        goto error_exit;
    }

    osquery.buf_size = size;
    osquery.buffer = IOMallocAligned(osquery.buf_size, PAGE_SIZE);
    if (osquery.buffer == NULL) {
        err = -EINVAL;
        goto error_exit;
    }
    bzero(osquery.buffer, osquery.buf_size);  // Zero memory for safety.

    osquery.md = IOMemoryDescriptor::withAddressRange(
        (mach_vm_address_t)osquery.buffer, osquery.buf_size,
        kIODirectionInOut, kernel_task);
    if (osquery.md == NULL) {
        err = -EINVAL;
        goto error_exit;
    }
    osquery.mm = osquery.md->createMappingInTask(
        current_task(), NULL, kIOMapAnywhere | kIOMapReadOnly);
    if (osquery.mm == NULL) {
        err = -EINVAL;
        goto error_exit;
    }
    *buf = (void *)osquery.mm->getAddress();

    osquery_cqueue_init(&osquery.cqueue, osquery.buffer, osquery.buf_size);

    return 0;
error_exit:
    cleanup_user_kernel_buffer();

    return err;
}
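Both versions of allocate_user_kernel_buffer, together with osquery_close in Example #1, operate on a single global driver-state object. Piecing together the fields they reference gives roughly the following shape; the field types and the osquery_cqueue_t name are inferred from usage, not taken from the original source.

// Approximate shape of the shared driver state used in the examples above.
// Types are inferred from how each field is used; the real definition may differ.
static struct {
  lck_mtx_t *mtx;          // Serializes open/close and buffer management.
  int open_count;          // Number of user space clients (0 or 1).

  size_t buf_size;         // Size of the shared circular-queue buffer.
  void *buffer;            // Kernel allocation backing the queue.
  IOMemoryDescriptor *md;  // Descriptor wrapping the kernel buffer.
  IOMemoryMap *mm;         // Read-only mapping into the daemon's task.

  osquery_cqueue_t cqueue; // Kernel-side circular queue manager.
} osquery;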