Example #1
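/*
 * Allocates and initializes a buffer of the specified size for the reason and
 * initializes the backing kcdata descriptor. Any existing buffer is
 * deallocated first. If osr_bufsize is 0, only the existing buffer is
 * deallocated. When can_block is FALSE the allocation will not block and the
 * function can fail with ENOMEM; when can_block is TRUE the allocation may
 * block and is asserted to succeed.
 */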
static int
os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize,
				boolean_t can_block)
{
	if (cur_reason == OS_REASON_NULL) {
		return EINVAL;
	}

	if (osr_bufsize > OS_REASON_BUFFER_MAX_SIZE) {
		return EINVAL;
	}

	lck_mtx_lock(&cur_reason->osr_lock);

	os_reason_dealloc_buffer(cur_reason);

	if (osr_bufsize == 0) {
		lck_mtx_unlock(&cur_reason->osr_lock);
		return 0;
	}

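	/*
	 * A blocking allocation is only attempted when the caller allows it;
	 * the non-blocking path must report ENOMEM rather than wait for memory.
	 */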
	if (can_block) {
		cur_reason->osr_kcd_buf = kalloc_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
		assert(cur_reason->osr_kcd_buf != NULL);
	} else {
		cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
		if (cur_reason->osr_kcd_buf == NULL) {
			lck_mtx_unlock(&cur_reason->osr_lock);
			return ENOMEM;
		}
	}

	bzero(cur_reason->osr_kcd_buf, osr_bufsize);

	cur_reason->osr_bufsize = osr_bufsize;

	if (kcdata_memory_static_init(&cur_reason->osr_kcd_descriptor, (mach_vm_address_t) cur_reason->osr_kcd_buf,
					KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != KERN_SUCCESS) {
		os_reason_dealloc_buffer(cur_reason);

		lck_mtx_unlock(&cur_reason->osr_lock);
		return EIO;
	}

	lck_mtx_unlock(&cur_reason->osr_lock);

	return 0;
}
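A minimal sketch of how the can_block parameter could be surfaced to callers: two thin wrappers over the internal helper, one that may block and one that must not. The wrapper names below are assumptions for illustration and are not taken from the listings (Example #2 below shows a standalone, non-blocking os_reason_alloc_buffer instead).

/* Hypothetical wrappers (names assumed for illustration). */
int
os_reason_alloc_buffer_blocking(os_reason_t cur_reason, uint32_t osr_bufsize)
{
	/* May block waiting for memory; suitable for non-critical paths. */
	return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, TRUE);
}

int
os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize)
{
	/* Must not block; callers must be prepared to handle ENOMEM. */
	return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, FALSE);
}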
Example #2
/*
 * Allocates and initializes a buffer of the specified size for the reason, and
 * initializes the kcdata descriptor accordingly. If there is an existing
 * buffer, we deallocate it and clear the associated kcdata descriptor before
 * allocating the new one. If osr_bufsize is passed as 0, we deallocate the
 * existing buffer and then return. The buffer is allocated without blocking,
 * so this function is safe to call from critical cleanup paths (e.g. jetsam).
 *
 * Returns:
 * 0 on success
 * EINVAL if the passed reason pointer is invalid or the requested size is
 * 	  larger than OS_REASON_BUFFER_MAX_SIZE
 * ENOMEM if unable to allocate memory for the buffer
 * EIO if we fail to initialize the kcdata buffer
 */
int
os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize)
{
	if (cur_reason == OS_REASON_NULL) {
		return EINVAL;
	}

	if (osr_bufsize > OS_REASON_BUFFER_MAX_SIZE) {
		return EINVAL;
	}

	lck_mtx_lock(&cur_reason->osr_lock);

	os_reason_dealloc_buffer(cur_reason);

	if (osr_bufsize == 0) {
		lck_mtx_unlock(&cur_reason->osr_lock);
		return 0;
	}

	/*
	 * We don't want to block trying to acquire a reason buffer and hold
	 * up important things trying to clean up the system (i.e. jetsam).
	 */
	cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
	if (cur_reason->osr_kcd_buf == NULL) {
		lck_mtx_unlock(&cur_reason->osr_lock);
		return ENOMEM;
	}

	bzero(cur_reason->osr_kcd_buf, osr_bufsize);

	cur_reason->osr_bufsize = osr_bufsize;

	if (kcdata_memory_static_init(&cur_reason->osr_kcd_descriptor, (mach_vm_address_t) cur_reason->osr_kcd_buf,
					KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != KERN_SUCCESS) {
		os_reason_dealloc_buffer(cur_reason);

		lck_mtx_unlock(&cur_reason->osr_lock);
		return EIO;
	}

	lck_mtx_unlock(&cur_reason->osr_lock);

	return 0;
}
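A brief usage sketch, assuming a caller in the same file: it requests a 1 KB reason buffer and handles each documented return value. The caller name and the chosen buffer size are illustrative only, not part of the listings above.

/*
 * Hypothetical caller (the function name and 1 KB size are illustrative).
 * Allocates a reason buffer and handles each documented return value.
 */
static int
example_prepare_reason(os_reason_t reason)
{
	int err = os_reason_alloc_buffer(reason, 0x400);

	switch (err) {
	case 0:
		/* reason->osr_kcd_descriptor is ready for kcdata to be copied in. */
		return 0;
	case ENOMEM:
		/* Non-blocking allocation failed; continue without a payload. */
		return 0;
	case EINVAL:
		/* NULL reason or size larger than OS_REASON_BUFFER_MAX_SIZE. */
	case EIO:
		/* kcdata descriptor initialization failed. */
	default:
		return err;
	}
}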