Example #1
/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}
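The new(malloc_flags(...)) Type pattern seen here, and in several of the examples below, relies on a small flags-carrying struct plus a non-throwing placement operator new overload that forwards to a flags-aware allocator. The real declarations live in Haiku's private kernel heap headers; the following is only a minimal sketch of the assumed mechanism:

// Sketch only: the actual definitions are in Haiku's private kernel
// heap headers. Names follow the examples on this page.
struct malloc_flags {
	uint32	flags;

	malloc_flags(uint32 flags)
		:
		flags(flags)
	{
	}
};


inline void*
operator new(size_t size, const malloc_flags& flags) throw()
{
	// Forward to the flags-aware kernel allocator (malloc_etc() in Haiku).
	// Returning NULL on failure instead of throwing is what makes the
	// NULL checks after new(malloc_flags(...)) in Examples #3, #6 and #7
	// meaningful.
	return malloc_etc(size, flags.flags);
}

Objects allocated this way are released with free_etc(object, allocationFlags), as Examples #7 and #8 do for their left-over ranges.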
Example #2
/*
 * Allocate memory.
 */
void *
osenv_mem_alloc(oskit_size_t size, osenv_memflags_t flags, unsigned align)
{
	lmm_flags_t lmm_flags = 0;
	void *p;

	if (flags & OSENV_ISADMA_MEM)
		lmm_flags |= LMMF_16MB;
	if (flags & OSENV_X861MB_MEM)
		lmm_flags |= LMMF_1MB;

	if (flags & OSENV_AUTO_SIZE) {
		if (align > 1) {
			p = memalign_flags(size, lmm_flags, align);
		} else {
			p = malloc_flags(size, lmm_flags);
		}
	} else {
		if (align > 1) {
			p = smemalign_flags(size, lmm_flags, align);
		} else {
			p = smalloc_flags(size, lmm_flags);
		}
	}
	return p;
}
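A hypothetical call site for osenv_mem_alloc() is sketched below. The flag names come from the code above; the function name dma_buffer_example and the osenv_mem_free(block, flags, size) counterpart are assumptions about the surrounding OSKit osenv interface, not something shown in this example:

/*
 * Sketch only: allocate a page-aligned, ISA-DMA-capable buffer
 * (OSENV_ISADMA_MEM maps to LMMF_16MB above, keeping it below 16 MB).
 * Without OSENV_AUTO_SIZE the allocator does not record the block size,
 * so the caller passes it back when freeing.
 */
static void
dma_buffer_example(void)
{
	osenv_memflags_t flags = OSENV_ISADMA_MEM;
	void *buf;

	buf = osenv_mem_alloc(4096, flags, 4096);
	if (buf == 0)
		return;

	/* ... program a legacy ISA DMA transfer into buf ... */

	/* Assumed release counterpart from the same osenv interface. */
	osenv_mem_free(buf, flags, 4096);
}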
Example #3
status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(malloc_flags(allocationFlags)) Range(range->base,
			sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}
Example #4
/*
 * Plain malloc(): delegate to the flags-aware allocator with no special
 * flags.
 */
void *malloc(size_t size)
{
	return malloc_flags(size, 0);
}
Example #5
status_t
do_iterative_fd_io(int fd, io_request* request, iterative_io_get_vecs getVecs,
	iterative_io_finished finished, void* cookie)
{
	TRACE_RIO("[%" B_PRId32 "] do_iterative_fd_io(fd: %d, request: %p "
		"(offset: %" B_PRIdOFF ", length: %" B_PRIuGENADDR "))\n",
		find_thread(NULL), fd, request, request->Offset(), request->Length());

	struct vnode* vnode;
	file_descriptor* descriptor = get_fd_and_vnode(fd, &vnode, true);
	if (descriptor == NULL) {
		finished(cookie, request, B_FILE_ERROR, true, 0);
		request->SetStatusAndNotify(B_FILE_ERROR);
		return B_FILE_ERROR;
	}

	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	if (!HAS_FS_CALL(vnode, io)) {
		// no io() call -- fall back to synchronous I/O
		return do_synchronous_iterative_vnode_io(vnode, descriptor->cookie,
			request, getVecs, finished, cookie);
	}

	iterative_io_cookie* iterationCookie
		= (request->Flags() & B_VIP_IO_REQUEST) != 0
			? new(malloc_flags(HEAP_PRIORITY_VIP)) iterative_io_cookie
			: new(std::nothrow) iterative_io_cookie;
	if (iterationCookie == NULL) {
		// no memory -- fall back to synchronous I/O
		return do_synchronous_iterative_vnode_io(vnode, descriptor->cookie,
			request, getVecs, finished, cookie);
	}

	iterationCookie->vnode = vnode;
	iterationCookie->descriptor = descriptor;
	iterationCookie->get_vecs = getVecs;
	iterationCookie->finished = finished;
	iterationCookie->cookie = cookie;
	iterationCookie->request_offset = request->Offset();
	iterationCookie->next_finished_callback = request->FinishedCallback(
		&iterationCookie->next_finished_cookie);

	request->SetFinishedCallback(&do_iterative_fd_io_finish, iterationCookie);
	request->SetIterationCallback(&do_iterative_fd_io_iterate, iterationCookie);

	descriptorPutter.Detach();
		// From now on the descriptor is put by our finish callback.

	bool partialTransfer = false;
	status_t error = do_iterative_fd_io_iterate(iterationCookie, request,
		&partialTransfer);
	if (error != B_OK || partialTransfer) {
		if (partialTransfer) {
			request->SetTransferredBytes(partialTransfer,
				request->TransferredBytes());
		}

		request->SetStatusAndNotify(error);
		return error;
	}

	return B_OK;
}
Example #6
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// recycle a potential previously mapped page
	if (cookie->physical_page_handle != NULL) {
// TODO: This check is invalid! The physical page mapper is not required to
// return a non-NULL handle (the generic implementation does not)!
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector = fVecs[cookie->vec_index++];
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].iov_len > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

// TODO: This is a potential violation of the VIP requirement, since
// vm_map_physical_memory_vecs() allocates memory without special flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = (void*)mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
	}

	// fall back to page-wise mapping
	iovec& currentVec = fVecs[cookie->vec_index];
	addr_t address = (addr_t)currentVec.iov_base + cookie->vec_offset;
	addr_t pageOffset = address % B_PAGE_SIZE;

// TODO: This is a potential violation of the VIP requirement, since
// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	size_t length = min_c(currentVec.iov_len - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.iov_len) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}
Example #7
status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
				address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(malloc_flags(allocationFlags)) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(malloc_flags(allocationFlags)) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			free_etc(leftOverRange1, allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}
Example #8
status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(malloc_flags(allocationFlags)) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			free_etc(nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}