Example #1
VMArea::~VMArea()
{
	const uint32 flags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// TODO: This might be stricter than necessary.

	free_etc(page_protections, flags);
	free_etc(name, flags);
}
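
Note: free_etc() releases memory with the same heap flags that were used to allocate it. The allocation side is not shown in this example; a minimal sketch of the pairing, assuming the kernel heap's malloc_etc() counterpart and a hypothetical protectionsSize:

	// Hypothetical allocation side: obtain the buffer with the same flags
	// that ~VMArea() later passes to free_etc().
	const uint32 flags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	uint8* pageProtections = (uint8*)malloc_etc(protectionsSize, flags);
	if (pageProtections == NULL)
		return B_NO_MEMORY;
	// ... later, in the destructor:
	free_etc(pageProtections, flags);
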
Example #2
status_t
VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
	size_t size, uint32 flags, uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// someone is deleting this address space now, so we can't insert
		// the area; back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(_address, addressSpec, size, area,
		allocationFlags);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	Get();
	return B_OK;
}
Example #3
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
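
Note: the explicit destructor call followed by free_etc() is the teardown half of a placement-new idiom: these objects live on flag-aware heap memory (Example #13 constructs Range objects the same way via new(malloc_flags(allocationFlags))), so a plain delete would not pass the right flags to the heap. A minimal sketch of the full round trip, assuming malloc_etc() as the allocator and eliding the real constructor arguments:

	// Hypothetical construction side of the idiom used throughout these
	// examples.
	void* memory = malloc_etc(sizeof(VMUserArea), allocationFlags);
	if (memory == NULL)
		return NULL;
	VMUserArea* area = new(memory) VMUserArea(/* real ctor args elided */);

	// Teardown reverses it: destroy in place, then free with matching flags.
	area->~VMUserArea();
	free_etc(area, allocationFlags);
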
Example #4
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// someone is deleting this address space now, so we can't remove
		// the reserved ranges; back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
Example #5
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		if (next->id != RESERVED_AREA_ID
			|| next->cache_offset > area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
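
A concrete run of the grow path: let area cover [0x100000, 0x200000) and the following reserved range next cover [0x200000, 0x500000) (with next->cache_offset <= area->Base(), so the panic is not hit). Growing to newSize = 0x180000 gives offset = 0x100000 + 0x180000 - 0x200000 = 0x80000; since next->Size() = 0x300000 > offset, ShrinkAreaHead(next, 0x280000) trims the first 0x80000 bytes off the reservation, which becomes [0x280000, 0x500000), and the area ends up covering [0x100000, 0x280000).
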
Example #6
void
IOBuffer::Delete()
{
	if (this == NULL)
		return;

	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}
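
Note: Delete() tolerates being called through a NULL pointer, but invoking a member function on a null this is undefined behavior in standard C++, and an optimizing compiler is allowed to drop the this == NULL test. A caller-side guard is the portable alternative; a minimal sketch, with buffer as a hypothetical IOBuffer*:

	// Portable call site: test the pointer before the call instead of
	// relying on the (technically undefined) this == NULL check inside.
	if (buffer != NULL)
		buffer->Delete();
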
Example #7
void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	area->~VMKernelArea();
	free_etc(area, allocationFlags);
}
Example #8
void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			free_etc(range, allocationFlags);
			free_etc(nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			free_etc(range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			free_etc(nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}
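
A concrete run of the first branch: freeing the 0x2000-byte range at [0x8000, 0xa000) while [0x6000, 0x8000) and [0xa000, 0xc000) are both free joins all three. previousRange absorbs 0x2000 + 0x2000 bytes from its neighbors and re-enters the free list as a single [0x6000, 0xc000) range of size 0x6000, while the two swallowed Range objects are released with free_etc().
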
Example #9
void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);
	// TODO: A vm_get_physical_page() may still be unmatched!

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}
Example #10
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
Example #11
/*!	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start,
		size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	size_t alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(fBase, alignment);
				if (is_valid_spot(fBase, alignedBase, size,
						next == NULL ? end : next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
					size, end)) {
				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					}

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&&  alignedBase == next->Base()
						&& next->Size() >= size) {
						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + size);
						next->SetSize(next->Size() - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							next->Base() + (next->Size() - 1))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space
						alignedBase = ROUNDDOWN(
							next->Base() + next->Size() - size, alignment);

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}
			break;
		}

		case B_BASE_ADDRESS:
		{
			// find a hole big enough for a new area beginning with "start"
			if (last == NULL) {
				// see if we can build it at the beginning of the specified
				// start
				if (next == NULL || next->Base() > start + (size - 1)) {
					foundSpot = true;
					area->SetBase(start);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				if (next->Base() - (last->Base() + last->Size()) >= size) {
					// we found a spot (it'll be filled up below)
					break;
				}

				last = next;
				next = it.Next();
			}

			addr_t lastEnd = last->Base() + (last->Size() - 1);
			if (next != NULL || end - lastEnd >= size) {
				// got a spot
				foundSpot = true;
				if (lastEnd < start)
					area->SetBase(start);
				else
					area->SetBase(lastEnd + 1);
				break;
			}

			// we didn't find a free spot in the requested range, so we'll
			// try again without any restrictions
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			last = NULL;
			goto second_chance;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
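
Both search loops delegate the fit test to is_valid_spot(), whose definition is not part of this example. A plausible shape, inferred from the call sites above (previous end, aligned candidate base, size, upper limit); the exact inclusive/exclusive boundary conventions are an assumption:

	// Assumed helper: the aligned candidate must not lie before 'base', the
	// end computation must not wrap around, and the last byte must stay
	// within 'limit'.
	static inline bool
	is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
	{
		return alignedBase >= base
			&& alignedBase + (size - 1) > alignedBase
			&& alignedBase + (size - 1) <= limit;
	}
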
Example #12
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
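
The split-case arithmetic, concretely: with a reserved range next covering [0x100000, 0x900000) and an insertion at start = 0x300000, size = 0x200000, the code shrinks next to start - next->Base() = 0x200000 bytes (now [0x100000, 0x300000)), places the new area at [0x300000, 0x500000), and gives the new reserved piece base start + size = 0x500000 and size next->Base() + next->Size() - start - size = 0x400000 (now [0x500000, 0x900000)), so the three pieces exactly tile the original range.
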
Example #13
status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
				address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(malloc_flags(allocationFlags)) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(malloc_flags(allocationFlags)) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			free_etc(leftOverRange1, allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}
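
The allocations above use a flags-carrying placement new. For the NULL checks after each new(malloc_flags(...)) to work, the underlying operator new overload must be non-throwing, so that a failed allocation skips the constructor and the new expression yields NULL. A sketch of what that overload plausibly looks like, assuming it forwards to malloc_etc() (consistent with the free_etc() teardown used elsewhere):

	// Assumed flags-aware placement new. Declaring it non-throwing makes
	// new(malloc_flags(f)) Range(...) evaluate to NULL on allocation failure
	// instead of constructing at a null address.
	struct malloc_flags {
		uint32	flags;
		malloc_flags(uint32 _flags) : flags(_flags) {}
	};

	inline void*
	operator new(size_t size, const malloc_flags& flags) throw()
	{
		return malloc_etc(size, flags.flags);
	}
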
Example #14
status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(malloc_flags(allocationFlags)) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			free_etc(nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}
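
A concrete run of the shrink path: with range covering [0xd0000000, 0xd0004000) and a free nextRange at [0xd0004000, 0xd0005000), resizing to newSize = 0x2000 grows the free range by range->size - newSize = 0x2000 bytes and moves its base to range->base + newSize = 0xd0002000, so it becomes [0xd0002000, 0xd0005000) while the area shrinks to [0xd0000000, 0xd0002000).
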