Example #1
void EffectLayer::RemoveAllEffects()
{
    std::unique_lock<std::recursive_mutex> locker(lock);
    for (size_t x = 0; x < mEffects.size(); x++) {
        IncrementChangeCount(mEffects[x]->GetStartTimeMS(), mEffects[x]->GetEndTimeMS());
        delete mEffects[x];
    }
    mEffects.clear();
}
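All of the EffectLayer examples follow the same pattern: take the recursive lock, report the affected time range through IncrementChangeCount, then mutate the effect list. As a hedged sketch of what a range-based change counter could look like (the class and member names below are illustrative assumptions, not xLights' actual implementation):

#include <algorithm>
#include <climits>

// Hypothetical sketch: every edit bumps a revision number and widens a
// dirty time interval, so only the touched region needs re-rendering.
class ChangeTracker
{
public:
    void IncrementChangeCount(int startTimeMS, int endTimeMS)
    {
        ++mChangeCount;
        mDirtyStartMS = std::min(mDirtyStartMS, startTimeMS);
        mDirtyEndMS = std::max(mDirtyEndMS, endTimeMS);
    }
    unsigned ChangeCount() const { return mChangeCount; }
private:
    unsigned mChangeCount = 0;
    int mDirtyStartMS = INT_MAX; // shrinks toward the earliest edit
    int mDirtyEndMS = INT_MIN;   // grows toward the latest edit
};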
Example #2
Effect* EffectLayer::AddEffect(int id, const std::string &name, const std::string &settings, const std::string &palette,
                               int startTimeMS, int endTimeMS, int Selected, bool Protected)
{
    std::unique_lock<std::recursive_mutex> locker(lock);
    Effect *e = new Effect(this, id, name, settings, palette, startTimeMS, endTimeMS, Selected, Protected);
    mEffects.push_back(e);
    SortEffects();
    IncrementChangeCount(startTimeMS, endTimeMS);
    return e;
}
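AddEffect appends and then re-sorts the whole list, which is simple and fine for modest effect counts. For comparison, a sorted insert with std::lower_bound avoids the full re-sort; a minimal standalone illustration (not xLights code):

#include <algorithm>
#include <vector>

// Insert while keeping the vector ordered, instead of push_back + sort.
int main()
{
    std::vector<int> startTimes{0, 1000, 3000};
    int newStart = 2000;
    startTimes.insert(
        std::lower_bound(startTimes.begin(), startTimes.end(), newStart),
        newStart);
    // startTimes: 0, 1000, 2000, 3000
    return 0;
}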
Example #3
void EffectLayer::RemoveEffect(int index)
{
    std::unique_lock<std::recursive_mutex> locker(lock);
    if (index < mEffects.size())
    {
        Effect *e = mEffects[index];
        mEffects.erase(mEffects.begin()+index);
        IncrementChangeCount(e->GetStartTimeMS(), e->GetEndTimeMS());
        delete e;
    }
}
Example #4
Element* SequenceElements::AddElement(const std::string &name, const std::string &type,
                                      bool visible, bool collapsed, bool active, bool selected)
{
    if (!ElementExists(name))
    {
        mAllViews[MASTER_VIEW].push_back(new Element(this, name, type, visible, collapsed, active, selected));
        Element *el = mAllViews[MASTER_VIEW].back();
        IncrementChangeCount(el);
        return el;
    }
    return nullptr;
}
Example #5
Element* SequenceElements::AddElement(int index, const std::string &name,
                                      const std::string &type,
                                      bool visible, bool collapsed, bool active, bool selected)
{
    if (!ElementExists(name) && index <= mAllViews[MASTER_VIEW].size())
    {
        mAllViews[MASTER_VIEW].insert(mAllViews[MASTER_VIEW].begin() + index,
                                      new Element(this, name, type, visible, collapsed, active, selected));
        Element *el = mAllViews[MASTER_VIEW][index];
        IncrementChangeCount(el);
        return el;
    }
    return nullptr;
}
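Examples 4 and 5 share one guard: the element name must be unique, and for positional insertion the index may be at most size(), which appends. The bounds rule in isolation (InsertAt is a hypothetical helper, not a SequenceElements API):

#include <cassert>
#include <string>
#include <vector>

// Insert-at-index helper: index == v.size() is legal and appends;
// anything larger would be out of range.
template <typename T>
void InsertAt(std::vector<T>& v, size_t index, const T& value)
{
    assert(index <= v.size());
    v.insert(v.begin() + index, value);
}

int main()
{
    std::vector<std::string> names{"a", "c"};
    InsertAt(names, 1, std::string("b")); // names: a, b, c
    return 0;
}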
Example #6
void EffectLayer::DeleteSelectedEffects(UndoManager& undo_mgr)
{
    std::unique_lock<std::recursive_mutex> locker(lock);
    for (std::vector<Effect*>::iterator it = mEffects.begin(); it != mEffects.end(); it++) {
        if ((*it)->GetSelected() != EFFECT_NOT_SELECTED) {
            IncrementChangeCount((*it)->GetStartTimeMS(), (*it)->GetEndTimeMS());
            undo_mgr.CaptureEffectToBeDeleted( mParentElement->GetName(), mIndex, (*it)->GetEffectName(),
                                               (*it)->GetSettingsAsString(), (*it)->GetPaletteAsString(),
                                               (*it)->GetStartTimeMS(), (*it)->GetEndTimeMS(),
                                               (*it)->GetSelected(), (*it)->GetProtected() );
        }
    }
    mEffects.erase(std::remove_if(mEffects.begin(), mEffects.end(), ShouldDeleteSelected), mEffects.end());
}
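The closing line of Example 6 is the classic erase/remove_if idiom: std::remove_if compacts the survivors to the front and returns the new logical end, and erase then drops the tail in one pass. A self-contained illustration:

#include <algorithm>
#include <vector>

int main()
{
    std::vector<int> v{1, 2, 3, 4, 5};
    // drop the even values; survivors keep their relative order
    v.erase(std::remove_if(v.begin(), v.end(),
                           [](int n) { return n % 2 == 0; }),
            v.end());
    // v: 1, 3, 5
    return 0;
}

Note that Example 6 removes the selected Effect pointers without an explicit delete; presumably their cleanup happens elsewhere in xLights.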
Example #7
//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	fAreas.Remove(area);

	if (area->id != RESERVED_AREA_ID) {
		IncrementChangeCount();
		fFreeSpace += area->Size();

		if (area == fAreaHint)
			fAreaHint = NULL;
	}
}
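In the kernel examples (7, 10, 12, and 14 through 17), IncrementChangeCount() takes no arguments. A plausible minimal reading, sketched under the assumption that it just bumps a per-address-space revision counter; the class and field names below are guesses, not Haiku's:

#include <cstdint>

// Hypothetical per-address-space revision counter. Callers hold the
// write lock (see the comment on RemoveArea), so no atomics are needed.
class AddressSpaceSketch
{
public:
    void IncrementChangeCount() { fChangeCount++; }
    int32_t ChangeCount() const { return fChangeCount; }
private:
    int32_t fChangeCount = 0;
};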
Example #8
void SequenceElements::MoveElementDown(const std::string &name, int view)
{
    IncrementChangeCount(nullptr);

    for (int i = 0; i < mAllViews[view].size(); i++)
    {
        if (name == mAllViews[view][i]->GetName())
        {
            // found the element
            if (i < mAllViews[view].size() - 1)
            {
                MoveSequenceElement(i + 1, i, view);
            }
            break;
        }
    }
}
Example #9
void SequenceElements::DeleteElement(const std::string &name)
{
    for (wxXmlNode* view = mViewsNode->GetChildren(); view != NULL; view = view->GetNext())
    {
        wxString view_models = view->GetAttribute("models");
        wxArrayString all_models = wxSplit(view_models, ',');
        wxArrayString new_models;
        for (int model = 0; model < all_models.size(); model++)
        {
            if (all_models[model] != name)
            {
                new_models.push_back(all_models[model]);
            }
        }
        view_models = wxJoin(new_models, ',');
        view->DeleteAttribute("models");
        view->AddAttribute("models", view_models);
    }

    // remove the element pointer from every view, remembering the
    // master-view pointer so the object itself can be deleted afterwards
    Element* deleted = nullptr;
    for (int i = 0; i < mAllViews.size(); i++)
    {
        for (int j = 0; j < mAllViews[i].size(); j++)
        {
            if (name == mAllViews[i][j]->GetName())
            {
                if (i == MASTER_VIEW)
                    deleted = mAllViews[i][j];
                mAllViews[i].erase(mAllViews[i].begin() + j);
                IncrementChangeCount(nullptr);
                break;
            }
        }
    }

    // delete the element object itself; searching the master view again
    // after the erase above would never find it
    delete deleted;
    PopulateRowInformation();
}
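The first loop of Example 9 rewrites each view's comma-separated "models" attribute with the deleted name filtered out. The same split/filter/join step in standard C++ (RemoveModel is an illustrative helper, not an xLights function):

#include <sstream>
#include <string>

// Drop one name from a comma-separated list, preserving the order of
// the remaining entries.
std::string RemoveModel(const std::string& csv, const std::string& name)
{
    std::istringstream in(csv);
    std::string item, out;
    while (std::getline(in, item, ',')) {
        if (item == name)
            continue; // skip the deleted model
        if (!out.empty())
            out += ',';
        out += item;
    }
    return out;
}
// RemoveModel("a,b,c", "b") == "a,c"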
Example #10
status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(malloc_flags(allocationFlags)) Range(range->base,
			sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}
Example #11
void SequenceElements::MoveSequenceElement(int index, int dest, int view)
{
    IncrementChangeCount(nullptr);

    if (index < mAllViews[view].size() && dest < mAllViews[view].size())
    {
        Element* e = mAllViews[view][index];
        mAllViews[view].erase(mAllViews[view].begin() + index);
        if (index >= dest)
        {
            mAllViews[view].insert(mAllViews[view].begin() + dest, e);
        }
        else
        {
            // the erase above shifted later elements left by one
            mAllViews[view].insert(mAllViews[view].begin() + (dest - 1), e);
        }
    }
    }
}
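The dest - 1 branch is the subtle part of Example 11: erasing at index shifts every later element left by one, so a destination past the source must be adjusted before the re-insert. The same move in isolation (MoveWithin is a hypothetical helper):

#include <vector>

// Move v[index] so it lands just before the element that sat at
// position dest before the move. Erasing first shifts later elements
// left by one, hence the dest - 1 when moving forward.
void MoveWithin(std::vector<int>& v, size_t index, size_t dest)
{
    int e = v[index];
    v.erase(v.begin() + index);
    if (index >= dest)
        v.insert(v.begin() + dest, e);
    else
        v.insert(v.begin() + (dest - 1), e);
}

int main()
{
    std::vector<int> v{10, 20, 30, 40};
    MoveWithin(v, 0, 2); // v: 20, 10, 30, 40 -- 10 now precedes 30
    return 0;
}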
Example #12
void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			free_etc(range, allocationFlags);
			free_etc(nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			free_etc(range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			free_etc(nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}
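_FreeRange is textbook free-list coalescing: a freed range merges with a free predecessor, a free successor, or both, so the list never holds two adjacent free ranges. The merge arithmetic reduced to a toy (struct R is an illustration, not a kernel type):

#include <cstddef>

struct R { size_t base, size; };

// Merge a freed range into the free neighbor that immediately precedes
// it: the survivor absorbs the freed bytes.
R CoalesceWithPrevious(R previousFree, R freed)
{
    // adjacency invariant: previousFree ends where freed begins
    // (previousFree.base + previousFree.size == freed.base)
    previousFree.size += freed.size;
    return previousFree;
}
// CoalesceWithPrevious({0, 0x1000}, {0x1000, 0x2000}) == {0, 0x3000}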
Example #13
void SequenceElements::MoveElement(int index,int destinationIndex)
{
    IncrementChangeCount(nullptr);
    if (index < destinationIndex)
    {
        mAllViews[mCurrentView][index]->Index() = destinationIndex;
        for (int i = index + 1; i < destinationIndex; i++)
        {
            mAllViews[mCurrentView][i]->Index() = i - 1;
        }
    }
    else
    {
        mAllViews[mCurrentView][index]->Index() = destinationIndex;
        for (int i = destinationIndex; i < index; i++)
        {
            mAllViews[mCurrentView][i]->Index() = i + 1;
        }
    }
    SortElements();
}
Example #14
status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
				address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(malloc_flags(allocationFlags)) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(malloc_flags(allocationFlags)) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			free_etc(leftOverRange1, allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}
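The splitting logic in the middle of _AllocateRange has exactly three shapes: the allocation sits at the start of the free range (at most one leftover after it), at its end (one leftover before it), or in its middle (leftovers on both sides). The case analysis in miniature (toy types, not kernel code):

#include <cstddef>

struct Span { size_t base, size; };

// How many leftover pieces does carving [addr, addr + size) out of a
// larger span produce? 0, 1, or 2, matching _AllocateRange's branches.
int LeftoverPieces(Span span, size_t addr, size_t size)
{
    if (addr == span.base)
        return span.size > size ? 1 : 0; // exact fit or tail leftover
    if (addr + size == span.base + span.size)
        return 1;                        // head leftover only
    return 2;                            // head and tail leftovers
}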
Example #15
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
Example #16
/*!	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start,
		size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	size_t alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(fBase, alignment);
				if (is_valid_spot(fBase, alignedBase, size,
						next == NULL ? end : next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
					size, end)) {
				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					}

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&&  alignedBase == next->Base()
						&& next->Size() >= size) {
						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + size);
						next->SetSize(next->Size() - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							next->Base() + (next->Size() - 1))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space
						alignedBase = ROUNDDOWN(
							next->Base() + next->Size() - size, alignment);

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}
			break;
		}

		case B_BASE_ADDRESS:
		{
			// find a hole big enough for a new area beginning with "start"
			if (last == NULL) {
				// see if we can build it at the beginning of the specified
				// start
				if (next == NULL || next->Base() > start + (size - 1)) {
					foundSpot = true;
					area->SetBase(start);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				if (next->Base() - (last->Base() + last->Size()) >= size) {
					// we found a spot (it'll be filled up below)
					break;
				}

				last = next;
				next = it.Next();
			}

			addr_t lastEnd = last->Base() + (last->Size() - 1);
			if (next != NULL || end - lastEnd >= size) {
				// got a spot
				foundSpot = true;
				if (lastEnd < start)
					area->SetBase(start);
				else
					area->SetBase(lastEnd + 1);
				break;
			}

			// we didn't find a free spot in the requested range, so we'll
			// try again without any restrictions
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			last = NULL;
			goto second_chance;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
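_InsertAreaSlot is a first-fit search over a base-sorted area list; the is_valid_spot helper it relies on is not shown. Judging only from the call sites, it plausibly checks something like the sketch below; the exact inclusive/exclusive boundary handling is an assumption:

#include <cstddef>

// Inferred check: the aligned candidate base must clear the previous
// area's last byte, and the candidate block's last byte must stay
// within the limit (the next area's base or the end of the window).
bool IsValidSpotSketch(size_t previousEnd, size_t alignedBase,
                       size_t size, size_t limit)
{
    return alignedBase > previousEnd
        && alignedBase + (size - 1) <= limit;
}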
Example #17
status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(malloc_flags(allocationFlags)) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			free_etc(nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}
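ResizeArea's shrink path mirrors ShrinkAreaHead in Example 10 from the other side: the freed tail is either absorbed by an adjacent free range or becomes a new one. The absorb case in miniature (toy types again):

#include <cstddef>

struct Piece { size_t base, size; };

// Shrinking `area` to newSize: the free piece that directly follows it
// grows backward to absorb the freed tail.
void ShrinkInto(Piece& area, Piece& freePiece, size_t newSize)
{
    size_t diff = area.size - newSize;
    freePiece.base = area.base + newSize;
    freePiece.size += diff;
    area.size = newSize;
}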