Example 1: uuid_table_insert
Grows a global UUID table one entry at a time with kmem_realloc; returns 0 if the UUID is already present, 1 after inserting it.
int
uuid_table_insert(uuid_t *uuid)
{
	int	i, hole;

	mutex_lock(&uuid_monitor, PVFS);
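	/*
	 * Scan the table: return 0 if the UUID is already present,
	 * remembering the first free (nil) slot along the way.
	 */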
	for (i = 0, hole = -1; i < uuid_table_size; i++) {
		if (uuid_is_nil(&uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &uuid_table[i])) {
			mutex_unlock(&uuid_monitor);
			return 0;
		}
	}
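	/* No free slot found: grow the table by one entry. */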
	if (hole < 0) {
		uuid_table = kmem_realloc(uuid_table,
			(uuid_table_size + 1) * sizeof(*uuid_table),
			uuid_table_size * sizeof(*uuid_table),
			KM_SLEEP);
		hole = uuid_table_size++;
	}
	uuid_table[hole] = *uuid;
	mutex_unlock(&uuid_monitor);
	return 1;
}
Example 2: OSSerialize::ensureCapacity (older revision)
Grows the serialization buffer to a page-rounded capacity with kmem_realloc, frees the old range, and zeroes the newly added tail. The rounding is not checked for overflow.
unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity)
{
	char *newData;

	if (newCapacity <= capacity)
		return capacity;

	// round up
	newCapacity = round_page_32(newCapacity);
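	// Note: round_page_32() wraps to 0 when newCapacity is within one
	// page of UINT_MAX; this revision does not check for that case
	// (compare Example 3).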

	kern_return_t rc = kmem_realloc(kernel_map,
					(vm_offset_t)data,
					capacity,
					(vm_offset_t *)&newData,
					newCapacity);
	if (!rc) {
	    ACCUMSIZE(newCapacity);

	    // kmem realloc does not free the old address range
	    kmem_free(kernel_map, (vm_offset_t)data, capacity); 
	    ACCUMSIZE(-capacity);
	    
	    // kmem realloc does not zero out the new memory
	    // and this could end up going to user land
	    bzero(&newData[capacity], newCapacity - capacity);
		
	    data = newData;
	    capacity = newCapacity;
	}

	return capacity;
}
Example 3: OSSerialize::ensureCapacity (later revision)
The same function, with the rounding guarded by round_page_overflow() and the allocation tagged VM_KERN_MEMORY_IOKIT.
unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity)
{
    char *newData;

    if (newCapacity <= capacity)
        return capacity;

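    // Unlike Example 2, reject a newCapacity that would wrap when rounded
    // up to a page boundary; on overflow the old capacity is returned.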
    if (round_page_overflow(newCapacity, &newCapacity)) {
        return capacity;
    }

    kern_return_t rc = kmem_realloc(kernel_map,
                                    (vm_offset_t)data,
                                    capacity,
                                    (vm_offset_t *)&newData,
                                    newCapacity,
                                    VM_KERN_MEMORY_IOKIT);
    if (!rc) {
        OSCONTAINER_ACCUMSIZE(newCapacity);

        // kmem realloc does not free the old address range
        kmem_free(kernel_map, (vm_offset_t)data, capacity);
        OSCONTAINER_ACCUMSIZE(-((size_t)capacity));

        // kmem realloc does not zero out the new memory
        // and this could end up going to user land
        bzero(&newData[capacity], newCapacity - capacity);

        data = newData;
        capacity = newCapacity;
    }

    return capacity;
}
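The functional difference between Examples 2 and 3 is the overflow check on the page rounding. As a minimal sketch of that idea, assuming a fixed 4 KiB page size (an illustration only, not the actual XNU round_page_overflow(), which is defined against the machine's page mask):

#include <limits.h>
#include <stdbool.h>

/* Hypothetical helper: round `size` up to a 4 KiB boundary,
 * reporting overflow instead of silently wrapping to 0. */
static bool
round_page_overflow_sketch(unsigned int size, unsigned int *out)
{
	const unsigned int mask = 4096u - 1;

	if (size > UINT_MAX - mask)
		return true;	/* size + mask would wrap past UINT_MAX */
	*out = (size + mask) & ~mask;
	return false;		/* *out is now page-aligned */
}

Without such a guard, a newCapacity in Example 2 that lies within one page of UINT_MAX rounds to 0, and if the reallocation then succeeds, the length newCapacity - capacity passed to bzero() underflows to a huge value.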
Example 4: ipc_table_realloc
A thin wrapper over kmem_realloc that grows an IPC table, returning the new address or 0 on failure.
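/*
 * Grow an IPC table allocation: returns the address of the new,
 * possibly moved table, or 0 if kmem_realloc() failed. The caller
 * remains responsible for the old range, which kmem_realloc() does
 * not free.
 */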
vm_offset_t
ipc_table_realloc(
	vm_size_t	old_size,
	vm_offset_t	old_table,
	vm_size_t	new_size)
{
	vm_offset_t new_table;

	if (kmem_realloc(kalloc_map, old_table, old_size,
			 &new_table, new_size) != KERN_SUCCESS)
		new_table = 0;

	return new_table;
}
Example 5: krealloc
A grow-only reallocator: blocks at or above kalloc_max_prerounded go through kmem_realloc; smaller blocks are copied into the next-larger kalloc zone.
void
krealloc(
	vm_offset_t	*addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_offset_t naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page_32(old_size);
		new_size = round_page_32(new_size);
		if (new_size > old_size) {

			/*
			 * Drop the caller's lock across the potentially
			 * blocking call, as the other paths below do.
			 */
			simple_unlock(lock);
			if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
					 new_size) != KERN_SUCCESS) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so free it before the pointer is overwritten.
			 */
			kmem_free(kalloc_map, *addrp, old_size);
			*addrp = naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = 0;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = naddr;
}
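To make the size-class walk in krealloc concrete, here is a stand-alone sketch of the same two loops, assuming for illustration a minimum block size of 16 bytes in place of the kernel's KALLOC_MINSIZE:

#include <stdio.h>

/* Sketch of krealloc's size-class computation: find the power-of-two
 * class holding old_size, then the strictly larger class for new_size. */
int
main(void)
{
	unsigned long allocsize = 16;	/* assumed minimum block size */
	unsigned long old_size = 100, new_size = 300;
	int zindex = 0;			/* offset from first_k_zone */

	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}
	printf("old class: %lu bytes (zone +%d)\n", allocsize, zindex);

	/* the new block always comes from a strictly larger class */
	allocsize <<= 1;
	zindex++;
	while (allocsize < new_size) {
		allocsize <<= 1;
		zindex++;
	}
	printf("new class: %lu bytes (zone +%d)\n", allocsize, zindex);
	return 0;
}

With these inputs the walk selects a 128-byte class for the old block and a 512-byte class for the new one.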