Example #1
int do_fork(unsigned flags)
{
	assert(current_task && kernel_task);
	assert(running_processes < (unsigned)MAX_TASKS || MAX_TASKS == -1);
	addr_t eip;
	task_t *task = task_create();
	page_dir_t *newspace;
	if(flags & FORK_SHAREDIR)
		newspace = vm_copy(current_task->pd);
	else
		newspace = vm_clone(current_task->pd, 0);
	if(!newspace)
	{
		kfree((void *)task);
		return -ENOMEM;
	}
	/* set the address space's entry for the current task.
	 * this is a fast and easy way to store the "what task am I" data
	 * that gets automatically updated when the scheduler switches
	 * into a new address space */
	arch_specific_set_current_task(newspace, (addr_t)task);
	/* Create the new task structure */
	task->pd = newspace;
	copy_task_struct(task, current_task, flags & FORK_SHAREDAT);
	add_atomic(&running_processes, 1);
	/* Temporarily set the state to TASK_USLEEP so the task cannot run
	 * before it is fully initialized, then add it to the queue */
	task->state = TASK_USLEEP;
	tqueue_insert(primary_queue, (void *)task, task->listnode);
	cpu_t *cpu = (cpu_t *)current_task->cpu;
#if CONFIG_SMP
	cpu = fork_choose_cpu(current_task);
#endif
	/* Disable interrupts and copy the parent's stack into the child */
	set_int(0);
	engage_new_stack(task, current_task);
	/* Read the EIP at this exact location. The parent stores it as the
	 * child's eip; when the child is first scheduled, it resumes
	 * execution from this same point. */
	volatile task_t *parent = current_task;
	store_context_fork(task);
	eip = read_eip();
	if(current_task == parent)
	{
		/* These last things allow full execution of the task */
		task->eip = eip;
		task->state = TASK_RUNNING;
		task->cpu = cpu;
		add_atomic(&cpu->numtasks, 1);
		tqueue_insert(cpu->active_queue, (void *)task, task->activenode);
		__engage_idle();
		return task->pid;
	}
	return 0;
}
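As in the classic Unix fork(), the return value tells the caller which side of the fork it is running on: the parent gets the child's PID, the child gets 0, and a negative value is an errno-style failure. A minimal caller sketch, assuming only do_fork() from the example above plus a hypothetical kprintf() logger:

static void spawn_worker(void)
{
	int pid = do_fork(0);
	if(pid == 0) {
		/* Child: resumes at the read_eip() point with the saved eip */
		kprintf("child running\n");
	} else if(pid > 0) {
		/* Parent: do_fork returned the new task's pid */
		kprintf("forked child %d\n", pid);
	} else {
		/* Errno-style failure, e.g. -ENOMEM from vm_clone/vm_copy */
		kprintf("fork failed: %d\n", pid);
	}
}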
Example #2
/* Copy bytes from source to dest, going through the VM system when both
   buffers are page-aligned and the copy is at least a page long; otherwise
   fall back to a plain bcopy.  A vm_copy failure is raised against the
   stream s. */
static void betterCopy(void *source, unsigned bytes, void *dest, NXStream *s) {
    if (bytes < vm_page_size ||
	(((unsigned)source | (unsigned)dest) & (vm_page_size - 1))) {
	bcopy(source, dest, bytes);
    } else {
	kern_return_t ret;
        ret = vm_copy(mach_task_self(), (vm_address_t)source, round_page(bytes), 
	    (vm_address_t)dest);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
    }
}
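The vm_copy() fast path only fires when both pointers are page-aligned and the copy spans at least a page, because the Mach kernel remaps whole pages rather than copying bytes. A hedged sketch of a caller that satisfies that precondition by drawing both buffers from vm_allocate(), which always returns page-aligned regions (the stream s is assumed to exist for error reporting):

static void copyAligned(NXStream *s, unsigned bytes)
{
    vm_address_t src = 0, dst = 0;
    vm_size_t len = round_page(bytes);

    /* vm_allocate returns page-aligned memory, so betterCopy takes the
       vm_copy branch whenever bytes >= vm_page_size */
    if (vm_allocate(mach_task_self(), &src, len, TRUE) != KERN_SUCCESS ||
        vm_allocate(mach_task_self(), &dst, len, TRUE) != KERN_SUCCESS)
        NX_RAISE(NX_streamVMError, s, NULL);
    betterCopy((void *)src, bytes, (void *)dst, s);
    (void) vm_deallocate(mach_task_self(), src, len);
    (void) vm_deallocate(mach_task_self(), dst, len);
}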
Example #3
PassOwnPtr<PurgeableBuffer> PurgeableBuffer::create(const char* data, size_t size)
{
    if (size < minPurgeableBufferSize)
        return PassOwnPtr<PurgeableBuffer>();

    vm_address_t buffer = 0;
    kern_return_t ret = vm_allocate(mach_task_self(), &buffer, size, VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE | VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY);

    ASSERT(ret == KERN_SUCCESS);
    if (ret != KERN_SUCCESS)
        return PassOwnPtr<PurgeableBuffer>();

    ret = vm_copy(mach_task_self(), reinterpret_cast<vm_address_t>(data), size, buffer);

    ASSERT(ret == KERN_SUCCESS);
    if (ret != KERN_SUCCESS) {
        vm_deallocate(mach_task_self(), buffer, size);
        return PassOwnPtr<PurgeableBuffer>();
    }

    return adoptPtr(new PurgeableBuffer(reinterpret_cast<char*>(buffer), size));
}
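VM_FLAGS_PURGABLE asks the kernel for memory it may discard under pressure, which is why create() checks every return code and is prepared to hand back a null buffer. A minimal C sketch using the Mach calls above plus vm_purgable_control() (note the API's "purgable" spelling) to query whether a region has survived; the flow is illustrative, not WebKit's:

#include <mach/mach.h>

/* Allocate a purgeable region and report its current purgeable state,
   or -1 on failure.  Real code would toggle VM_PURGABLE_VOLATILE /
   VM_PURGABLE_NONVOLATILE around periods of disuse. */
static int purgeableState(vm_size_t size)
{
    vm_address_t buf = 0;
    int state = VM_PURGABLE_NONVOLATILE;

    if (vm_allocate(mach_task_self(), &buf, size,
                    VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
        return -1;
    kern_return_t ret = vm_purgable_control(mach_task_self(), buf,
                                            VM_PURGABLE_GET_STATE, &state);
    (void) vm_deallocate(mach_task_self(), buf, size);
    return ret == KERN_SUCCESS ? state : -1;
}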
Example #4
/* Grow stream s so it can hold at least size bytes: allocate a larger
   page-rounded region, vm_copy the old contents across, release the old
   buffer, and re-point the stream's cursors at the new region. */
static void memory_extend(register NXStream *s, int size)
{
    vm_size_t       new_size;
    vm_offset_t     new_addr;
    int             cur_offset;
    kern_return_t   ret;

    new_size = (size + CHUNK_SIZE) & (~(vm_page_size - 1));
    ret = vm_allocate(mach_task_self(), &new_addr, new_size, TRUE);
    if (ret != KERN_SUCCESS)
	NX_RAISE(NX_streamVMError, s, (void *)ret);
    cur_offset = 0;
    if (s->buf_base) {
	int             copySize;

	copySize = s->buf_size;
	if (copySize % vm_page_size)
	    copySize += vm_page_size - (copySize % vm_page_size);
	ret = vm_copy(mach_task_self(),
		      (vm_offset_t)s->buf_base,
		      (vm_size_t)copySize,
		      (vm_offset_t)new_addr);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
	ret = vm_deallocate(mach_task_self(),
			    (vm_offset_t)s->buf_base,
			    (vm_size_t)s->buf_size);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
	cur_offset = s->buf_ptr - s->buf_base;
    }
    s->buf_base = (unsigned char *)new_addr;
    s->buf_size = new_size;
    s->buf_ptr = s->buf_base + cur_offset;
    s->buf_left = new_size - size;
    s->flags &= ~NX_USER_OWNS_BUF;
}
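Both the new allocation and the copy size are rounded up to whole pages, since vm_copy() operates on page-granular mappings. The masking idiom works only because vm_page_size is a power of two; the same round-up written as a standalone helper:

/* Round n up to the next multiple of vm_page_size (a power of two).
   Matches the copySize adjustment above and Mach's own round_page(). */
static vm_size_t roundToPage(vm_size_t n)
{
    return (n + vm_page_size - 1) & ~(vm_page_size - 1);
}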
Example #5
File: malloc.c Project: rohsaini/mkunity
/* The most common use of realloc is to manage a buffer of unlimited size
   that is grown as it fills.  So we try to optimise the case where you
   are growing the last object allocated to avoid copies.  */
void *
realloc(void *data, size_t size)
{
	void *p;
	union header *addr = ((union header *) data) - 1;
	vm_address_t vmaddr = (vm_address_t) addr;
	vm_address_t newaddr;
	vm_size_t oldsize, allocsize;
	size_t tocopy;

	if (data == NULL)
	    return malloc(size);

	oldsize = addr->size;
	allocsize = get_allocsize(size + sizeof(union header), NULL);
	if (allocsize == oldsize)
	    return data;

	/* Deal with every case where we don't want to do a simple
	   malloc+memcpy+free.  Otherwise it is a "simple case" in the
	   comments.  */
	if (allocsize < oldsize) {
	    /* Shrinking.  We favour space over time here since if time is
	       really important you can just not do the realloc.  */
	    if (oldsize >= kalloc_max) {
		/* Shrinking a lot.  */
		if (allocsize >= kalloc_max) {
		    (void) vm_deallocate(mach_task_self(), vmaddr + allocsize,
					 oldsize - allocsize);
		    addr->size = allocsize;
		    return data;
		}
		/* Simple case: shrinking from a whole page or pages to less
		   than a page.  */
	    } else {
		if (vmaddr + oldsize == kalloc_next_space) {
		    /* Shrinking the last item in the current page.  */
		    kalloc_next_space = vmaddr + allocsize;
		    addr->size = allocsize;
		    return data;
		}
		/* Simple case: shrinking enough to fit in a smaller power
		   of two.  */
	    }
	    tocopy = size;
	} else {
	    /* Growing.  */
	    if (allocsize >= kalloc_max) {
		/* Growing a lot.  */
		if (oldsize >= kalloc_max) {
		    /* We could try to vm_allocate extra pages after the old
		       data, but vm_allocate + vm_copy is not much more
		       expensive than that, even if it does fragment the
		       address space a bit more.  */
		    newaddr = vmaddr;
		    if (vm_allocate(mach_task_self(), &newaddr, allocsize,
				    TRUE) != KERN_SUCCESS ||
			vm_copy(mach_task_self(), vmaddr, oldsize, newaddr)
			!= KERN_SUCCESS)
			return NULL;
		    (void) vm_deallocate(mach_task_self(), vmaddr, oldsize);
		    addr = (union header *) newaddr;
		    addr->size = allocsize;
		    return (void *) (addr + 1);
		}
		/* Simple case: growing from less than a page to one or more
		   whole pages.  */
	    } else {
		/* Growing from a within-page size to a larger within-page
		   size.  Frequently the item being grown is the last one
		   allocated so try to avoid copies in that case.  */
		if (vmaddr + oldsize == kalloc_next_space) {
		    if (vmaddr + allocsize <= kalloc_end_of_space) {
			kalloc_next_space = vmaddr + allocsize;
			addr->size = allocsize;
			return data;
		    } else {
			newaddr = round_page(vmaddr);
			if (vm_allocate(mach_task_self(), &newaddr,
					vm_page_size, FALSE)
			    == KERN_SUCCESS) {
			    kalloc_next_space = vmaddr + allocsize;
			    kalloc_end_of_space = newaddr + vm_page_size;
			    addr->size = allocsize;
			    return (void *) (addr + 1);
			}
			/* Simple case: growing the last object in the page
			   past the end of the page when the next page is
			   unavailable.  */
		    }
		}
		/* Simple case: growing a within-page object that is not the
		   last object allocated. */
	    }
	    tocopy = oldsize - sizeof(union header);
	}

	/* So if we get here, we can't do any better than this: */
	p = malloc(size);
	if (p != NULL) {
	    memcpy(p, data, tocopy);
	    free(data);
	}
	return p;
}
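The header comment describes the workload this realloc is tuned for: a buffer grown repeatedly as it fills. A hedged sketch of that pattern; because buf is usually the most recently allocated object, the kalloc_next_space fast paths above can extend it in place on most iterations:

/* Append one byte, doubling the buffer when full.  Illustrative only. */
static char *append_byte(char *buf, size_t *len, size_t *cap, char c)
{
	if (*len == *cap) {
		size_t newcap = *cap ? *cap * 2 : 64;
		char *p = realloc(buf, newcap);
		if (p == NULL)
			return NULL;	/* original buf is still valid */
		buf = p;
		*cap = newcap;
	}
	buf[(*len)++] = c;
	return buf;
}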