Example #1
static
int
vm_region_expand( struct vm_region *vmr, unsigned npages ) {
	unsigned		new_pages;
	int			res;
	unsigned		i;
	unsigned		old_pages;

	old_pages = vm_page_array_num( vmr->vmr_pages );
	KASSERT( npages >= old_pages );	//expand only; guard against unsigned underflow
	new_pages = npages - old_pages;

	//trivial case, nothing to do.
	if( new_pages == 0 )
		return 0;

	//see if we can back this loan with storage.
	res = swap_reserve( new_pages );
	if( res )
		return res;

	//attempt to resize the vmr_pages array.
	res = vm_page_array_setsize( vmr->vmr_pages, npages );
	if( res ) {
		swap_unreserve( new_pages );
		return res;
	}

	//initialize each of the newly created vm_pages to NULL.
	for( i = old_pages; i < npages; ++i )
		vm_page_array_set( vmr->vmr_pages, i, NULL );
	
	return 0;
}
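
A minimal caller sketch (hypothetical helper, not part of the listing): because vm_region_expand() returns the swap it charged whenever the array resize fails, the region is unchanged on error and the caller has nothing to roll back.

static int
heap_grow( struct vm_region *heap, unsigned npages ) {
	//on failure the reservation has already been unwound for us.
	return vm_region_expand( heap, npages );
}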
Example #2
/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}
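
An illustrative usage sketch (the wrapper function, map, and size here are assumptions, not from the listing): FreeBSD pairs kmap_alloc_wait() with kmap_free_wakeup(), which deletes the mapping (dropping the charge attached via MAP_ACC_CHARGED above) and wakes any thread sleeping in the loop.

static int
exec_args_example(void)
{
	vm_offset_t addr;

	addr = kmap_alloc_wait(exec_map, round_page(ARG_MAX));
	if (addr == 0)
		return (ENOMEM);	/* reservation failed or map too small */
	/* ... use the buffer ... */
	kmap_free_wakeup(exec_map, addr, round_page(ARG_MAX));
	return (0);
}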
Example #3
/*
 * vm_object_setsize: change the size of a vm_object.
 */
int
vm_object_setsize(struct addrspace *as, struct vm_object *vmo, unsigned npages)
{
    int result;
    unsigned i;
    struct lpage *lp;

    KASSERT(vmo != NULL);
    KASSERT(vmo->vmo_lpages != NULL);

    if (npages < lpage_array_num(vmo->vmo_lpages)) {
        for (i=npages; i<lpage_array_num(vmo->vmo_lpages); i++) {
            lp = lpage_array_get(vmo->vmo_lpages, i);
            if (lp != NULL) {
                KASSERT(as != NULL);
                /* remove any tlb entry for this mapping */
                mmu_unmap(as, vmo->vmo_base+PAGE_SIZE*i);
                lpage_destroy(lp);
            }
            else {
                swap_unreserve(1);
            }
        }
        result = lpage_array_setsize(vmo->vmo_lpages, npages);
        /* shrinking an array shouldn't fail */
        KASSERT(result==0);
    }
    else if (npages > lpage_array_num(vmo->vmo_lpages)) {
        unsigned oldsize = lpage_array_num(vmo->vmo_lpages);
        unsigned newpages = npages - oldsize;

        result = swap_reserve(newpages);
        if (result) {
            return result;
        }

        result = lpage_array_setsize(vmo->vmo_lpages, npages);
        if (result) {
            swap_unreserve(newpages);
            return result;
        }
        for (i=oldsize; i<npages; i++) {
            lpage_array_set(vmo->vmo_lpages, i, NULL);
        }
    }
    return 0;
}
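
A hedged rollback sketch (hypothetical helper, not in the original source): because the shrink branch above returns the swap reserved for NULL slots, a caller can undo a successful grow simply by resizing back down, which makes an all-or-nothing resize of two objects easy to express.

static int
vm_object_grow_two(struct addrspace *as, struct vm_object *a, unsigned a_new,
    unsigned a_old, struct vm_object *b, unsigned b_new)
{
    int result;

    result = vm_object_setsize(as, a, a_new);
    if (result) {
        return result;
    }
    result = vm_object_setsize(as, b, b_new);
    if (result) {
        /* shrinking a back releases the reservations of its new NULL slots */
        vm_object_setsize(as, a, a_old);
        return result;
    }
    return 0;
}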
Example #4
struct vm_region *
vm_region_create( size_t npages ) {
	int 			res;
	struct vm_region	*vmr;
	unsigned		i;
	int			err;

	//see if we can reserve npages of swap first.
	err = swap_reserve( npages );
	if( err )
		return NULL;

	//attempt to create the vm_region
	vmr = kmalloc( sizeof( struct vm_region ) );
	if( vmr == NULL )
		return NULL;

	//create the vm_pages.
	vmr->vmr_pages = vm_page_array_create();
	if( vmr->vmr_pages == NULL ) {
		kfree( vmr );
		swap_unreserve( npages );
		return NULL;
	}

	//set the base address to point to an invalid virtual address.
	vmr->vmr_base = 0xdeadbeef;

	//adjust the array to hold npages.
	res = vm_page_array_setsize( vmr->vmr_pages, npages );
	if( res ) {
		vm_page_array_destroy( vmr->vmr_pages );
		kfree( vmr );
		swap_unreserve( npages );
		return NULL;
	}

	//initialize all the pages to NULL.
	for( i = 0; i < npages; ++i )
		vm_page_array_set( vmr->vmr_pages, i, NULL );

	return vmr;
}
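
A hedged teardown sketch (hypothetical, modeled on the shrink branch of Example #3): every slot created above holds one reserved page of swap, so the destructor must return it, either by destroying the page or by an explicit swap_unreserve(1) for slots still NULL. vm_page_array_get() and vm_page_destroy() are assumed counterparts of the lpage_* calls in Example #3.

static void
vm_region_destroy( struct vm_region *vmr ) {
	unsigned	i;
	unsigned	npages;
	struct vm_page	*pg;

	npages = vm_page_array_num( vmr->vmr_pages );
	for( i = 0; i < npages; ++i ) {
		pg = vm_page_array_get( vmr->vmr_pages, i );
		if( pg != NULL )
			vm_page_destroy( pg );	//assumed to release its own reservation
		else
			swap_unreserve( 1 );	//a NULL slot still carries a reservation
	}

	vm_page_array_destroy( vmr->vmr_pages );
	kfree( vmr );
}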
Example #5
/*
 * vm_object_create: Allocate a new vm_object with nothing in it.
 * Returns: new vm_object on success, NULL on error.
 */
struct vm_object *
vm_object_create(size_t npages)
{
    struct vm_object *vmo;
    unsigned i;
    int result;

    result = swap_reserve(npages);
    if (result != 0) {
        return NULL;
    }

    vmo = kmalloc(sizeof(struct vm_object));
    if (vmo == NULL) {
        swap_unreserve(npages);
        return NULL;
    }

    vmo->vmo_lpages = lpage_array_create();
    if (vmo->vmo_lpages == NULL) {
        kfree(vmo);
        swap_unreserve(npages);
        return NULL;
    }

    vmo->vmo_base = 0xdeafbeef;		/* make sure these */
    vmo->vmo_lower_redzone = 0xdeafbeef;	/* get filled in later */

    /* add the requested number of zerofilled pages */
    result = lpage_array_setsize(vmo->vmo_lpages, npages);
    if (result) {
        lpage_array_destroy(vmo->vmo_lpages);
        kfree(vmo);
        swap_unreserve(npages);
        return NULL;
    }

    for (i=0; i<npages; i++) {
        lpage_array_set(vmo->vmo_lpages, i, NULL);
    }

    return vmo;
}
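
A hedged usage sketch (hypothetical as_define_region-style caller, not from the listing): the constructor deliberately poisons vmo_base and vmo_lower_redzone with 0xdeafbeef, so whoever creates the object is expected to fill them in before the object is used.

    struct vm_object *vmo;

    vmo = vm_object_create(npages);
    if (vmo == NULL) {
        return ENOMEM;
    }
    vmo->vmo_base = vaddr;              /* replace the 0xdeafbeef poison */
    vmo->vmo_lower_redzone = redzone;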
Example #6
int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct thread *td2;
	struct vmspace *vm2;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new, ok;
	static int curfail;
	static struct timeval lastfail;
	int flags, pages;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/* Can't get a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);

		/* Check if we are using supported flags. */
		if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
			return (EINVAL);
	}

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}

	fp_procdesc = NULL;
	newproc = NULL;
	vm2 = NULL;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create. There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
		error = EAGAIN;
		sx_xlock(&allproc_lock);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("maxproc limit exceeded by uid %u (pid %d); "
			    "see tuning(7) and login.conf(5)\n",
			    td->td_ucred->cr_ruid, p1->p_pid);
		}
		sx_xunlock(&allproc_lock);
		goto fail2;
	}

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong. We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd,
		    fr->fr_pd_flags, fr->fr_pd_fcaps);
		if (error != 0)
			goto fail2;
	}

	mem_charged = 0;
	if (pages == 0)
		pages = kstack_pages;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail2;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed. The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail2;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 *      per-cred resource counters.
	 */
	proc_set_cred_init(newproc, crhold(td->td_ucred));

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	newproc->p_klist = knlist_alloc(&newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	sx_xlock(&allproc_lock);

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(td, RLIMIT_NPROC));
	}
	if (ok) {
		do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
		return (0);
	}

	error = EAGAIN;
	sx_sunlock(&proctree_lock);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	crfree(newproc->p_ucred);
	newproc->p_ucred = NULL;
fail2:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	pause("fork", hz / 2);
	return (error);
}
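
A usage sketch modeled on FreeBSD's sys_fork() (hedged; the wrapper itself is an illustration): callers describe the fork in a struct fork_req, which is how the fr_flags/fr_pidp invariants checked at the top of fork1() get established.

static int
sys_fork_example(struct thread *td)
{
	struct fork_req fr;
	int error;
	pid_t pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;	/* copy the fd table, create a child */
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}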