Example #1
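/*
 * Change the page protection of a sub-range (offSub/cbSub) of a ring-0 memory
 * object: translate the IPRT RTMEM_PROT_* flags into FreeBSD vm_prot_t bits
 * and apply them to the object's VM map with vm_map_protect().
 */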
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t          ProtectionFlags = 0;
    vm_offset_t        AddrStart       = (uintptr_t)pMem->pv + offSub;
    vm_offset_t        AddrEnd         = AddrStart + cbSub;
    vm_map_t           pVmMap          = rtR0MemObjFreeBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
    if (krc == KERN_SUCCESS)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}
Example #2
/*
 * Unguard the page(s) containing the specified object (make them readable
 * and writable to allow full data access).
 */
static void
memguard_unguard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to unguard page!");
}
Example #3
/*
 * mprotect_args(const void *addr, size_t len, int prot)
 *
 * No requirements.
 */
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
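	/* On architectures where readable mappings are implicitly executable,
	 * fold execute permission in whenever read access is requested. */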
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
			       prot, FALSE)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
Example #4
/* Simple allocator for executable memory.  At the two extremes of complexity,
   we have (simple) allocate one page for every executable block and (complex)
   duplicate or modify kalloc to deal separately with executable pages.  As a
   compromise, this allocator allocates in big blocks (about 500 bytes) with a
   `used' bitmap tracking the use of these blocks.  Currently, all allocations
   are smaller than this, and there are never enough of them at once to require
   more than one executable page.  */
void *
kmem_alloc_exec(vm_size_t size)
{
    struct exec_page_header *p;
    int nblocks, i, blockmask;
    vm_address_t v;
    kern_return_t kr;

    nblocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    assert(nblocks <= BLOCKS_PER_PAGE);
    blockmask = (1 << nblocks) - 1;
    while (1) {		/* Loop back at most once to add an extra page. */
	for (p = exec_pages; p != NULL; p = p->next) {
	    for (i = 0; i <= BLOCKS_PER_PAGE - nblocks; i++) {
		if (!(p->usedmap & (blockmask << i))) {
		    p->usedmap |= blockmask << i;
		    v = (vm_address_t) (p + 1) + i * BLOCK_SIZE;
		    ficache(HP700_SID_KERNEL, (vm_offset_t) v, size);
		    return (void *) v;
		}
	    }
	}
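	/* No free run of blocks was found: allocate a wired kernel page, make it
	   executable, link it into the exec_pages list and retry the search. */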
	kr = kmem_alloc_wired(kernel_map, &v, PAGE_SIZE);
	if (kr != KERN_SUCCESS)
	    return NULL;
	kr = vm_map_protect(kernel_map, v, v + PAGE_SIZE, VM_PROT_ALL, FALSE);
	if (kr != KERN_SUCCESS) {
	    kmem_free(kernel_map, v, PAGE_SIZE);
	    return NULL;
	}
	p = (struct exec_page_header *) v;
	p->next = exec_pages;
	exec_pages = p;
	p->usedmap = 0;
    }
}
Example #5
/*
 * Modify a resource limit (from system call)
 *
 * MPSAFE
 */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
        struct proc *p = curproc;
	struct plimit *limit;
        struct rlimit *alimp;
        int error;

        if (which >= RLIM_NLIMITS)
                return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
        alimp = &limit->pl_rlimit[which];

        /*
         * Preserve historical bugs by treating negative limits as unsigned.
         */
        if (limp->rlim_cur < 0)
                limp->rlim_cur = RLIM_INFINITY;
        if (limp->rlim_max < 0)
                limp->rlim_max = RLIM_INFINITY;

	spin_lock(&limit->p_spin);
        if (limp->rlim_cur > alimp->rlim_max ||
            limp->rlim_max > alimp->rlim_max) {
		spin_unlock(&limit->p_spin);
                error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
                if (error)
                        return (error);
	} else {
		spin_unlock(&limit->p_spin);
	}
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;

        switch (which) {
        case RLIMIT_CPU:
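		/* The CPU limit is given in seconds but stored in microseconds;
		 * convert it, clamping to RLIM_INFINITY if it would overflow. */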
		spin_lock(&limit->p_spin);
                if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
                        limit->p_cpulimit = RLIM_INFINITY;
                else
                        limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock(&limit->p_spin);
                break;
        case RLIMIT_DATA:
                if (limp->rlim_cur > maxdsiz)
                        limp->rlim_cur = maxdsiz;
                if (limp->rlim_max > maxdsiz)
                        limp->rlim_max = maxdsiz;
                break;

        case RLIMIT_STACK:
                if (limp->rlim_cur > maxssiz)
                        limp->rlim_cur = maxssiz;
                if (limp->rlim_max > maxssiz)
                        limp->rlim_max = maxssiz;
                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible.  If stack limit is going
                 * up make more accessible, if going down make inaccessible.
                 */
		spin_lock(&limit->p_spin);
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vm_offset_t addr;
                        vm_size_t size;
                        vm_prot_t prot;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = VM_PROT_ALL;
                                size = limp->rlim_cur - alimp->rlim_cur;
                                addr = USRSTACK - limp->rlim_cur;
                        } else {
                                prot = VM_PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
                                addr = USRSTACK - alimp->rlim_cur;
                        }
			spin_unlock(&limit->p_spin);
                        addr = trunc_page(addr);
                        size = round_page(size);
                        vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
                } else {
			spin_unlock(&limit->p_spin);
		}
                break;

        case RLIMIT_NOFILE:
                if (limp->rlim_cur > maxfilesperproc)
                        limp->rlim_cur = maxfilesperproc;
                if (limp->rlim_max > maxfilesperproc)
                        limp->rlim_max = maxfilesperproc;
                break;

        case RLIMIT_NPROC:
                if (limp->rlim_cur > maxprocperuid)
                        limp->rlim_cur = maxprocperuid;
                if (limp->rlim_max > maxprocperuid)
                        limp->rlim_max = maxprocperuid;
                if (limp->rlim_cur < 1)
                        limp->rlim_cur = 1;
                if (limp->rlim_max < 1)
                        limp->rlim_max = 1;
                break;
        case RLIMIT_POSIXLOCKS:
                if (limp->rlim_cur > maxposixlocksperuid)
                        limp->rlim_cur = maxposixlocksperuid;
                if (limp->rlim_max > maxposixlocksperuid)
                        limp->rlim_max = maxposixlocksperuid;
                break;
        }
	spin_lock(&limit->p_spin);
        *alimp = *limp;
	spin_unlock(&limit->p_spin);
        return (0);
}
Example #6
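/*
 * Attempt to free a guard-mode ("gzalloc") zone element.  Validates the guard
 * header, then either write-protects the freed range and parks it in the
 * protected free-element cache (rotating the oldest entry out for unmapping),
 * or unmaps it immediately when no cache is configured.  Returns TRUE if the
 * element was handled here, FALSE if the caller should free it normally.
 */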
boolean_t gzalloc_free(zone_t zone, void *addr) {
	boolean_t gzfreed = FALSE;
	kern_return_t kr;

	if (__improbable(gzalloc_mode &&
		(((zone->elem_size >= gzalloc_min) &&
		    (zone->elem_size <= gzalloc_max))) &&
		(zone->gzalloc_exempt == 0))) {
		gzhdr_t *gzh;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t saddr;
		vm_offset_t free_addr = 0;

		if (gzalloc_uf_mode) {
			gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size);
			saddr = (vm_offset_t) addr - PAGE_SIZE;
		} else {
			gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
			saddr = ((vm_offset_t)addr) - residue;
		}

		assert((saddr & PAGE_MASK) == 0);

		if (gzalloc_consistency_checks) {
			if (gzh->gzsig != GZALLOC_SIGNATURE) {
				panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig);
			}

			if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE))
				panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr);
			/* Partially redundant given the zone check, but may flag header corruption */
			if (gzh->gzsize != zone->elem_size) {
				panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr);
			}
		}

		if (!kmem_ready || gzh->gzone == GZDEADZONE) {
			/* For now, just leak frees of early allocations
			 * performed before kmem is fully configured.
			 * They don't seem to get freed currently;
			 * consider ml_static_mfree in the future.
			 */
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
			return TRUE;
		}

		if (get_preemption_level() != 0) {
				pdzfree_count++;
		}

		if (gzfc_size) {
			/* Either write protect or unmap the newly freed
			 * allocation
			 */
			kr = vm_map_protect(
				gzalloc_map,
				saddr,
				saddr + rounded_size + (1 * PAGE_SIZE),
				gzalloc_prot,
				FALSE);
			if (kr != KERN_SUCCESS)
				panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr);
		} else {
			free_addr = saddr;
		}

		lock_zone(zone);

		/* Insert newly freed element into the protected free element
		 * cache, and rotate out the LRU element.
		 */
		if (gzfc_size) {
			if (zone->gz.gzfc_index >= gzfc_size) {
				zone->gz.gzfc_index = 0;
			}
			free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
			zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
		}

		if (free_addr) {
			zone->count--;
			zone->cur_size -= rounded_size;
		}

		unlock_zone(zone);

		if (free_addr) {
			kr = vm_map_remove(
				gzalloc_map,
				free_addr,
				free_addr + rounded_size + (1 * PAGE_SIZE),
				VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS)
				panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);

			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);
		}

		gzfreed = TRUE;
	}
	return gzfreed;
}