/**
 * OS-specific allocation function.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    size_t      cbAllocated = cb;
    PRTMEMHDR   pHdr;

#ifdef RT_ARCH_AMD64
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), VERR_NOT_SUPPORTED);
        cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
        pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
    }
    else
#endif
    {
        unsigned fKmFlags = fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? KM_NOSLEEP : KM_SLEEP;
        if (fFlags & RTMEMHDR_FLAG_ZEROED)
            pHdr = (PRTMEMHDR)kmem_zalloc(cb + sizeof(*pHdr), fKmFlags);
        else
            pHdr = (PRTMEMHDR)kmem_alloc(cb + sizeof(*pHdr), fKmFlags);
    }
    if (RT_UNLIKELY(!pHdr))
    {
        LogRel(("rtMemAllocEx(%u, %#x) failed\n", (unsigned)cb + sizeof(*pHdr), fFlags));
        return VERR_NO_MEMORY;
    }

    pHdr->u32Magic  = RTMEMHDR_MAGIC;
    pHdr->fFlags    = fFlags;
    pHdr->cb        = cbAllocated;
    pHdr->cbReq     = cb;

    *ppHdr = pHdr;
    return VINF_SUCCESS;
}
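
The header set up above is what makes the release side simple: pHdr->cb records the size that was actually allocated (page-rounded in the exec case), so the matching free routine can hand exactly that many bytes back to the right allocator. Below is a minimal sketch of that free path under the same RTMEMHDR layout and flags; the function name is hypothetical and the body is illustrative, not a quote of the shipped VirtualBox code.

/*
 * Sketch of the matching free path (hypothetical name, modeled on what
 * the real counterpart in the same file has to do): the stored header
 * tells us how many bytes to free and which allocator they came from.
 */
static void rtR0MemFreeSketch(PRTMEMHDR pHdr)
{
    pHdr->u32Magic = ~RTMEMHDR_MAGIC;   /* invalidate the magic to catch double frees */
#ifdef RT_ARCH_AMD64
    if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
        segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
    else
#endif
        kmem_free(pHdr, pHdr->cb + sizeof(*pHdr));
}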
Example #2
/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}
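
For context on where a back-end like hat_memload_alloc() gets hooked up: vmem arenas name their import and release callbacks when they are created. The sketch below is a hypothetical arena setup (the arena name, source arena, and flags are assumptions), shown only to illustrate how vmem_create() wires such a function in.

/*
 * Hypothetical arena setup (illustrative only): hat_memload_alloc()
 * becomes the arena's import callback, so every allocation from this
 * arena pulls pages through it with VM_MEMLOAD | VM_NORELOC added.
 */
static vmem_t *example_memload_arena;

static void
example_memload_arena_init(void)
{
	example_memload_arena = vmem_create("example_hat_memload",
	    NULL, 0, PAGESIZE,			/* no preexisting span, page-sized quantum */
	    hat_memload_alloc, segkmem_free,	/* import / release callbacks */
	    heap_arena, 0, VM_SLEEP);		/* import from the kernel heap arena */
}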
Example #3
/*
 * Allocations from static_arena arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
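
A short usage sketch of why this matters (hypothetical helper, illustrative only): because VM_NORELOC keeps the pages from being relocated, a caller can safely record the physical address of memory imported through segkmem_alloc_permanent() alongside its virtual address.

/*
 * Hypothetical usage sketch: memory from an arena backed by
 * segkmem_alloc_permanent() stays on the same physical pages, so the
 * physical address computed here remains valid for the buffer's lifetime.
 */
static void *
example_wired_buffer(size_t size, uint64_t *paddrp)
{
	caddr_t va = vmem_alloc(static_arena, size, VM_SLEEP);
	pfn_t pfn = hat_getpfnum(kas.a_hat, va);

	*paddrp = ((uint64_t)pfn << PAGESHIFT) | ((uintptr_t)va & PAGEOFFSET);
	return (va);
}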
Example #4
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void  *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_inc_ulong_nv(lpthrtp);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * Once we get above the throttle start, back off
			 * exponentially on large-page attempts and reaping.
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    !ISP2(lpthrt)) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if (ISP2(lpthrt))
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * We are low on free memory in kmem_lp_arena, so we
			 * let only one thread allocate the heap_lp
			 * quantum-size chunk that everybody is going to
			 * share.
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt)  {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * The VM_ABORT flag prevents vmem_xalloc() from sleeping when
		 * large pages are not available; in that case this allocation
		 * attempt fails and we retry with small pages. We also clear
		 * VM_PANIC because we do not want to panic over a failure we
		 * are going to retry anyway.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large-page throttling has not started yet, start it now */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}
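
The throttling logic near the top of the function is the subtle part: once the counter is nonzero, every attempt increments it, and above segkmem_lpthrottle_start the large-page path is skipped except at exponentially spaced counter values, with a kmem_reap() issued each time the counter lands just past a power of two. The standalone sketch below (hypothetical name and parameters) restates that decision in isolation.

/*
 * Hypothetical restatement of the throttle decision used above: returns
 * nonzero when the caller should skip large pages and fall back to
 * segkmem_alloc(), reaping at exponentially spaced points.
 */
static int
example_lp_throttled(ulong_t *lpthrtp, ulong_t start, ulong_t max)
{
	ulong_t lpthrt = atomic_inc_ulong_nv(lpthrtp);

	if (lpthrt >= max)
		lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, max / 4);

	if (lpthrt > start && !ISP2(lpthrt)) {
		if (ISP2(lpthrt - 1))
			kmem_reap();	/* reap once per backoff interval */
		return (1);		/* fall back to small pages */
	}
	return (0);			/* go ahead and try a large page */
}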