Example #1
/*
 * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't
 * unmap when the window's debugfs dir is in use. This serializes close
 * of a window even on another VAS instance, but since it's not a critical
 * path, just minimize the time we hold the mutex for now. We can add
 * a per-instance mutex later if necessary.
 */
static void unmap_winctx_mmio_bars(struct vas_window *window)
{
	int len;
	void *uwc_map;
	void *hvwc_map;
	u64 busaddr_start;

	mutex_lock(&vas_mutex);

	hvwc_map = window->hvwc_map;
	window->hvwc_map = NULL;

	uwc_map = window->uwc_map;
	window->uwc_map = NULL;

	mutex_unlock(&vas_mutex);

	if (hvwc_map) {
		get_hvwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(hvwc_map, busaddr_start, len);
	}

	if (uwc_map) {
		get_uwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(uwc_map, busaddr_start, len);
	}
}
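
Examples #1 and #3 call a three-argument unmap_region() helper that is not shown in this listing. A minimal sketch, assuming the conventional pairing of iounmap() with release_mem_region() for an ioremap'd MMIO range (the casts and exact types are assumptions, not taken from the source):

/*
 * Sketch of the assumed helper: drop the kernel mapping and release the
 * reserved physical MMIO range that was claimed when the window was mapped.
 */
static void unmap_region(void *addr, u64 busaddr_start, int len)
{
	iounmap((void __iomem *)addr);
	release_mem_region((phys_addr_t)busaddr_start, len);
}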
Example #2
/*
 * Flush from the shared cache the least-recently-used region of memory
 * and unmap it from the server.  Assumes that only the mru list is
 * locked.
 */
void
user_memory_flush_lru()
{
	register user_memory_t	region_p;

	region_p = oldest_region;

	ASSERT(region_p->ref_count >= 0);
	if (region_p->ref_count > 0) {
		/* can't flush it now: try later */
		return;
	}

	/*
	 * Unlink it from the cache lists
	 */
	user_memory_delete(region_p, TRUE, "user_memory_flush_lru");

	debug_prf(2, ("user_memory_flush_lru()\n"));

	/*
	 * Deallocate the memory and the region descriptor
	 */
	unmap_region(region_p);
}
Example #3
/*
 * Unmap the paste address region for a window.
 */
static void unmap_paste_region(struct vas_window *window)
{
	int len;
	u64 busaddr_start;

	if (window->paste_kaddr) {
		compute_paste_address(window, &busaddr_start, &len);
		unmap_region(window->paste_kaddr, busaddr_start, len);
		window->paste_kaddr = NULL;
		kfree(window->paste_addr_name);
		window->paste_addr_name = NULL;
	}
}
Example #4
void
user_memory_unlock(
	user_memory_t		region_p)
{
	if (!region_p)
		return;
	ASSERT(region_p->ref_count > 0);
	region_p->ref_count--;
	if (region_p->ref_count == 0 &&
	    region_p->task->osfmach3.task->mach_aware) {
		/*
		 * This task can change its memory at any time using Mach
		 * calls, so don't keep anything in the user memory cache.
		 */
		user_memory_delete(region_p, TRUE, "user_memory_unlock");
		unmap_region(region_p);
	}
}
Example #5
/*
 * Called when a VM area is deallocated in the task's address space.
 * Flushes the shared memory regions in that area.
 */
void
user_memory_flush_area(
	struct osfmach3_mach_task_struct	*mach_task,
	vm_address_t				start,
	vm_address_t				end)
{
	user_memory_t	region_p, next_p;
	vm_address_t	start_region, end_region;

	for (region_p = mach_task->user_memory;
	     region_p;
	     region_p = next_p) {
		next_p = region_p->next_intask;
		start_region = PAGENUM_TO_ADDR(region_p->user_page);
		end_region = start_region + region_p->size;
		if (end_region > start && start_region < end) {
			user_memory_delete(region_p, TRUE,
					   "user_memory_flush_area");
			unmap_region(region_p);
		}
	}
}
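
The condition in user_memory_flush_area() is the standard half-open interval overlap test. A self-contained illustration (the function and values below are hypothetical, written only to show the predicate):

#include <assert.h>

/* Hypothetical illustration of the overlap test used above:
   [a_start, a_end) intersects [b_start, b_end) iff each range
   starts before the other one ends. */
static int ranges_overlap(unsigned long a_start, unsigned long a_end,
			  unsigned long b_start, unsigned long b_end)
{
	return a_end > b_start && a_start < b_end;
}

int main(void)
{
	assert(ranges_overlap(0x1000, 0x2000, 0x1800, 0x3000));  /* partial overlap */
	assert(!ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* merely adjacent */
	return 0;
}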
Example #6
/*
 * Called when a task exits.  Flushes all the shared memory regions for
 * that task.
 */
void
user_memory_flush_task(
	struct osfmach3_mach_task_struct	*mach_task)
{
	register user_memory_t	region_p, next_p;
	int			starting_num_entries;

	region_p = mach_task->user_memory;
	starting_num_entries = user_memory_num_entries;

	/*
	 * The first time through, just unlink the regions from all lists
	 * but don't deallocate them, to minimize list unavailability
	 */
	for ( ; region_p; region_p = next_p) {
		next_p = region_p->next_intask;
		user_memory_delete(region_p, FALSE, "user_memory_flush_task");
	}

	region_p = mach_task->user_memory; /* Save it for next pass */
	mach_task->user_memory = NULL;
	ASSERT(mach_task->um_hint1 == NULL);
	ASSERT(mach_task->um_hint2 == NULL);

	/*
	 * This time through we will deallocate all the regions, which are
	 * no longer in any accessible list
	 */
	for ( ; region_p; region_p = next_p) {
		next_p = region_p->next_intask;
		unmap_region(region_p);
	}

	debug_prf(2, ("user_memory_flush_task: "
		      "deallocated %d regions of task %p\n",
		      starting_num_entries - user_memory_num_entries,
		      mach_task));
}
Example #7
/* Unmap a single page (0x1000 bytes) starting at vaddr. */
void unmap_page(void *vaddr)
{
	unmap_region(vaddr, 0x1000);
}
Example #8
/* Attempt to mmap a region of memory of size LEN bytes somewhere
   between LO and HI.  Returns the address of the region on success, 0
   otherwise.  MAPS is the current address space map, with NMAPS
   elements.  FD is the mmap file descriptor argument. */
static Address constrained_mmap(size_t len, Address lo, Address hi,
                                const dyninstmm_t *maps, int nmaps, int fd)
{
    const dyninstmm_t *mlo, *mhi, *p;
    Address beg, end, try;
#if defined (os_linux)  && defined(arch_power)
    // DYNINSTheap_loAddr should already be set in DYNINSTos_malloc.
    // Set it again here in case constrained_mmap is called from a different code path.
    DYNINSTheap_loAddr = getpagesize();
#endif

    if (lo > DYNINSTheap_hiAddr) return 0;

    if (lo < DYNINSTheap_loAddr) lo = DYNINSTheap_loAddr;
    if (hi > DYNINSTheap_hiAddr) hi = DYNINSTheap_hiAddr;

    /* Round down to nearest page boundary */
    lo = lo & ~(psize-1);
    hi = hi & ~(psize-1);

    /* Round up to nearest page boundary */
    if (len % psize) {
        len += psize;
        len = len & ~(psize-1);
    }

    assert(lo < hi);
    /* Find lowest (mlo) and highest (mhi) segments between lo and
       hi.  If either lo or hi occurs within a segment, they are
       shifted out of it toward the other bound. */
    mlo = maps;
    mhi = &maps[nmaps-1];
    while (mlo <= mhi) {
        beg = BEG(mlo);
        end = END(mlo);

        if (lo < beg)
            break;

        if (lo >= beg && lo < end)
            /* lo occurs in this segment.  Shift lo to end of segment. */
            lo = end; /* still a page boundary */

        ++mlo;
    }

    while (mhi >= mlo) {
        beg = BEG(mhi);
        end = END(mhi);

        if (hi > end)
            break;
        if (hi >= beg && hi <= end)
            /* hi occurs in this segment (or just after it).  Shift
               hi to beginning of segment. */
            hi = beg; /* still a page boundary */

        --mhi;
    }
    if (lo >= hi)
        return 0;

    /* We've set the bounds of the search, now go find some free space. */

    /* Pathological cases in which the range (lo,hi) is entirely
       above or below the rest of the address space, or there are no
       segments between lo and hi.  Return no matter what from
       here. */
    if (BEG(mlo) >= hi || END(mhi) <= lo) {
        return trymmap(len, lo, hi, psize, fd);
    }
    assert(lo < BEG(mlo) && hi > END(mhi));
    /* Try to mmap in space before mlo */
    try = trymmap(len, lo, BEG(mlo), psize, fd);
    if (try) {
        return try;
    }

    /* Try to mmap in space between mlo and mhi.  Try nothing here if
       mlo and mhi are the same. */
    for (p = mlo; p < mhi; p++) {
        try = trymmap(len, END(p), BEG(p+1), psize, fd);
        if (try)
            return try;
    }

    /* Try to mmap in space between mhi and hi */
    try = trymmap(len, END(mhi), hi, psize, fd);
    if (try)
        return try;

    /* We've tried everything */
    return 0;
}
#undef BEG
#undef END
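
constrained_mmap() relies on a trymmap() helper that is not included in this listing. The sketch below is hypothetical: it assumes trymmap() probes page-aligned hint addresses within [lo, hi) and keeps only a mapping that lands entirely inside that window; the protection bits, flags, and use of the fd argument are assumptions rather than Dyninst's actual implementation, and it presumes <sys/mman.h> plus the surrounding Address typedef are available.

/* Hypothetical trymmap() sketch: walk page-aligned hints in [lo, hi) and
   return the first mapping the kernel places fully inside the window,
   or 0 if nothing fits. */
static Address trymmap(size_t len, Address lo, Address hi, size_t pgsize, int fd)
{
    Address hint;
    void *addr;

    for (hint = lo; hint + len <= hi; hint += pgsize) {
        addr = mmap((void *)hint, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE, fd, 0);
        if (addr == MAP_FAILED)
            continue;
        if ((Address)addr >= lo && (Address)addr + len <= hi)
            return (Address)addr;   /* landed inside the requested window */
        munmap(addr, len);          /* missed the window; try the next hint */
    }
    return 0;
}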


static int heap_memmapCompare(const void *A, const void *B)
{
    const dyninstmm_t *a = (const dyninstmm_t *)A;
    const dyninstmm_t *b = (const dyninstmm_t *)B;
    if (a->pr_vaddr < b->pr_vaddr) return -1;
    if (a->pr_vaddr > b->pr_vaddr) return 1;
    return 0;
}

void *DYNINSTos_malloc(size_t nbytes, void *lo_addr, void *hi_addr)
{
    void *heap;
    size_t size = nbytes;
    heapList_t *node = (heapList_t *)malloc(sizeof(heapList_t));

    if (node == NULL)
        return NULL;

    /* initialize page size */
    if (psize == -1) psize = getpagesize();

    /* buffer size must be aligned */
    if (size % DYNINSTheap_align != 0) {
        free(node);
        return ((void *)-1);
    }

    /* use malloc() if appropriate */
    if (DYNINSTheap_useMalloc(lo_addr, hi_addr)) {

        Address ret_heap;
        int size_heap = size + DYNINSTheap_align;
        heap = malloc(size_heap);
        if (heap == NULL) {
            free(node);
#ifdef DEBUG
            fprintf(stderr, "Failed to MALLOC\n");
#endif
            return NULL;
        }
        ret_heap = heap_alignUp((Address)heap, DYNINSTheap_align);

        /* malloc buffer must meet range constraints */
        if (ret_heap < (Address)lo_addr ||
                ret_heap + size - 1 > (Address)hi_addr) {
            free(heap);
            free(node);
#ifdef DEBUG
            fprintf(stderr, "MALLOC'd area fails range constraints\n");
#endif
            return NULL;
        }

        /* define new heap */
        node->heap.ret_addr = (void *)ret_heap;
        node->heap.addr = heap;
        node->heap.len = size_heap;
        node->heap.type = HEAP_TYPE_MALLOC;


    } else { /* use mmap() for allocation */
        Address lo = (Address) lo_addr;
        Address hi = (Address) hi_addr;
        int fd;
        unsigned nmaps;
        dyninstmm_t *maps;

        /* What if we need to allocate memory not in the area we can mmap? */
#if defined (os_linux)  && defined(arch_power)
        DYNINSTheap_loAddr = getpagesize();
#endif
        if ((hi < DYNINSTheap_loAddr) || (lo > DYNINSTheap_hiAddr)) {
            free(node);
#ifdef DEBUG
            fprintf(stderr, "CAN'T MMAP IN RANGE GIVEN\n");
#endif
            return NULL;
        }


        /* Get memory map and sort it.  maps will point to malloc'd memory
           that we must free. */
        if (0 > DYNINSTgetMemoryMap(&nmaps, &maps)) {
            free(node);
#ifdef DEBUG
            fprintf(stderr, "failed MMAP\n");
#endif
            return NULL;
        }
        qsort(maps, (size_t)nmaps, (size_t)sizeof(dyninstmm_t), &heap_memmapCompare);
        heap_checkMappings(nmaps, maps); /* sanity check */

        /*DYNINSTheap_printMappings(nmaps, maps);*/

        fd = DYNINSTheap_mmapFdOpen();
        if (0 > fd) {
            free(node);
            free(maps);
            return NULL;
        }
        heap = (void*) constrained_mmap(size, lo, hi, maps, nmaps, fd);
        free(maps);
        DYNINSTheap_mmapFdClose(fd);
        if (!heap) {
            free(node);
#ifdef DEBUG
            fprintf(stderr, "failed MMAP(2)\n");
#endif
            return NULL;
        }

        /* define new heap */
        node->heap.ret_addr = heap;
        node->heap.addr = heap;
        node->heap.len = size;
        node->heap.type = HEAP_TYPE_MMAP;
    }

    /* insert new heap into heap list */
    node->prev = NULL;
    node->next = Heaps;
    if (Heaps) Heaps->prev = node;
    Heaps = node;

    return node->heap.ret_addr;
}
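
A hypothetical usage sketch of DYNINSTos_malloc(): the caller must pass a size that is already a multiple of DYNINSTheap_align and check both error returns, NULL for an allocation failure and (void *)-1 for a misaligned size. The bounds below are illustrative, not from the source, and the Address typedef is assumed from the surrounding code.

/* Hypothetical caller: request a block with no placement constraint. */
static void *example_inferior_alloc(size_t aligned_size)
{
    void *buf = DYNINSTos_malloc(aligned_size, (void *)0, (void *)(Address)-1);

    if (buf == NULL || buf == (void *)-1)
        return NULL;    /* allocation failed or size was misaligned */
    return buf;
}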

int DYNINSTos_free(void *buf)
{
    int ret = 0;
    heapList_t *t;
    /*
    fprintf(stderr, "*** DYNINSTos_free(0x%08x)\n", buf);
    */
    for (t = Heaps; t != NULL; t = t->next) {
        /* lookup heap by (returned) address */
        heap_t *heap = &t->heap;
        if (heap->ret_addr != buf) continue;

        /* remove heap from list */
        if (t->next) t->next->prev = t->prev;
        if (t->prev) t->prev->next = t->next;
        if (Heaps == t) Heaps = t->next;

        /* deallocate heap */
        switch (heap->type) {
        case HEAP_TYPE_MMAP:
            if (!unmap_region(heap->addr, heap->len)) {
                perror("DYNINSTos_free(munmap)");
                ret = -1;
            }
            break;
        case HEAP_TYPE_MALLOC:
            free(heap->addr);
            break;
        default:
            fprintf(stderr, "DYNINSTos_free(): unknown inferior heap type\n");
            ret = -1;
            break;
        }

        /* free list element */
        free(t);
        break;
    }

    return ret;
}
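
DYNINSTos_free() expects unmap_region() to return nonzero on success so that it can perror() on failure. A hypothetical sketch of such a wrapper around munmap(), assuming that convention and <sys/mman.h>:

/* Hypothetical unmap_region() sketch: release an mmap'd inferior heap.
   Returns nonzero on success, 0 on failure, matching the check in
   DYNINSTos_free(). */
static int unmap_region(void *addr, size_t len)
{
    return munmap(addr, len) == 0;
}

Keeping the wrapper to a single munmap() call leaves errno untouched for the perror() in the caller's error path.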