int main(int argc, char **argv)
{
	char *p;
	int c;
	int len = HPS;
	int inject = 0;
	int collapse = 0;
	int mremap_flag = 0;
	int pfn_get = 0;
	int split = 0;
	int verbose = 0;
	char buf[256];
	uint64_t pfn_buf[10];

	while ((c = getopt(argc, argv, "m:w:crpsiv")) != -1) {
		switch (c) {
		case 'm':
			len = strtol(optarg, NULL, 10) * HPS;
			break;
		case 'c':
			collapse = 1;
			break;
		case 'r':
			mremap_flag = 1;
			break;
		case 'p':
			pfn_get = 1;
			break;
		case 's':
			split = 1;
			break;
		case 'i':
			inject = 1;
			break;
		case 'v':
			verbose = 1;
			break;
		default:
			usage(argv[0]);
		}
	}

	signal(SIGUSR1, sighandler);

	p = mmap_check((void *)0x7f0000000000, len, PROTECTION,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	Dprintf(verbose, "p = %p\n", p);
	madvise(p, len, MADV_HUGEPAGE);

	/* directly allocate thp in page fault. */
	write_bytes(p, len);
	pause();

	if (inject)
		madvise(p, 1, MADV_SOFT_OFFLINE);

	if (split) {
		mlock(p, 4096);
		munlock(p, 4096);
	}

	if (mremap_flag) {
		Dprintf(verbose, "mremap old p is %p!\n", p);
		/* Try to grow in place; with flags == 0 the kernel may not move the mapping. */
		p = mremap(p, len, len + HPS, 0);
		if (p == MAP_FAILED)
			err("mremap");
		Dprintf(verbose, "mremap new p is %p!\n", p);
	}

	/* a split thp will be collapsed again in the next khugepaged scan */
	if (split && collapse) {
		/* split thp */
		write_bytes(p, len);
	}
	pause();

	munmap(p, len);
	return 0;
}
Example #2
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		} else if (config_fill && zero == false && opt_junk && oldsize
		    < newsize) {
			/*
			 * mremap(2) clobbers the original mapping, so
			 * junk/zero filling is not preserved.  There is no
			 * need to zero fill here, since any trailing
			 * uninitialized memory is demand-zeroed by the
			 * kernel, but junk filling must be redone.
			 */
			memset(ret + oldsize, 0xa5, newsize - oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}
Example #3
/* ------------------------------ realloc ------------------------------ */
void* realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return malloc(bytes);
    if (! bytes) {
	free (oldmem);
	return NULL;
    }

    checked_request2size(bytes, nb);
    __MALLOC_LOCK;
    av = get_malloc_state();

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8-byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

	/* the final flags argument 1 is MREMAP_MAYMOVE */
	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    __MALLOC_UNLOCK;
    return retval;
}
Example #4
TEST(sys_mman, mremap) {
  ASSERT_EQ(MAP_FAILED, mremap(nullptr, 0, 0, 0));
}
Example #5
/*
 *  try_remap()
 *	try to remap from the old size to the new size
 */
static int try_remap(
	const args_t *args,
	uint8_t **buf,
	const size_t old_sz,
	const size_t new_sz)
{
	uint8_t *newbuf;
	int retry, flags = 0;
#if defined(MREMAP_MAYMOVE)
	const int maymove = MREMAP_MAYMOVE;
#else
	const int maymove = 0;
#endif

#if defined(MREMAP_FIXED) && defined(MREMAP_MAYMOVE)
	flags = maymove | (mwc32() & MREMAP_FIXED);
#else
	flags = maymove;
#endif

	for (retry = 0; retry < 100; retry++) {
#if defined(MREMAP_FIXED)
		void *addr = rand_mremap_addr(new_sz, flags);
#endif
		if (!g_keep_stressing_flag)
			return 0;
#if defined(MREMAP_FIXED)
		if (addr) {
			newbuf = mremap(*buf, old_sz, new_sz, flags, addr);
		} else {
			newbuf = mremap(*buf, old_sz, new_sz, flags & ~MREMAP_FIXED);
		}
#else
		newbuf = mremap(*buf, old_sz, new_sz, flags);
#endif
		if (newbuf != MAP_FAILED) {
			*buf = newbuf;
			return 0;
		}

		switch (errno) {
		case ENOMEM:
		case EAGAIN:
			continue;
		case EINVAL:
#if defined(MREMAP_FIXED)
			/*
			 * Earlier kernels may not support this or we
			 * chose a bad random address, so just fall
			 * back to non fixed remapping
			 */
			if (flags & MREMAP_FIXED) {
				flags &= ~MREMAP_FIXED;
				continue;
			}
#endif
			break;
		case EFAULT:
		default:
			break;
		}
	}
	pr_fail_err("mremap");
	return -1;
}
Example #6
int _ULCC_HIDDEN _remap_pages_rand(
		struct _page_picker_s *picker,
		const unsigned long *start,
		const unsigned long *end,
		const int n,
		char *do_remap,
		const int movedata)
{
	void *remap_to, *remap_to_end, *remap_from;
	int c_colors = 0, i_remap = 0, i, idr;
	int *index = NULL;
	unsigned int rand_seed;
	int ret = 0;

	index = malloc(sizeof(*index) * ULCC_NR_CACHE_COLORS);
	if(!index) {
		_ULCC_PERROR("failed to malloc for index array in _remap_pages_rand");
		ret = -1;
		goto finish;
	}

	/* Compute the number of colors in this request and build index array */
	for(i = 0; i < ULCC_NR_CACHE_COLORS; i++)
		if(picker[i].needed > 0)
			index[c_colors++] = i;
	_ULCC_ASSERT(c_colors > 0);

	rand_seed = time(NULL);

	/* For each data region, do the remapping */
	for(idr = 0, i = 0; idr < n; idr++) {
		remap_to = (void *)ULCC_ALIGN_HIGHER(start[idr]);
		remap_to_end = (void *)ULCC_ALIGN_LOWER(end[idr]);

		while(remap_to < remap_to_end) {
			if(!do_remap[i_remap++]) {
				remap_to += ULCC_PAGE_BYTES;
				continue;
			}

			/* Get the next picked page to remap from */
			i = (i + rand_r(&rand_seed)) % c_colors;
			while(picker[index[i]].picked == 0)
				i = (i + 1) % c_colors;

			remap_from = picker[index[i]].pages[--picker[index[i]].picked];

			/* Copy data before remapping if so required */
			if(movedata)
				memcpy(remap_from, remap_to, ULCC_PAGE_BYTES);

			/* Remap the picked physical page to user data page */
			if(mremap(remap_from, ULCC_PAGE_BYTES, ULCC_PAGE_BYTES,
			   MREMAP_MAYMOVE | MREMAP_FIXED, remap_to) == MAP_FAILED) {
				_ULCC_PERROR("mremap failed in _remap_pages_rand");
				ret = -1;
				goto finish;
			}

			/* Repair the page hole caused by the above remapping */
			if(mmap(remap_from, ULCC_PAGE_BYTES, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0, 0) != remap_from) {
				_ULCC_PERROR("mmap failed in _remap_pages_rand");
				ret = -1;
				goto finish;
			}

			remap_to += ULCC_PAGE_BYTES;
		} /* while */
	} /* for each data region */

finish:
	if(index)
		free(index);

	return ret;
}
Example #7
int main(int argc, char **argv)
{	int	x = 0;
	char	*args[10];

	setuid(2);

	signal(SIGCHLD, sigchld);
	do_signals();

	x += getpid();
	x += getppid();
	x += getuid();
	x += getgid();
	x += setsid();
	x += seteuid();
	x += setegid();
	lseek(0, 0, -1);
	kill(0, 0);
	signal(99, 0);
	signal(SIGINT, int_handler);
	signal(SIGSEGV, segv_handler);
//	*(int *) 0 = 0;
	pipe(0);
	munmap(0, 0);
	mincore(0, 0);
	shmget(0);
	shmat(0);

	line = __LINE__;
	poll(-1, 0, 0);
	signal(SIGSEGV, SIG_IGN);
//	ppoll(-1, -1, -1, 0);
	signal(SIGSEGV, SIG_DFL);
	sched_yield();
	readv(-1, 0, 0, 0);
	writev(-1, 0, 0, 0);
	msync(0, 0, 0);
	fsync(-1);
	fdatasync(-1);
	semget(0, 0, 0);
	semctl(0, 0, 0);
	uselib(NULL);
	pivot_root(0, 0);
	personality(-1);
	setfsuid(-1);
	flock(-1, 0);
	shmdt(0, 0, 0);
	times(0);
	mremap(0, 0, 0, 0, 0);
	madvise(0, 0, 0);
	fchown(-1, 0, 0);
	lchown(0, 0, 0);
	setreuid();
	setregid();
	link("/nonexistant", "/also-nonexistant");

	do_slow();

	symlink("/nothing", "/");
	rename("/", "/");
	mkdir("/junk/stuff////0", 0777);
	geteuid();
	getsid();
	getpgid();
	getresuid();
	getresgid();
	getpgid();
	ptrace(-1, 0, 0, 0);
	semop(0, 0, 0);
	capget(0, 0);

	line = __LINE__;
	gettimeofday(0, 0);
	settimeofday(0, 0);
	dup(-1);
	dup2(-1, -1);
	shmctl(0, 0, 0, 0);
	execve("/bin/nothing", "/bin/nothing", 0);
	alarm(9999);
	bind(0, 0, 0);
	socket(0, 0, 0);
	accept(0, 0, 0);
	listen(0);
	shutdown(0);
	getsockname(0, 0, 0);
	getpeername(0, 0, 0);
	truncate(0, 0);
	ftruncate(0, 0);
	line = __LINE__;
	if (vfork() == 0)
		exit(0);
	line = __LINE__;
	x = opendir("/", 0, 0);
	line = __LINE__;
	readdir(x, 0, 0);
	line = __LINE__;
	closedir(x);
	line = __LINE__;
	chroot("/");
	line = __LINE__;
	sigaction(0, 0, 0);
	line = __LINE__;
	sigprocmask(0, 0, 0);
	x += open("/nothing", 0);
	x += chdir("/nothing");
	x += mknod("/nothing/nothing", 0);
	x += ioctl();
	execve("/nothing", NULL, NULL);
	line = __LINE__;
	x += close(-2);
	line = __LINE__;
	if (fork() == 0)
		exit(0);
	line = __LINE__;
	clone(clone_func, 0, 0, 0);
	line = __LINE__;
	brk(0);
	sbrk(0);
	line = __LINE__;
	mmap(0, 0, 0, 0, 0);
	line = __LINE__;
	uname(0);
	line = __LINE__;
	getcwd(0, 0);
	line = __LINE__;
	iopl(3);
	ioperm(0, 0, 0);
	mount(0, 0, 0, 0, 0);
	umount(0, 0);
	umount(0, 0, 0);
	swapon(0, 0);
	swapoff(0);
	sethostname(0);
	line = __LINE__;
	time(NULL);
	unlink("/nothing");
	line = __LINE__;
	rmdir("/nothing");
	chmod(0, 0);
	line = __LINE__;
# if defined(__i386) || defined(__amd64)
	modify_ldt(0);
# endif

	stat("/doing-nice", 0);
	nice(0);

	args[0] = "/bin/df";
	args[1] = "-l";
	args[2] = NULL;
	close(1);
	open("/dev/null", O_WRONLY);
	/***********************************************/
	/*   Some  syscalls  aren't  available direct  */
	/*   from  libc,  so get them here. We mostly  */
	/*   care  about  the  ones which have caused  */
	/*   implementation   difficulty  and  kernel  */
	/*   crashes - eventually we can be complete.  */
	/***********************************************/
	line = __LINE__;
	open("/system-dependent-syscalls-follow", 0);
	line = __LINE__;
	if (fork() == 0)
		exit(0);

	{int status;
	while (wait(&status) >= 0)
		;
	}

	sigaltstack(0, 0);

	/*vm86(0, 0);*/

	/***********************************************/
	/*   Some syscalls aren't directly accessible, */
	/*   e.g. legacy.			       */
	/***********************************************/
#if defined(__x86_64__)
	trace(__LINE__, "x64 syscalls");
	syscall(174, 0, 0, 0); // create_module
	syscall(176, 0, 0, 0); // delete_module
	syscall(178, 0, 0, 0); // query_module
#else
	trace(__LINE__, "x32 syscalls");
	syscall(0, 0, 0, 0); // restart_syscall
	syscall(34, 0, 0, 0); // nice
	syscall(59, 0, 0, 0); // oldolduname	
	syscall(109, 0, 0, 0); // olduname	
	if (fork() == 0)
		syscall(1, 0, 0, 0); // exit
#endif
	line = __LINE__;
	execve("/bin/df", args, NULL);

	fprintf(stderr, "Error: should not get here -- %s\n", strerror(errno));

	exit(1);
}
Example #8
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment != 0)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
#  ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
#  endif
#  ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
#  endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}
Example #9
int
main(int ac, char **av)
{
	const char *const name = ac > 1 ? av[1] : "mmap";
	const intmax_t pagesize = get_page_size();
	const unsigned long length1 = pagesize * 6;
	const unsigned long length2 = pagesize * 3;
	const unsigned long length3 = pagesize * 2;
	const int fd = -1;
	off_t offset;
	void *addr, *p;

#if ULONG_MAX > 4294967295UL
	offset = 0xcafedeadbeef000 & -pagesize;
	addr = (void *) (uintmax_t) (0xfacefeed000 & -pagesize);
#else
	offset = 0xdeadbeef000 & -pagesize;
	addr = (void *) (unsigned int) (0xfaced000 & -pagesize);
#endif
	const uintmax_t uoffset =
	       sizeof(offset) == sizeof(int) ? (uintmax_t) (unsigned int) offset
					     : (uintmax_t) offset;

	(void) close(0);
	(void) close(0);
	printf("%s(NULL, 0, PROT_NONE, MAP_FILE, 0, 0) = -1 EBADF (%m)\n",
	       name);
	mmap(NULL, 0, PROT_NONE, MAP_FILE, 0, 0);

	p = mmap(addr, length1, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, fd, offset);
	if (MAP_FAILED == p)
		perror_msg_and_fail("mmap");
	printf("%s(%p, %lu, PROT_READ|PROT_WRITE, "
	       "MAP_PRIVATE|MAP_ANONYMOUS, %d, %#jx) = %p\n",
	       name, addr, length1, fd, uoffset, p);

	if (msync(p, length1, MS_SYNC))
		perror_msg_and_fail("msync");
	printf("msync(%p, %lu, MS_SYNC) = 0\n", p, length1);

	if (mprotect(p, length1, PROT_NONE))
		perror_msg_and_fail("mprotect");
	printf("mprotect(%p, %lu, PROT_NONE) = 0\n", p, length1);

	addr = mremap(p, length1, length2, 0);
	if (MAP_FAILED == addr)
		perror_msg_and_fail("mremap");
	printf("mremap(%p, %lu, %lu, 0) = %p\n", p, length1, length2, addr);

	p =  mremap(addr, length2, length3, MREMAP_MAYMOVE | MREMAP_FIXED,
		    addr + length2);
	if (MAP_FAILED == p)
		perror_msg_and_fail("mremap");
	printf("mremap(%p, %lu, %lu, MREMAP_MAYMOVE|MREMAP_FIXED"
	       ", %p) = %p\n", addr, length2, length3, addr + length2, p);

	if (madvise(p, length3, MADV_NORMAL))
		perror_msg_and_fail("madvise");
	printf("madvise(%p, %lu, MADV_NORMAL) = 0\n", p, length3);

	if (munmap(p, length3))
		perror_msg_and_fail("munmap");
	printf("munmap(%p, %lu) = 0\n", p, length3);

	if (mlockall(MCL_FUTURE))
		perror_msg_and_fail("mlockall");
	puts("mlockall(MCL_FUTURE) = 0");

	puts("+++ exited with 0 +++");
	return 0;
}
Example #10
void *pool_remap(void *ptr, ulint old_size, ulint size)
{
	ptr = mremap(ptr, old_size, size, MREMAP_MAYMOVE);
	return ptr;
}
Example #11
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
Example #12
static int push(lua_State *L)
{
	int     memfd;
	struct shmhead *ptr;
	const char *shmname = NULL;
	char *semname = NULL;
	const char *pushdata = NULL;
	int shmsize = 0;
	char *data = NULL;
	/*\ get args \*/
	if(lua_gettop(L) == 2){
		shmname = lua_tostring (L, 1);
		pushdata = lua_tostring (L, 2);
	}else{
		return 0;
	}
	if (shmname == NULL){
		perror("shmname");
		return 1;
	}
	if (pushdata == NULL){
		perror("pushdata");
		return 1;
	}

	/*\ open share memory \*/
	if((memfd = shm_open(shmname,O_RDWR,FILE_MODE)) == -1){
		perror("shm_open");
		return 1;
	}
	/*\ memory map \*/
	if((ptr = mmap(NULL, sizeof(struct shmhead), PROT_READ | PROT_WRITE,MAP_SHARED,memfd,0)) == MAP_FAILED){
		close(memfd);
		perror("mmap");
		return 1;
	}
	shmsize = ptr->data_size + sizeof(struct shmhead);
	/*\ memory remap \*/
	ptr = mremap(ptr, sizeof(struct shmhead), shmsize, MREMAP_MAYMOVE);
	if (ptr == MAP_FAILED){
		close(memfd);
		perror("mremap");
		return 1;
	}
	data = (char *)ptr + sizeof(struct shmhead);
	semname = ptr->semname;
	close(memfd);

	/*==============================================================*/
	sem_t  *mutex = NULL;
	/*\ open semaphore \*/
	if((mutex = sem_open(semname, 0)) == SEM_FAILED){
		munmap(ptr, shmsize);
		perror("mutex");
		return 1;
	}
	/*\ lock semaphore \*/
	sem_wait(mutex);
	
	/*\==> push method <==\*/
	int svlen = strlen(pushdata);
	int lvlen = ptr->data_size - ptr->data_used;
	svlen = (lvlen > svlen ) ? svlen : lvlen;
	if(svlen > 0){
		memcpy(data + ptr->data_used, pushdata, svlen);
		ptr->data_used += svlen;
	}
	/*\ memory unmap \*/
	munmap(ptr, shmsize);
	
	/*\ unlock semaphore \*/
	sem_post(mutex);
	/*\ close semaphore \*/
	sem_close(mutex);
	return 0;
}
Example #13
//static int get(lua_State *L)
//static int set(lua_State *L)
static int pop(lua_State *L)
{
	int     memfd;
	struct shmhead *ptr;
	const char *shmname = NULL;
	char *semname = NULL;
	int shmsize = 0;
	char *data = NULL;
	/*\ get args \*/
	if(lua_gettop(L) == 1){
		shmname = lua_tostring (L, 1);
	}else{
		return 0;
	}

	/*\ open share memory \*/
	if((memfd = shm_open(shmname,O_RDWR,FILE_MODE)) == -1){
		perror("shm_open");
		return 1;
	}
	/*\ memory map \*/
	if((ptr = mmap(NULL, sizeof(struct shmhead), PROT_READ | PROT_WRITE,MAP_SHARED,memfd,0)) == MAP_FAILED){
		close(memfd);
		perror("mmap");
		return 1;
	}
	shmsize = ptr->data_size + sizeof(struct shmhead);
	/*\ memory remap \*/
	ptr = mremap(ptr, sizeof(struct shmhead), shmsize, MREMAP_MAYMOVE);
	if (ptr == MAP_FAILED){
		close(memfd);
		perror("mremap");
		return 1;
	}
	if (ptr->data_used == 0)
	{
		munmap(ptr, shmsize);
		close(memfd);
		return 0;
	}
	data = (char *)ptr + sizeof(struct shmhead);
	semname = ptr->semname;
	close(memfd);

	/*==============================================================*/
	sem_t  *mutex = NULL;
	/*\ open semaphore \*/
	if((mutex = sem_open(semname, 0)) == SEM_FAILED){
		munmap(ptr, shmsize);
		perror("mutex");
		return 1;
	}
	/*\ lock semaphore \*/
	sem_wait(mutex);
	
	/*\ ==> pop method <== \*/
	lua_pushlstring(L, (const char *)data, ptr->data_used);
	ptr->data_used = 0;
	memset(data, 0, (size_t)ptr->data_size);
	
	/*\ memory unmap \*/
	munmap(ptr, shmsize);
	
	/*\ unlock semaphore \*/
	sem_post(mutex);
	/*\ close semaphore \*/
	sem_close(mutex);
	return 1;
}
Example #14
int main(int argc, char *argv[])
{
	int fd, rc;
	void *p, *q, *r;

	test_init(argc, argv);

	hpage_size = check_hugepagesize();
	page_size = getpagesize();


	fd = hugetlbfs_unlinked_fd();
	if (fd < 0)
		FAIL("hugetlbfs_unlinked_fd()");
	init_slice_boundary(fd);

	/* First, hugepages above, normal below */
	p = mmap((void *)(slice_boundary + hpage_size), hpage_size,
		 PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED)
		FAIL("mmap(huge above): %s", strerror(errno));

	do_readback(p, hpage_size, "huge above");

	q = mmap((void *)(slice_boundary - page_size), page_size,
		 PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (q == MAP_FAILED)
		FAIL("mmap(normal below): %s", strerror(errno));

	do_readback(q, page_size, "normal below");

	verbose_printf("Attempting to remap...");

	r = mremap(q, page_size, 2*page_size, 0);
	if (r == MAP_FAILED) {
		verbose_printf("disallowed\n");
		rc = munmap(q, page_size);
		if (rc != 0)
			FAIL("munmap(normal below): %s", strerror(errno));
	} else {
		if (r != q)
			FAIL("mremap() moved without MREMAP_MAYMOVE!?");

		verbose_printf("testing...");
		do_readback(q, 2*page_size, "normal below expanded");
		rc = munmap(q, 2*page_size);
		if (rc != 0)
			FAIL("munmap(normal below expanded): %s", strerror(errno));
	}

	rc = munmap(p, hpage_size);
	if (rc != 0)
		FAIL("munmap(huge above)");

	/* Next, normal pages above, huge below */
	p = mmap((void *)(slice_boundary + hpage_size), page_size,
		 PROT_READ|PROT_WRITE,
		 MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		FAIL("mmap(normal above): %s", strerror(errno));

	do_readback(p, page_size, "normal above");

	q = mmap((void *)(slice_boundary - hpage_size),
		 hpage_size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);
	if (q == MAP_FAILED)
		FAIL("mmap(huge below): %s", strerror(errno));

	do_readback(q, hpage_size, "huge below");

	verbose_printf("Attempting to remap...");

	r = mremap(q, hpage_size, 2*hpage_size, 0);
	if (r == MAP_FAILED) {
		verbose_printf("disallowed\n");
		rc = munmap(q, hpage_size);
		if (rc != 0)
			FAIL("munmap(huge below): %s", strerror(errno));
	} else {
		if (r != q)
			FAIL("mremap() moved without MREMAP_MAYMOVE!?");

		verbose_printf("testing...");
		do_readback(q, 2*hpage_size, "huge below expanded");
		rc = munmap(q, 2*hpage_size);
		if (rc != 0)
			FAIL("munmap(huge below expanded): %s", strerror(errno));
	}

	rc = munmap(p, page_size);
	if (rc != 0)
		FAIL("munmap(normal above)");


	PASS();
}
Example #15
// AddChromName
// If chrom already known then return existing chrom identifier otherwise add to m_pScaffoldChroms
INT32
AddChromName(char *pszChromName)
{
static INT32 PrevChromID = 0;
int Hash;
int HashIdx;
tsPEScaffoldChrom *pScaffoldChrom;

// check to see if chromosome name already known
if(PrevChromID != 0)
	{
	pScaffoldChrom = &m_pScaffoldChroms[PrevChromID-1];
	if(!stricmp(pszChromName,pScaffoldChrom->szChrom))
		return(pScaffoldChrom->ChromID);
	}

Hash = GenNameHash(pszChromName);
if((HashIdx = m_pHashChroms[Hash]) != 0)
	{
	do {
		pScaffoldChrom = &m_pScaffoldChroms[HashIdx-1];
		if(!stricmp(pszChromName,pScaffoldChrom->szChrom))
			{
			PrevChromID = pScaffoldChrom->ChromID;
			return(pScaffoldChrom->ChromID);
			}
		HashIdx = pScaffoldChrom->HashNext;
		}
	while(HashIdx > 0);
	}

// its a new chrom not previously seen
// realloc as may be required to hold this new chrom
if(m_NumScaffoldChroms == m_AllocdNumScaffoldChroms)
	{
	size_t memreq = m_AllocdScaffoldChromsMem + (cAllocChromNames * sizeof(tsPEScaffoldChrom));
#ifdef _WIN32
	pScaffoldChrom = (tsPEScaffoldChrom *) realloc(m_pScaffoldChroms,memreq);
#else
	pScaffoldChrom = (tsPEScaffoldChrom *)mremap(m_pScaffoldChroms,m_AllocdScaffoldChromsMem,memreq,MREMAP_MAYMOVE);
	if(pScaffoldChrom == MAP_FAILED)
		pScaffoldChrom = NULL;
#endif
	if(pScaffoldChrom == NULL)
		{
		gDiagnostics.DiagOut(eDLFatal,gszProcName,"AddChromName: Memory re-allocation to %d bytes - %s",memreq,strerror(errno));
		return(eBSFerrMem);
		}
	m_pScaffoldChroms = pScaffoldChrom;
	m_AllocdScaffoldChromsMem = memreq;
	m_AllocdNumScaffoldChroms += cAllocChromNames;
	}

pScaffoldChrom = &m_pScaffoldChroms[m_NumScaffoldChroms++];
pScaffoldChrom->ChromID = m_NumScaffoldChroms;
pScaffoldChrom->HashNext = m_pHashChroms[Hash];
m_pHashChroms[Hash] = m_NumScaffoldChroms;
strncpy(pScaffoldChrom->szChrom,pszChromName,cMaxChromNameLen);
pScaffoldChrom->szChrom[cMaxChromNameLen] = '\0';
PrevChromID = m_NumScaffoldChroms;
return(m_NumScaffoldChroms);
}
Example #16
void *runtime_realloc(void *start, int canmove, size_t oldlen, size_t newlen)
{
  return mremap(start, oldlen, newlen, canmove ? MREMAP_MAYMOVE : MREMAP_FIXED);
}
Example #17
// AddPEIdent
// If PE name already known then return existing identifier otherwise add to m_pScaffoldChroms
INT32
AddPEIdent(char *pszIdentName)
{
static INT32 PrevPEIdentID = 0;
tsPEIdent *pPEIdent;
int Hash;
int HashIdx;

// check to see if PEIdent name already known
if(PrevPEIdentID != 0)
	{
	pPEIdent = &m_pPEIdents[PrevPEIdentID-1];
	if(!stricmp(pszIdentName,pPEIdent->szIdent))
		return(pPEIdent->IdentID);
	}

Hash = GenNameHash(pszIdentName);
if((HashIdx = m_pHashPEIdents[Hash]) != 0)
	{
	do {
		pPEIdent = &m_pPEIdents[HashIdx-1];
		if(!stricmp(pszIdentName,pPEIdent->szIdent))
			{
			PrevPEIdentID = pPEIdent->IdentID;
			return( pPEIdent->IdentID);
			}
		HashIdx = pPEIdent->HashNext;
		}
	while(HashIdx > 0);
	}

// its a new PE identifier not previously seen
// realloc as may be required to hold this new chrom
if(m_NumPEIdents == m_AllocdNumPEIdents)
	{
	size_t memreq = m_AllocdPEIdentsMem + (cAllocPENames * sizeof(tsPEIdent));
#ifdef _WIN32
	pPEIdent = (tsPEIdent *) realloc(m_pPEIdents,memreq);
#else
	pPEIdent = (tsPEIdent *)mremap(m_pPEIdents,m_AllocdPEIdentsMem,memreq,MREMAP_MAYMOVE);
	if(pPEIdent == MAP_FAILED)
		pPEIdent = NULL;
#endif
	if(pPEIdent == NULL)
		{
		gDiagnostics.DiagOut(eDLFatal,gszProcName,"AddPEIdent: Memory re-allocation to %d bytes - %s",memreq,strerror(errno));
		return(eBSFerrMem);
		}
	m_pPEIdents = pPEIdent;
	m_AllocdPEIdentsMem = memreq;
	m_AllocdNumPEIdents += cAllocPENames;
	}

pPEIdent = &m_pPEIdents[m_NumPEIdents++];
pPEIdent->IdentID = m_NumPEIdents;
pPEIdent->PEScafoldID = 0;
pPEIdent->HashNext = m_pHashPEIdents[Hash];
m_pHashPEIdents[Hash] = m_NumPEIdents;
strncpy(pPEIdent->szIdent,pszIdentName,cMaxChromNameLen);
pPEIdent->szIdent[cMaxChromNameLen] = '\0';
PrevPEIdentID = m_NumPEIdents;
return(m_NumPEIdents);
}
Example #18
/* -*- Mode: C; tab-width: 8; c-basic-offset: 2; indent-tabs-mode: nil; -*- */

#include "rrutil.h"

int main(__attribute((unused)) int argc, char* argv[]) {
  int fd = open(argv[0], O_RDONLY);
  size_t page_size = sysconf(_SC_PAGESIZE);
  char buf[page_size * 2];
  char* p;

  test_assert(fd >= 0);
  test_assert((ssize_t)sizeof(buf) == read(fd, buf, sizeof(buf)));

  p = (char*)mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
  test_assert(p != MAP_FAILED);
  test_assert(p[0] == buf[0]);

  p = (char*)mremap(p, page_size, page_size * 2, MREMAP_MAYMOVE);
  test_assert(p != MAP_FAILED);
  test_assert(p[0] == buf[0]);
  test_assert(p[page_size] == buf[page_size]);

  atomic_puts("EXIT-SUCCESS");
  return 0;
}
Example #19
int
AddScaffold(bool bPE2,						// if false then PE1, if true then PE2
			char *pszPEIdent,				// paired end identifier used to correlate paired ends
			char *pszChrom,					// PE aligns onto this chromosome
			char Strand)					// '+' or '-'
{
static int PrevScafoldID = 0;
tsPEScaffold *pPEScaffold;
tsPEIdent *pPEIdent;
int ChromID;
int PEIdentID;

ChromID = AddChromName(pszChrom);
PEIdentID = AddPEIdent(pszPEIdent);

// if processing PE2 then check to see if already have scaffold with same PEIdent
if(bPE2)
	{
	if(PrevScafoldID != 0)
		{
		pPEScaffold = &m_pScaffolds[PrevScafoldID-1];
		if(pPEScaffold->PE12SeqID == PEIdentID)
			{
			pPEScaffold->PE2ChromID = ChromID;
			pPEScaffold->PE2Sense = Strand == '+' ? 1 : 0;
			return(PrevScafoldID);
			}
		}

	pPEIdent = &m_pPEIdents[PEIdentID-1];
	if((PrevScafoldID = pPEIdent->PEScafoldID) != 0)
		{
		pPEScaffold = &m_pScaffolds[PrevScafoldID-1];
		pPEScaffold->PE2ChromID = ChromID;
		pPEScaffold->PE2Sense = Strand == '+' ? 1 : 0;
		return(PrevScafoldID);
		}
	}
else	// PE1 processing; PE1 identifiers are normally expected to be unique, so here we are just confirming that they are
	{   // once satisfied that PE1 identifiers are always unique, this check could be skipped...
	pPEIdent = &m_pPEIdents[PEIdentID-1];
	if((PrevScafoldID = pPEIdent->PEScafoldID) != 0)
		{
		gDiagnostics.DiagOut(eDLFatal,gszProcName,"AddScaffold: duplicate PE1 identifer - %s onto %s",pszPEIdent,pszChrom);
		return(0);
		}
	}

// new scaffold required
// realloc as may be required to hold this new scaffold
if(m_NumScaffolds == m_AllocdNumScaffolds)
	{
	size_t memreq = m_AllocdScaffoldsMem + (cAllocScafolds * sizeof(tsPEScaffold));
#ifdef _WIN32
	pPEScaffold = (tsPEScaffold *) realloc(m_pScaffolds,memreq);
#else
	pPEScaffold = (tsPEScaffold *)mremap(m_pScaffolds,m_AllocdScaffoldsMem,memreq,MREMAP_MAYMOVE);
	if(pPEScaffold == MAP_FAILED)
		pPEScaffold = NULL;
#endif
	if(pPEScaffold == NULL)
		{
		gDiagnostics.DiagOut(eDLFatal,gszProcName,"AddScaffold: Memory re-allocation to %d bytes - %s",memreq,strerror(errno));
		return(eBSFerrMem);
		}
	m_pScaffolds = pPEScaffold;
	m_AllocdScaffoldsMem = memreq;
	m_AllocdNumScaffolds += cAllocScafolds;
	}
pPEScaffold = &m_pScaffolds[m_NumScaffolds++];
memset(pPEScaffold,0,sizeof(tsPEScaffold));
pPEScaffold->PEScafoldID = m_NumScaffolds;
pPEIdent->PEScafoldID = m_NumScaffolds;
pPEScaffold->PE12SeqID = PEIdentID;
if(!bPE2)
	{
	pPEScaffold->PE1ChromID = ChromID;
	pPEScaffold->PE1Sense = Strand == '+' ? 1 : 0;
	}
else
	{
	pPEScaffold->PE2ChromID = ChromID;
	pPEScaffold->PE2Sense = Strand == '+' ? 1 : 0;
	}
return(m_NumScaffolds);
}
Example #20
int _ULCC_HIDDEN _remap_pages_seq(
		struct _page_picker_s *picker,
		const unsigned long *start,
		const unsigned long *end,
		const int n,
		char *do_remap,
		const int movedata)
{
	void *remap_from, *remap_to, *remap_to_end;
	int c_colors = 0, i_remap = 0, i, idr;
	int *index = NULL;
	int ret = 0;

	index = malloc(sizeof(*index) * ULCC_NR_CACHE_COLORS);
	if(!index) {
		_ULCC_PERROR("failed to malloc for index array in _remap_pages_seq");
		ret = -1;
		goto finish;
	}

	/* Compute the number of colors in this request and build index array */
	for(i = 0; i < ULCC_NR_CACHE_COLORS; i++)
		if(picker[i].needed > 0)
			index[c_colors++] = i;
	_ULCC_ASSERT(c_colors > 0);

	/* For each data region, do the remapping */
	for(idr = 0, i = 0; idr < n; idr++) {
		remap_to = (void *)ULCC_ALIGN_HIGHER(start[idr]);
		remap_to_end = (void *)ULCC_ALIGN_LOWER(end[idr]);

		while(remap_to < remap_to_end) {
			/* If this page does not need to be remapped, skip it */
			if(!do_remap[i_remap++]) {
				remap_to += ULCC_PAGE_BYTES;
				continue;
			}

			/* Get the next picked page to remap from.
			 * Infinite looping is guaranteed not to happen as long as the total
			 * amount of picked pages is not fewer than required, which is true
			 * after _pick_pages successfully returned. */
			while(picker[index[i]].picked <= 0)
				i = (i + 1) % c_colors;

			/* Select a page to remap from.
			 * A possible problem with remapping from tail to head is that the
			 * number of continuous physical pages in the virtual memory area
			 * being remapped to will decrease. Consider to remap from head to
			 * tail. */
			remap_from = picker[index[i]].pages[--picker[index[i]].picked];

			/* Copy data before remapping if so required */
			if(movedata)
				memcpy(remap_from, remap_to, ULCC_PAGE_BYTES);

			/* Remap the picked physical page to user data page; this is one
			 * of the key hacks to user-level cache control */
			if(mremap(remap_from, ULCC_PAGE_BYTES, ULCC_PAGE_BYTES,
			   MREMAP_MAYMOVE | MREMAP_FIXED, remap_to) == MAP_FAILED) {
				_ULCC_PERROR("mremap failed in _remap_pages_seq");
				ret = -1;
				goto finish;
			}

			/* Repair the page hole caused by the above remapping; this is one
			 * of the key hacks to user-level cache control */
			if(mmap(remap_from, ULCC_PAGE_BYTES, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0, 0) != remap_from) {
				_ULCC_PERROR("mmap failed in _remap_pages_seq");
				ret = -1;
				goto finish;
			}

			remap_to += ULCC_PAGE_BYTES;
			i = (i + 1) % c_colors;
		} /* while */
	} /* for each data region */

finish:
	if(index)
		free(index);

	return ret;
}
Example #21
static PyObject *
mmap_resize_method(mmap_object *self,
                   PyObject *args)
{
    Py_ssize_t new_size;
    CHECK_VALID(NULL);
    if (!PyArg_ParseTuple(args, "n:resize", &new_size) ||
        !is_resizeable(self)) {
        return NULL;
    }
    if (new_size < 0 || PY_SSIZE_T_MAX - new_size < self->offset) {
        PyErr_SetString(PyExc_ValueError, "new size out of range");
        return NULL;
    }

    {
#ifdef MS_WINDOWS
        DWORD dwErrCode = 0;
        DWORD off_hi, off_lo, newSizeLow, newSizeHigh;
        /* First, unmap the file view */
        UnmapViewOfFile(self->data);
        self->data = NULL;
        /* Close the mapping object */
        CloseHandle(self->map_handle);
        self->map_handle = NULL;
        /* Move to the desired EOF position */
        newSizeHigh = (DWORD)((self->offset + new_size) >> 32);
        newSizeLow = (DWORD)((self->offset + new_size) & 0xFFFFFFFF);
        off_hi = (DWORD)(self->offset >> 32);
        off_lo = (DWORD)(self->offset & 0xFFFFFFFF);
        SetFilePointer(self->file_handle,
                       newSizeLow, &newSizeHigh, FILE_BEGIN);
        /* Change the size of the file */
        SetEndOfFile(self->file_handle);
        /* Create another mapping object and remap the file view */
        self->map_handle = CreateFileMapping(
            self->file_handle,
            NULL,
            PAGE_READWRITE,
            0,
            0,
            self->tagname);
        if (self->map_handle != NULL) {
            self->data = (char *) MapViewOfFile(self->map_handle,
                                                FILE_MAP_WRITE,
                                                off_hi,
                                                off_lo,
                                                new_size);
            if (self->data != NULL) {
                self->size = new_size;
                Py_INCREF(Py_None);
                return Py_None;
            } else {
                dwErrCode = GetLastError();
                CloseHandle(self->map_handle);
                self->map_handle = NULL;
            }
        } else {
            dwErrCode = GetLastError();
        }
        PyErr_SetFromWindowsErr(dwErrCode);
        return NULL;
#endif /* MS_WINDOWS */

#ifdef UNIX
#ifndef HAVE_MREMAP
        PyErr_SetString(PyExc_SystemError,
                        "mmap: resizing not available--no mremap()");
        return NULL;
#else
        void *newmap;

        if (self->fd != -1 && ftruncate(self->fd, self->offset + new_size) == -1) {
            PyErr_SetFromErrno(PyExc_OSError);
            return NULL;
        }

#ifdef MREMAP_MAYMOVE
        newmap = mremap(self->data, self->size, new_size, MREMAP_MAYMOVE);
#else
#if defined(__NetBSD__)
        newmap = mremap(self->data, self->size, self->data, new_size, 0);
#else
        newmap = mremap(self->data, self->size, new_size, 0);
#endif /* __NetBSD__ */
#endif
        if (newmap == (void *)-1)
        {
            PyErr_SetFromErrno(PyExc_OSError);
            return NULL;
        }
        self->data = newmap;
        self->size = new_size;
        Py_INCREF(Py_None);
        return Py_None;
#endif /* HAVE_MREMAP */
#endif /* UNIX */
    }
}
Example #22
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if ( RESERVED_VA ) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
Example #23
int main(int argc, char **argv)
{
	sigset_t set;
	InterfaceName *ifname;
	int sig;

	bool syntonize = false;
	int i;
	bool pps = false;
	uint8_t priority1 = 248;

	int restorefd = -1;
	void *restoredata = ((void *) -1);
	char *restoredataptr = NULL;
	off_t restoredatalength;
	off_t restoredatacount;
	bool restorefailed = false;
	LinuxIPCArg *ipc_arg = NULL;
	
	int accelerated_sync_count = 0;
    
	LinuxNetworkInterfaceFactory *default_factory =
		new LinuxNetworkInterfaceFactory;
	OSNetworkInterfaceFactory::registerFactory
		(factory_name_t("default"), default_factory);
	LinuxThreadFactory *thread_factory = new LinuxThreadFactory();
	LinuxTimerQueueFactory *timerq_factory = new LinuxTimerQueueFactory();
	LinuxLockFactory *lock_factory = new LinuxLockFactory();
	LinuxTimerFactory *timer_factory = new LinuxTimerFactory();
	LinuxConditionFactory *condition_factory = new LinuxConditionFactory();
	LinuxSharedMemoryIPC *ipc = new LinuxSharedMemoryIPC();
	/* Create Low level network interface object */
	if( argc < 2 ) {
		printf( "Interface name required\n" );
		print_usage( argv[0] );
		return -1;
	}
	ifname = new InterfaceName( argv[1], strlen(argv[1]) ); 

	/* Process optional arguments */
	for( i = 2; i < argc; ++i ) {
		if( argv[i][0] == '-' ) {
			if( toupper( argv[i][1] ) == 'S' ) {
				// Get syntonize directive from command line
				syntonize = true;
			}
			else if( toupper( argv[i][1] ) == 'F' ) {
				// Open file
				if( i+1 < argc ) {
					restorefd = open
						( argv[i+1], O_RDWR|O_CREAT, S_IRUSR|S_IWUSR ); ++i;
					if( restorefd == -1 ) printf
						( "Failed to open restore file\n" );
				} else {
					printf( "Restore file must be specified on "
							"command line\n" );
				}
			}
			else if( toupper( argv[i][1] ) == 'A' ) {
				if( i+1 < argc ) {
					accelerated_sync_count = atoi( argv[++i] );
				} else {
					printf( "Accelerated sync count must be specified on the "
							"command line with A option\n" );
				}
			}
			else if( toupper( argv[i][1] ) == 'G' ) {
				if( i+1 < argc ) {
					ipc_arg = new LinuxIPCArg(argv[++i]);
				} else {
					printf( "Must specify group name on the command line\n" );
				}
			}
			else if( toupper( argv[i][1] ) == 'P' ) {
				pps = true;
			}
			else if( toupper( argv[i][1] ) == 'H' ) {
				print_usage( argv[0] );
				_exit(0);
			}
			else if( toupper( argv[i][1] ) == 'R' ) {
				if( i+1 >= argc ) {
					printf( "Priority 1 value must be specified on "
							"command line, using default value\n" );
				} else {
					unsigned long tmp = strtoul( argv[i+1], NULL, 0 ); ++i;
					if( tmp > 254 ) {
						printf( "Invalid priority 1 value, using "
								"default value\n" );
					} else {
						priority1 = (uint8_t) tmp;
					}
				}
			}
		}
	}
    
	if( !ipc->init( ipc_arg ) ) {
	  delete ipc;
	  ipc = NULL;
	}
	if( ipc_arg != NULL ) delete ipc_arg;
	
	if( restorefd != -1 ) {
		// MMAP file
		struct stat stat0;
		if( fstat( restorefd, &stat0 ) == -1 ) {
			printf( "Failed to stat restore file, %s\n", strerror( errno ));
		} else {
			restoredatalength = stat0.st_size;
			if( restoredatalength != 0 ) {
				if(( restoredata = mmap( NULL, restoredatalength,
										 PROT_READ | PROT_WRITE, MAP_SHARED,
										 restorefd, 0 )) == ((void *)-1) ) {
					printf( "Failed to mmap restore file, %s\n",
							strerror( errno ));
				} else {
					restoredatacount = restoredatalength;
					restoredataptr = (char *) restoredata;
				}
			}
		}
	}
	

	HWTimestamper *timestamper = new LinuxTimestamper();
	IEEE1588Clock *clock =
	  new IEEE1588Clock( false, syntonize, priority1, timestamper,
			     timerq_factory , ipc );
	if( restoredataptr != NULL ) {
	  if( !restorefailed )
	    restorefailed =
	      !clock->restoreSerializedState( restoredataptr, 
					      &restoredatacount );
	  restoredataptr = ((char *)restoredata) +
	    (restoredatalength - restoredatacount);
	}

    IEEE1588Port *port =
      new IEEE1588Port
      ( clock, 1, false, accelerated_sync_count, timestamper, 0, ifname,
	condition_factory, thread_factory, timer_factory, lock_factory );
	if (!port->init_port()) {
		printf("failed to initialize port \n");
		return -1;
	}

	if( restoredataptr != NULL ) {
	  if( !restorefailed ) restorefailed =
	    !port->restoreSerializedState( restoredataptr, &restoredatacount );
	  restoredataptr = ((char *)restoredata) +
	    (restoredatalength - restoredatacount);
	}

	// Start PPS if requested
	if( pps ) {
	  if( !timestamper->HWTimestamper_PPS_start()) {
	    printf( "Failed to start pulse per second I/O\n" );
	  }
	}

	port->processEvent(POWERUP);

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset( &set, SIGTERM );
	if (pthread_sigmask(SIG_BLOCK, &set, NULL) != 0) {
		perror("pthread_sigmask()");
		return -1;
	}

	if (sigwait(&set, &sig) != 0) {
		perror("sigwait()");
		return -1;
	}

	fprintf(stderr, "Exiting on %d\n", sig);

	// Stop PPS if previously started
	if( pps ) {
	  if( !timestamper->HWTimestamper_PPS_stop()) {
	    printf( "Failed to stop pulse per second I/O\n" );
	  }
	}

	// If port is either master or slave, save clock and then port state
	if( restorefd != -1 ) {
	  if( port->getPortState() == PTP_MASTER ||
	      port->getPortState() == PTP_SLAVE ) {
	    off_t len;
	    restoredatacount = 0;
	    clock->serializeState( NULL, &len );
	    restoredatacount += len;
	    port->serializeState( NULL, &len );
	    restoredatacount += len;
	
	    if( restoredatacount > restoredatalength ) {
	      ftruncate( restorefd, restoredatacount );
	      if( restoredata != ((void *) -1)) {
		restoredata =
		  mremap( restoredata, restoredatalength, restoredatacount,
			  MREMAP_MAYMOVE );
	      } else {
		restoredata =
		  mmap( NULL, restoredatacount, PROT_READ | PROT_WRITE,
			MAP_SHARED, restorefd, 0 );
	      }
	      if( restoredata == ((void *) -1 )) goto remap_failed;
	      restoredatalength = restoredatacount;
	    }
	    
	    restoredataptr = (char *) restoredata;
	    clock->serializeState( restoredataptr, &restoredatacount );
	    restoredataptr = ((char *)restoredata) +
	      (restoredatalength - restoredatacount);
	    port->serializeState( restoredataptr, &restoredatacount );
	    restoredataptr = ((char *)restoredata) +
	      (restoredatalength - restoredatacount);
	  remap_failed:
	    ;;
	  }
	  
      
	  if( restoredata != ((void *) -1 ))
	    munmap( restoredata, restoredatalength );
	  close( restorefd );
	}

	if( ipc ) delete ipc;

	return 0;
}
Example #24
int main()
{
	int fd, ret;
	struct stat fs;
	void * r;

	/* create a file with something in it */
	fd = open("foobar",O_WRONLY|O_CREAT|O_TRUNC, 0600);
	//staptest// open ("foobar", O_WRONLY|O_CREAT[[[[.O_LARGEFILE]]]]?|O_TRUNC, 0600) = NNNN

	// Why 64k? ppc64 has 64K pages. ia64 has 16k
	// pages. x86_64/i686 has 4k pages. When we specify an offset
	// to mmap(), it must be a multiple of the page size, so we
	// use the biggest.
	lseek(fd, 65536, SEEK_SET);
	write(fd, "abcdef", 6);
	close(fd);
	//staptest// close (NNNN) = 0

	fd = open("foobar", O_RDONLY);
	//staptest// open ("foobar", O_RDONLY[[[[.O_LARGEFILE]]]]?) = NNNN

	/* stat for file size */
	ret = fstat(fd, &fs);
	//staptest// fstat (NNNN, XXXX) = 0

	r = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	//staptest// mmap[2]* (0x0, 4096, PROT_READ, MAP_SHARED, NNNN, 0) = XXXX

	mlock(r, 4096);
	//staptest// mlock (XXXX, 4096) = 0

	mlock((void *)-1, 4096);
	//staptest// mlock (0x[f]+, 4096) = NNNN

	mlock(0, -1);
#if __WORDSIZE == 64
	//staptest// mlock (0x[0]+, 18446744073709551615) = NNNN
#else
	//staptest// mlock (0x[0]+, 4294967295) = NNNN
#endif

	msync(r, 4096, MS_SYNC);	
	//staptest// msync (XXXX, 4096, MS_SYNC) = 0

	msync((void *)-1, 4096, MS_SYNC);	
	//staptest// msync (0x[f]+, 4096, MS_SYNC) = NNNN

	msync(r, -1, MS_SYNC);	
#if __WORDSIZE == 64
	//staptest// msync (XXXX, 18446744073709551615, MS_SYNC) = NNNN
#else
	//staptest// msync (XXXX, 4294967295, MS_SYNC) = NNNN
#endif

	msync(r, 4096, -1);	
	//staptest// msync (XXXX, 4096, MS_[^ ]+|XXXX) = NNNN

	munlock(r, 4096);
	//staptest// munlock (XXXX, 4096) = 0

	mlockall(MCL_CURRENT);
	//staptest// mlockall (MCL_CURRENT) = 

	mlockall(-1);
	//staptest// mlockall (MCL_[^ ]+|XXXX) = NNNN

	munlockall();
	//staptest// munlockall () = 0

	munmap(r, 4096);
	//staptest// munmap (XXXX, 4096) = 0

	// Ensure the 6th argument is handled correctly..
	r = mmap(NULL, 6, PROT_READ, MAP_PRIVATE, fd, 65536);
	//staptest// mmap[2]* (0x0, 6, PROT_READ, MAP_PRIVATE, NNNN, 65536) = XXXX

	munmap(r, 6);
	//staptest// munmap (XXXX, 6) = 0

	close(fd);

	r = mmap(NULL, 12288, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	//staptest// mmap[2]* (0x0, 12288, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = XXXX

	mprotect(r, 4096, PROT_READ);
	//staptest// mprotect (XXXX, 4096, PROT_READ) = 0

	munmap(r, 12288);
	//staptest// munmap (XXXX, 12288) = 0

	r = mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	//staptest// mmap[2]* (0x0, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = XXXX

	r = mremap(r, 8192, 4096, 0);
	//staptest// mremap (XXXX, 8192, 4096, 0x0, XXXX) = XXXX

	munmap(r, 4096);
	//staptest// munmap (XXXX, 4096) = 0

	// powerpc64's glibc rejects this one. On ia64, the call
	// fails, while on most other architectures it works. So,
	// ignore the return values.
#ifndef __powerpc64__
	r = mmap((void *)-1, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	//staptest// mmap[2]* (0x[f]+, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 

	munmap(r, 8192);
	//staptest// munmap (XXXX, 8192) = 
#endif

	r = mmap(NULL, -1, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#if __WORDSIZE == 64
	//staptest// mmap[2]* (0x0, 18446744073709551615, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = -XXXX (ENOMEM)
#else
	//staptest// mmap[2]* (0x0, 4294967295, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = -XXXX (ENOMEM)
#endif

	// powerpc's glibc (both 32-bit and 64-bit) rejects this one.
#ifndef __powerpc__
	r = mmap(NULL, 8192, -1, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	//staptest// mmap[2]* (0x0, 8192, PROT_[^ ]+|XXXX, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = XXXX

	munmap(r, 8192);
	//staptest// munmap (XXXX, 8192) = 0
#endif

	r = mmap(NULL, 8192, PROT_READ|PROT_WRITE, -1, -1, 0);
	//staptest// mmap[2]* (0x0, 8192, PROT_READ|PROT_WRITE, MAP_[^ ]+|XXXX, -1, 0) = -XXXX

	// Unfortunately, glibc and/or the syscall wrappers will
	// reject a -1 offset, especially on a 32-bit exe on a 64-bit
	// OS. So, we can't really test this one.
	//
	// r = mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, -1);

	munmap((void *)-1, 8192);
	//staptest// munmap (0x[f]+, 8192) = NNNN

	munmap(r, -1);
#if __WORDSIZE == 64
	//staptest// munmap (XXXX, 18446744073709551615) = NNNN
#else
	//staptest// munmap (XXXX, 4294967295) = NNNN
#endif

	return 0;
}
Example #25
static int open_mapping_shm(int cap)
{
  static int first = 1;

  if (cap) Q_printf("MAPPING: open, cap=%s\n",
				decode_mapping_cap(cap));

  if (first) {
    void *ptr1, *ptr2 = MAP_FAILED;
    first = 0;

    /* do a test alias mapping. kernel 2.6.1 doesn't support our mremap trick */
    ptr1 = mmap(0, PAGE_SIZE, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (ptr1 != MAP_FAILED) {
      ptr2 = mremap(ptr1, 0, PAGE_SIZE, MREMAP_MAYMOVE);
      munmap(ptr1, PAGE_SIZE);
      if (ptr2 != MAP_FAILED)
        munmap(ptr2, PAGE_SIZE);
    }
    if (ptr2 == MAP_FAILED) {
      Q_printf("MAPPING: not using mapshm because alias mapping does not work\n");
      if (!cap) return 0;
      leavedos(2);
    }
  }

  /*
   * Now handle individual cases.
   * Don't forget that each of the below code pieces should only
   * be executed once !
   */

#if 0
  if (cap & MAPPING_OTHER) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_EMS) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_DPMI) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_VIDEO) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_VGAEMU) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_HGC) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_HMA) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_SHARED) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_INIT_HWRAM) {
    /* none for now */
  }
#endif
#if 0
  if (cap & MAPPING_INIT_LOWRAM) {
    /* none for now */
  }
#endif

  return 1;
}
Example #26
TEST(sys_mman, mremap_PTRDIFF_MAX) {
  void* map = mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, map);
  ASSERT_EQ(MAP_FAILED, mremap(map, PAGE_SIZE, kHuge, MREMAP_MAYMOVE));
}
Example #27
int main(void) {
  size_t num_bytes = sysconf(_SC_PAGESIZE);
  int fd = open("temp", O_CREAT | O_EXCL | O_RDWR, 0600);
  int* rpage;

  unlink("temp");

  test_assert(fd >= 0);

  int magic = 0x5a5a5a5a;
  size_t i;
  for (i = 0; i < 3 * num_bytes / sizeof(magic); ++i) {
    pwrite64(fd, &magic, sizeof(magic), i * sizeof(magic));
  }

  rpage = mmap(NULL, num_bytes, PROT_READ, MAP_SHARED, fd, 0);
  atomic_printf("rpage:%p\n", rpage);
  test_assert(rpage != MAP_FAILED);

  magic = 0xa5a5a5a5;
  for (i = 0; i < num_bytes / sizeof(magic); ++i) {
    pwrite64(fd, &magic, sizeof(magic), i * sizeof(magic));
  }

  check_mapping(rpage, 0xa5a5a5a5, num_bytes / sizeof(*rpage));

  magic = 0x5a5a5a5a;
  for (i = 0; i < num_bytes / sizeof(magic); ++i) {
    pwrite64(fd, &magic, sizeof(magic), i * sizeof(magic));
  }

  check_mapping(rpage, 0x5a5a5a5a, num_bytes / sizeof(*rpage));

  magic = 0xa5a5a5a5;
  for (i = 0; i < num_bytes / sizeof(magic); ++i) {
    pwrite64(fd, &magic, sizeof(magic), num_bytes + i * sizeof(magic));
  }

  check_mapping(rpage, 0x5a5a5a5a, num_bytes / sizeof(*rpage));

  magic = 0xdeadbeef;
  pwrite64(fd, &magic, sizeof(magic), num_bytes / 2);

  test_assert(rpage[num_bytes / (sizeof(magic) * 2)] == magic);
  test_assert(rpage[0] != magic);

  pwrite64(fd, &magic, sizeof(magic), num_bytes - 2);
  test_assert(rpage[num_bytes / sizeof(magic) - 1] == (int)0xbeef5a5a);

  rpage = mremap(rpage, num_bytes, 5 * num_bytes, MREMAP_MAYMOVE);
  for (i = 3 * num_bytes / sizeof(magic); i < 5 * num_bytes / sizeof(magic);
       ++i) {
    pwrite64(fd, &magic, sizeof(magic), i * sizeof(magic));
  }
  check_mapping(&rpage[(3 * num_bytes) / sizeof(magic)], 0xdeadbeef,
                2 * num_bytes / sizeof(*rpage));

  munmap(rpage, 5 * num_bytes);

  // The case when all pages have been unmapped is special in the
  // implementation - make sure it gets sufficient coverage
  write(fd, &magic, sizeof(magic));
  write(fd, &magic, sizeof(magic));

  rpage = mmap(NULL, num_bytes, PROT_READ, MAP_SHARED, fd, 0);
  atomic_printf("rpage:%p\n", rpage);
  test_assert(rpage != MAP_FAILED);

  // This tests both that the monitor gets activated again if the page is
  // remapped and that `write` works on a monitored page.
  lseek(fd, 0, SEEK_SET);
  magic = 0xb6b6b6b6;
  for (i = 0; i < num_bytes / sizeof(magic); ++i) {
    write(fd, &magic, sizeof(magic));
  }
  check_mapping(rpage, magic, num_bytes / sizeof(*rpage));

  atomic_puts("EXIT-SUCCESS");

  return 0;
}