Example #1
/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;

  pool = malloc (sizeof (struct MemoryPool));
  if (pool == NULL)
    return NULL;
#ifdef MAP_ANONYMOUS
  pool->memory = MMAP (NULL, max, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  pool->memory = MAP_FAILED;
#endif
  if ((pool->memory == MAP_FAILED) || (pool->memory == NULL))
    {
      pool->memory = malloc (max);
      if (pool->memory == NULL)
        {
          free (pool);
          return NULL;
        }
      pool->is_mmap = MHD_NO;
    }
  else
    {
      pool->is_mmap = MHD_YES;
    }
  pool->pos = 0;
  pool->end = max;
  pool->size = max;
  return pool;
}
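The `struct MemoryPool` definition is not part of this snippet. A minimal sketch consistent with the fields the constructor fills in (the types are assumptions for illustration, not libmicrohttpd's verbatim layout):

/* Sketch of the pool layout implied by MHD_pool_create() above.
   Field names come from the code; exact types are assumed. */
struct MemoryPool
{
  char *memory;   /* mmap'd or malloc'd backing store */
  size_t size;    /* total size of the backing store */
  size_t pos;     /* offset of the first free byte (allocations grow upward) */
  size_t end;     /* offset of the end of the free area */
  int is_mmap;    /* MHD_YES if memory came from mmap, MHD_NO if from malloc */
};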
Example #2
static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
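Note that `MMAP` here takes only four arguments: in glibc's malloc it is a convenience macro that fills in the anonymous-mapping boilerplate common to every heap mapping. A sketch of the idea (the real definition lives in malloc/malloc.c):

/* Sketch: glibc-style MMAP macro for anonymous heap mappings. */
#define MMAP(addr, size, prot, flags) \
  __mmap ((addr), (size), (prot), (flags) | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)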
Example #3
	/*******************************************************
		Function: extract_archive_member

		Given an archive of WHIRL objects, extract the one
		specified and put it into a separate file.

	 *******************************************************/
static int
extract_archive_member (bfd *abfd, string path)
{
    int fd = -1;
    int mode = 0666;
    pointer addr = (pointer)-1;
    struct areltdata *p_areltdata = (struct areltdata *)abfd->arelt_data;
    struct ar_hdr *p_hdr = arch_hdr(abfd);

    if ((fd = OPEN (path, O_RDWR|O_CREAT|O_TRUNC, mode)) != -1)
	addr = (pointer) MMAP (0, p_hdr->ar_size, PROT_READ|PROT_WRITE,
			       MAP_SHARED, fd, 0);

    if (fd == -1 || addr == (pointer)-1 || FCHMOD (fd, mode) != 0) {
    	perror("cannot create intermediate file");
    	return -1;
    }

    CLOSE (fd);

    MEMCPY (addr, bfd_tell(abfd), p_hdr->ar_size);

    MUNMAP (addr, p_hdr->ar_size);

    return 0;

} /* extract_archive_member */
Example #4
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_realloc");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	int *test = vmem_realloc(vmp, NULL, sizeof (int));
	ASSERTne(test, NULL);

	test[0] = test_value;
	ASSERTeq(test[0], test_value);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	test = vmem_realloc(vmp, test, sizeof (int) * 10);
	ASSERTne(test, NULL);
	ASSERTeq(test[0], test_value);
	test[1] = test_value;
	test[9] = test_value;

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, test);

	vmem_delete(vmp);

	DONE(NULL);
}
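Throughout these NVML/PMDK unit tests, the upper-case MMAP, OPEN, FSTAT and friends are test-framework wrappers that abort the test on failure, which is why the call sites carry no inline error checks. A sketch of that checked-wrapper pattern (assumed; the framework's own source is not shown here):

/* Sketch: perform the syscall, assert success, return the result. */
static void *
ut_mmap_checked(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	void *ret = mmap(addr, len, prot, flags, fd, off);
	ASSERTne(ret, MAP_FAILED);	/* abort the test on failure */
	return ret;
}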
Example #5
static void *safe_mmap(void *start, size_t length, int flags)
{
	void *result;
	
	result = MMAP(start, length, flags);
	if (result == (void *)-1) THROW_UNIX_ERROR(errno);
	
	return result;
}
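The `(void *)-1` sentinel is the value of `MAP_FAILED` from <sys/mman.h>. A hypothetical caller, assuming this codebase's three-argument MMAP wrapper maps anonymous memory with PROT_READ|PROT_WRITE:

/* Hypothetical usage: reserve a 1 MiB anonymous region. THROW_UNIX_ERROR
   handles the failure path, so the result can be used unconditionally. */
char *arena = safe_mmap(NULL, 1 << 20, MAP_PRIVATE | MAP_ANONYMOUS);
memset(arena, 0, 1 << 20);	/* assumes the wrapper mapped it writable */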
Example #6
/*
 * swap_mappings - given two mmapped regions, swap them.
 *
 * Try swapping src and dest by unmapping src, mapping a new dest with
 * the original src address as a hint. If successful, unmap original dest.
 * Map a new src with the original dest as a hint.
 */
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
	char *d = *dest;
	char *s = *src;
	char *td, *ts;

	MUNMAP(*src, size);

	/* mmap destination using src addr as hint */
	td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	MUNMAP(*dest, size);
	*dest = td;

	/* mmap src using original destination addr as a hint */
	ts = MMAP(d, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS,
		-1, 0);
	*src = ts;
}
Example #7
static void *MMAP_A(size_t pages, size_t alignment)
{
	void *j, *p;
	size_t first_size, rest, begin, end;
	if (pages%malloc_pagesize != 0)
		pages = pages - pages%malloc_pagesize + malloc_pagesize;
	first_size = pages + alignment - malloc_pagesize;
	p = MMAP(first_size);
	rest = ((size_t)p) % alignment;
	j = (rest == 0) ? p : (void*) ((size_t)p + alignment - rest);
	begin = (size_t)j - (size_t)p;
	if (begin != 0) munmap(p, begin);
	end = (size_t)p + first_size - ((size_t)j + pages);
	if(end != 0) munmap( (void*) ((size_t)j + pages), end);

	return j;
}
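A worked instance of the over-map-and-trim arithmetic above, assuming malloc_pagesize == 4096 (all values illustrative):

/* pages = 8192, alignment = 16384
 * first_size = 8192 + 16384 - 4096 = 20480       (five pages over-mapped)
 * suppose MMAP returns p = 0x7f0000003000        (not 16 KiB aligned)
 * rest  = p % 16384 = 0x3000
 * j     = p + 16384 - 0x3000 = 0x7f0000004000    (aligned start)
 * begin = j - p = 0x1000 -> munmap(p, 0x1000)            trims the head
 * end   = p + 20480 - (j + 8192) = 0x2000
 *                        -> munmap(j + 8192, 0x2000)     trims the tail,
 * leaving exactly 8192 aligned bytes at j. */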
Example #8
void mmapRegion(uint64_t size) 
{
	uint64_t length; 
	bool mmapRegionsCoalesced = false; 
	// Make sure there's enough allocated memory.
	if (DEFAULT_NUMBER_MMAP_PAGES * sysconf(_SC_PAGE_SIZE) - 
		(sizeof(headerMmapRegion) + sizeof(blockHeader)) < size) {
		uint64_t numberPages = (uint64_t) (size / sysconf(_SC_PAGE_SIZE)) + 1;
		length = numberPages * sysconf(_SC_PAGE_SIZE);
	} else {
		length = DEFAULT_NUMBER_MMAP_PAGES * sysconf(_SC_PAGE_SIZE);
	}

	newMmapRegion = MMAP(length);
	if (newMmapRegion == MAP_FAILED) {
		fprintf(stderr, "memoryManagement.mmapRegion - Memory overflow\n");
		exit(-1);
	}

	// 	Header Mmap region
	struct headerMmapRegion *headerMmap = INIT_STRUCT(headerMmapRegion, newMmapRegion);
	if (!mmapsList) {
		NEXT_MMAP_ADDRESS(headerMmap) = NULL;
	} else if (coalesceMmapRegions(mmapsList, newMmapRegion, length)) {
		mmapRegionsCoalesced = true;
	} else {
		NEXT_MMAP_ADDRESS(headerMmap) = mmapsList; // Add the new mmap at front.
	}

	if(!mmapRegionsCoalesced) {
		headerMmap->length = BYTES_TO_WORDS(length);
		mmapsList = newMmapRegion; // Update the pointer of mmapsList.

		setMmapFooter(newMmapRegion, length);


		// Set first free region.
		void* beginningFreeRegion = ADDRESS_PLUS_OFFSET(newMmapRegion, headerMmapRegion); 
		// header free region and footer mmap to be excluded.
		uint64_t sizeFreeRegion = length - sizeof(headerMmapRegion) - (2 * sizeof(blockHeader)); 
		initialiseFreeMmapRegion(beginningFreeRegion, sizeFreeRegion);
	}

	numberFreeBlocks++; // Add one free block to the counter.
}
Example #9
struct page *region_get_mem(size_t s)
{
  size_t request_bytes;
  void *mem;
  struct page *newp;

  /* Don't get less than K * RPAGESIZE extra memory (K * RPAGESIZE
     is the minimum useful size for something on unused_pages) */
  if (s + K * RPAGESIZE < MINIMUM_MEM_REQUEST)
    request_bytes = MINIMUM_MEM_REQUEST;
  else
    request_bytes = s;

#if 0
  request_bytes = ALIGN(request_bytes, 65536);
#endif

  mem = (struct page *)MMAP(0, request_bytes+RPAGESIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if (!mem) { out_of_memory(); abort(); }

  VALGRIND_MALLOCLIKE_BLOCK(mem, request_bytes+RPAGESIZE, 0, 0);
  // VALGRIND_MAKE_NOACCESS(mem, request_bytes+RPAGESIZE);
  newp = PALIGN(mem, RPAGESIZE);

  VALGRIND_MAKE_WRITABLE(newp, sizeof(struct page));
  memset(newp, 0, sizeof(struct page));

  if (mem == newp) /* Maybe we were lucky! */
    request_bytes += RPAGESIZE;

  addbyaddress(newp);

  /* Add the new memory to unused_pages */
#ifndef NMEMDEBUG
  set_region_range(newp, (char *)newp + s, FREEPAGE);
#endif
  total_page_count += request_bytes >> RPAGELOG;
  newp->pagecount = request_bytes >> RPAGELOG;
  assert(newp->pagecount > 0);
  newp->free = 1;
  addfront(&unused_pages, newp);

  return newp;
}
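`PALIGN` rounds the freshly mapped pointer up to the next RPAGESIZE boundary. A plausible definition of the helpers (assumed; the region allocator's own macros are not shown in this snippet):

/* Assumed helpers: round up to a power-of-two boundary n. */
#define ALIGN(x, n)  (((x) + ((n) - 1)) & ~((n) - 1))
#define PALIGN(p, n) ((struct page *)ALIGN((size_t)(p), (size_t)(n)))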
Example #10
int
main(int argc, char *argv[])
{
    START(argc, argv, "pmem_is_pmem");

    if (argc <  2 || argc > 3)
        UT_FATAL("usage: %s file [env]", argv[0]);

    if (argc == 3)
        UT_ASSERTeq(setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);

    int fd = OPEN(argv[1], O_RDWR);

    ut_util_stat_t stbuf;
    FSTAT(fd, &stbuf);

    Size = stbuf.st_size;
    Addr = MMAP(0, stbuf.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

    CLOSE(fd);

    pthread_t threads[NTHREAD];
    int ret[NTHREAD];

    /* kick off NTHREAD threads */
    for (int i = 0; i < NTHREAD; i++)
        PTHREAD_CREATE(&threads[i], NULL, worker, &ret[i]);

    /* wait for all the threads to complete */
    for (int i = 0; i < NTHREAD; i++)
        PTHREAD_JOIN(threads[i], NULL);

    /* verify that all the threads return the same value */
    for (int i = 1; i < NTHREAD; i++)
        UT_ASSERTeq(ret[0], ret[i]);

    UT_OUT("%d", ret[0]);

    UT_ASSERTeq(unsetenv("PMEM_IS_PMEM_FORCE"), 0);

    UT_OUT("%d", pmem_is_pmem(Addr, Size));

    DONE(NULL);
}
Example #11
static struct pginfo *
alloc_pginfo(void)
{
	struct pginfo *p;
	int i;

	if (pginfo_list == NULL) {
		p = MMAP(malloc_pagesize);
		if (p == MAP_FAILED)
			return NULL;
		for (i = 0; i < malloc_pagesize / sizeof(*p); i++) {
			p[i].next = pginfo_list;
			pginfo_list = &p[i];
		}
	}
	p = pginfo_list;
	pginfo_list = p->next;
	memset(p, 0, sizeof *p);
	return p;
}
Example #12
/*
 * is_zeroed -- read is_zeroed flag from header
 */
static int
is_zeroed(const char *path)
{
	int fd = OPEN(path, O_RDWR);

	struct stat stbuf;
	FSTAT(fd, &stbuf);

	void *addr = MMAP(0, stbuf.st_size, PROT_READ|PROT_WRITE,
			MAP_SHARED, fd, 0);

	struct pmemblk *header = addr;

	int ret = header->is_zeroed;

	MUNMAP(addr, stbuf.st_size);
	CLOSE(fd);

	return ret;
}
Example #13
/*
 * pool_test -- test pool
 *
 * This function creates a memory pool in a file (if dir is not NULL),
 * or in RAM (if dir is NULL) and allocates memory for the test.
 */
void
pool_test(const char *dir)
{
	VMEM *vmp = NULL;

	if (dir != NULL) {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
	} else {
		/* allocate memory for function vmem_create_in_region() */
		void *mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
	}

	if (expect_create_pool == 0) {
		ASSERTeq(vmp, NULL);
		DONE(NULL);
	} else {
		if (vmp == NULL) {
			if (dir == NULL) {
				FATAL("!vmem_create_in_region");
			} else {
				FATAL("!vmem_create");
			}
		}
	}

	char *test = vmem_malloc(vmp, strlen(TEST_STRING_VALUE) + 1);
	ASSERTne(test, NULL);

	strcpy(test, TEST_STRING_VALUE);
	ASSERTeq(strcmp(test, TEST_STRING_VALUE), 0);

	ASSERT(vmem_malloc_usable_size(vmp, test) > 0);

	vmem_free(vmp, test);

	vmem_delete(vmp);
}
Example #14
int
main(int argc, char *argv[])
{
	VMEM *vmp;
	size_t i;

	START(argc, argv, "vmem_create_in_region");

	if (argc > 1)
		FATAL("usage: %s", argv[0]);

	/* allocate memory for function vmem_create_in_region() */
	void *mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

	vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);

	if (vmp == NULL)
		FATAL("!vmem_create_in_region");

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		allocs[i] = vmem_malloc(vmp, sizeof (int));

		ASSERTne(allocs[i], NULL);

		/* check that pointer came from mem_pool */
		ASSERTrange(allocs[i], mem_pool, VMEM_MIN_POOL);
	}

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		vmem_free(vmp, allocs[i]);
	}

	vmem_delete(vmp);

	DONE(NULL);
}
Example #15
static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
		      MAP_FIXED) == (char *) MAP_FAILED)
	return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}
Example #16
int
main(int argc, char *argv[])
{
	START(argc, argv, "checksum");

	if (argc < 2)
		FATAL("usage: %s files...", argv[0]);

	for (int arg = 1; arg < argc; arg++) {
		int fd = OPEN(argv[arg], O_RDONLY);

		struct stat stbuf;
		FSTAT(fd, &stbuf);

		void *addr =
			MMAP(0, stbuf.st_size, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, fd, 0);

		close(fd);

		uint64_t *ptr = addr;

		/*
		 * Loop through, selecting successive locations
		 * where the checksum lives in this block, and
		 * let util_checksum() insert it so it can be
		 * verified against the gold standard fletcher64
		 * routine in this file.
		 */
		while ((void *)(ptr + 1) < addr + stbuf.st_size) {
			/* save whatever was at *ptr */
			uint64_t oldval = *ptr;

			/* mess with it */
			*ptr = htole64(0x123);

			/*
			 * calc a checksum and have it installed
			 */
			util_checksum(addr, stbuf.st_size, ptr, 1);

			uint64_t csum = *ptr;

			/*
			 * verify inserted checksum checks out
			 */
			ASSERT(util_checksum(addr, stbuf.st_size, ptr, 0));

			/* put a zero where the checksum was installed */
			*ptr = 0;

			/* calculate a checksum */
			uint64_t gold_csum = fletcher64(addr, stbuf.st_size);

			/* put the old value back */
			*ptr = oldval;

			/*
			 * verify checksum now fails
			 */
			ASSERT(!util_checksum(addr, stbuf.st_size, ptr, 0));

			/*
			 * verify the checksum matched the gold version
			 */
			ASSERTeq(csum, gold_csum);

			OUT("%s:%lu 0x%lx", argv[arg],
				(void *)ptr - addr, csum);

			ptr++;
		}
	}

	DONE(NULL);
}
Example #17
static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t page_mask = GLRO (dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  /* LIBC_PROBE (memory_heap_new, 2, h, h->size); */
  return h;
}
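`new_heap` reserves address space with PROT_NONE and commits only the prefix actually needed via mprotect, so untouched heap pages cost no swap. A minimal standalone sketch of that reserve-then-commit pattern:

/* Sketch: reserve a large PROT_NONE region, then commit a prefix. */
#include <sys/mman.h>

static void *
reserve_then_commit (size_t reserve, size_t commit)
{
  char *base = mmap (NULL, reserve, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED)
    return NULL;
  if (mprotect (base, commit, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (base, reserve);
      return NULL;
    }
  return base;
}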
Example #18
/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
	struct pdinfo	*pi, *spi;
	struct pginfo	**pd;
	u_long		idx, pidx, lidx;
	caddr_t		result, tail;
	u_long		index, lindex;
	void 		*pdregion = NULL;
	size_t		dirs, cnt;

	pages <<= malloc_pageshift;
	result = MMAP(pages + malloc_guard);
	if (result == MAP_FAILED) {
#ifdef MALLOC_EXTRA_SANITY
		wrtwarning("(ES): map_pages fails");
#endif /* MALLOC_EXTRA_SANITY */
		errno = ENOMEM;
		return (NULL);
	}
	index = ptr2index(result);
	tail = result + pages + malloc_guard;
	lindex = ptr2index(tail) - 1;
	if (malloc_guard)
		mprotect(result + pages, malloc_guard, PROT_NONE);

	pidx = PI_IDX(index);
	lidx = PI_IDX(lindex);

	if (tail > malloc_brk) {
		malloc_brk = tail;
		last_index = lindex;
	}

	dirs = lidx - pidx;

	/* Insert directory pages, if needed. */
	if (pdir_lookup(index, &pi) != 0)
		dirs++;
	if (dirs > 0) {
		pdregion = MMAP(malloc_pagesize * dirs);
		if (pdregion == MAP_FAILED) {
			munmap(result, tail - result);
#ifdef MALLOC_EXTRA_SANITY
		wrtwarning("(ES): map_pages fails");
#endif
			errno = ENOMEM;
			return (NULL);
		}
	}
	cnt = 0;
	for (idx = pidx, spi = pi; idx <= lidx; idx++) {
		if (pi == NULL || PD_IDX(pi->dirnum) != idx) {
			pd = (struct pginfo **)((char *)pdregion +
			    cnt * malloc_pagesize);
			cnt++;
			memset(pd, 0, malloc_pagesize);
			pi = (struct pdinfo *) ((caddr_t) pd + pdi_off);
			pi->base = pd;
			pi->prev = spi;
			pi->next = spi->next;
			pi->dirnum = idx * (malloc_pagesize /
			    sizeof(struct pginfo *));

			if (spi->next != NULL)
				spi->next->prev = pi;
			spi->next = pi;
		}
		if (idx > pidx && idx < lidx) {
			pi->dirnum += pdi_mod;
		} else if (idx == pidx) {
			if (pidx == lidx) {
				pi->dirnum += (u_long)(tail - result) >>
				    malloc_pageshift;
			} else {
				pi->dirnum += pdi_mod - PI_OFF(index);
			}
		} else {
Example #19
buffer_mgr_t buffer_mgr_create( int fd, unicap_format_t *format )
{
    buffer_mgr_t mgr = malloc( sizeof( struct buffer_mgr ) );
    int i;

    unicap_data_buffer_init_data_t init_data = { NULL, NULL, NULL, NULL, (unicap_data_buffer_func_t)v4l2_data_buffer_unref, mgr };

    memset( mgr, 0x0, sizeof( struct buffer_mgr ) );

    if( sem_init( &mgr->lock, 0, 1 ) ) {
        TRACE( "sem_init failed\n" );
        free( mgr );
        return NULL;
    }

    mgr->fd = fd;

    struct v4l2_requestbuffers v4l2_reqbuf;
    memset( &v4l2_reqbuf, 0x0, sizeof( struct v4l2_requestbuffers ) );
    v4l2_reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    v4l2_reqbuf.memory = V4L2_MEMORY_MMAP;
    v4l2_reqbuf.count = MAX_BUFFERS;

    if( IOCTL( fd, VIDIOC_REQBUFS, &v4l2_reqbuf ) < 0 )
    {
        TRACE( "VIDIOC_REQBUFS failed: %s\n", strerror( errno ) );
        return NULL;
    }

    mgr->num_buffers = v4l2_reqbuf.count;

    for( i = 0; i < v4l2_reqbuf.count; i++ ) {
        memset( &mgr->buffers[i], 0x0, sizeof( v4l2cpi_buffer_t ) );
        unicap_data_buffer_init (&mgr->buffers[i].data_buffer, format, &init_data);
        unicap_data_buffer_ref (&mgr->buffers[i].data_buffer);

        mgr->buffers[i].v4l2_buffer.type = v4l2_reqbuf.type;
        mgr->buffers[i].v4l2_buffer.memory = V4L2_MEMORY_MMAP;
        mgr->buffers[i].v4l2_buffer.index = i;

        if( IOCTL( mgr->fd, VIDIOC_QUERYBUF, &mgr->buffers[i].v4l2_buffer ) < 0 ) {
            TRACE( "VIDIOC_QUERYBUF ioctl failed: %s, index = %d\n", strerror( errno ), i );
            // TODO: Cleanup
            return NULL;
        }


        mgr->buffers[i].length = mgr->buffers[i].v4l2_buffer.length;
        mgr->buffers[i].start = MMAP( NULL,
                                      mgr->buffers[i].length,
                                      PROT_READ | PROT_WRITE,
                                      MAP_SHARED,
                                      fd,
                                      mgr->buffers[i].v4l2_buffer.m.offset );
        if( mgr->buffers[i].start == MAP_FAILED ) {
            TRACE( "MMAP Failed: %s, index = %d\n", strerror( errno ), i );
            // TODO: Cleanup
            return NULL;
        }

        mgr->buffers[i].data_buffer.buffer_size = mgr->buffers[i].v4l2_buffer.length;
        mgr->buffers[i].data_buffer.data = mgr->buffers[i].start;

        mgr->free_buffers++;
    }

    return mgr;
}
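Once buffer_mgr_create() has mapped the driver's buffers, capture normally proceeds by queuing each buffer and starting the stream. A hedged sketch of that next step using the standard V4L2 ioctls (error cleanup elided; the mgr layout is the one built above):

/* Sketch: queue every mapped buffer, then start streaming. */
static int
buffer_mgr_start( buffer_mgr_t mgr )
{
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    int i;

    for( i = 0; i < mgr->num_buffers; i++ )
        if( IOCTL( mgr->fd, VIDIOC_QBUF, &mgr->buffers[i].v4l2_buffer ) < 0 )
            return -1;

    return IOCTL( mgr->fd, VIDIOC_STREAMON, &type );
}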
Example #20
int
main(int argc, char *argv[])
{
	START(argc, argv, "checksum");

	if (argc < 2)
		UT_FATAL("usage: %s files...", argv[0]);

	for (int arg = 1; arg < argc; arg++) {
		int fd = OPEN(argv[arg], O_RDONLY);

		os_stat_t stbuf;
		FSTAT(fd, &stbuf);
		size_t size = (size_t)stbuf.st_size;

		void *addr =
			MMAP(NULL, size, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, fd, 0);

		uint64_t *ptr = addr;

		/*
		 * Loop through, selecting successive locations
		 * where the checksum lives in this block, and
		 * let util_checksum() insert it so it can be
		 * verified against the gold standard fletcher64
		 * routine in this file.
		 */
		while ((char *)(ptr + 1) < (char *)addr + size) {
			/* save whatever was at *ptr */
			uint64_t oldval = *ptr;

			/* mess with it */
			*ptr = htole64(0x123);

			/*
			 * calculate a checksum and have it installed
			 */
			util_checksum(addr, size, ptr, 1, 0);

			uint64_t csum = *ptr;

			/*
			 * verify inserted checksum checks out
			 */
			UT_ASSERT(util_checksum(addr, size, ptr, 0, 0));

			/* put a zero where the checksum was installed */
			*ptr = 0;

			/* calculate a checksum */
			uint64_t gold_csum = fletcher64(addr, size);

			/* put the old value back */
			*ptr = oldval;

			/*
			 * verify checksum now fails
			 */
			UT_ASSERT(!util_checksum(addr, size, ptr,
					0, 0));

			/*
			 * verify the checksum matched the gold version
			 */
			UT_ASSERTeq(csum, gold_csum);
			UT_OUT("%s:%" PRIu64 " 0x%" PRIx64, argv[arg],
				(char *)ptr - (char *)addr, csum);

			ptr++;
		}

		uint64_t *addr2 =
			MMAP(NULL, size, PROT_READ|PROT_WRITE,
				MAP_PRIVATE, fd, 0);

		uint64_t *csum = (uint64_t *)addr;

		/*
		 * put a zero where the checksum will be installed
		 * in the second map
		 */
		*addr2 = 0;
		for (size_t i = size / 8 - 1; i > 0; i -= 1) {
			/* calculate a checksum and have it installed */
			util_checksum(addr, size, csum, 1, i * 8);

			/*
			 * put a zero in the second map where an ignored part is
			 */
			*(addr2 + i) = 0;

			/* calculate a checksum */
			uint64_t gold_csum = fletcher64(addr2, size);
			/*
			 * verify the checksum matched the gold version
			 */
			UT_ASSERTeq(*csum, gold_csum);
		}

		CLOSE(fd);
		MUNMAP(addr, size);
		MUNMAP(addr2, size);

	}

	DONE(NULL);
}
Example #21
int
main(int argc, char *argv[])
{
	START(argc, argv, "vmem_delete");

	VMEM *vmp;
	void *ptr;

	if (argc < 2)
		FATAL("usage: %s op:h|f|m|c|r|a|s|d", argv[0]);

	/* allocate memory for function vmem_create_in_region() */
	void *mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

	vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
	if (vmp == NULL)
		FATAL("!vmem_create_in_region");

	ptr = vmem_malloc(vmp, sizeof (long long int));
	if (ptr == NULL)
		ERR("!vmem_malloc");
	vmem_delete(vmp);

	/* arrange to catch SEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);

	/* go through all arguments one by one */
	for (int arg = 1; arg < argc; arg++) {
		/* Scan the character of each argument. */
		if (strchr("hfmcrasd", argv[arg][0]) == NULL ||
				argv[arg][1] != '\0')
			FATAL("op must be one of: h, f, m, c, r, a, s, d");

		switch (argv[arg][0]) {
		case 'h':
			OUT("Testing vmem_check...");
			if (!sigsetjmp(Jmp, 1)) {
				OUT("\tvmem_check returned %i",
							vmem_check(vmp));
			}
			break;

		case 'f':
			OUT("Testing vmem_free...");
			if (!sigsetjmp(Jmp, 1)) {
				vmem_free(vmp, ptr);
				OUT("\tvmem_free succeeded");
			}
			break;

		case 'm':
			OUT("Testing vmem_malloc...");
			if (!sigsetjmp(Jmp, 1)) {
				ptr = vmem_malloc(vmp, sizeof (long long int));
				if (ptr != NULL)
					OUT("\tvmem_malloc succeeded");
				else
					OUT("\tvmem_malloc returned NULL");
			}
			break;

		case 'c':
			OUT("Testing vmem_calloc...");
			if (!sigsetjmp(Jmp, 1)) {
				ptr = vmem_calloc(vmp, 10, sizeof (int));
				if (ptr != NULL)
					OUT("\tvmem_calloc succeeded");
				else
					OUT("\tvmem_calloc returned NULL");
			}
			break;

		case 'r':
			OUT("Testing vmem_realloc...");
			if (!sigsetjmp(Jmp, 1)) {
				ptr = vmem_realloc(vmp, ptr, 128);
				if (ptr != NULL)
					OUT("\tvmem_realloc succeeded");
				else
					OUT("\tvmem_realloc returned NULL");
			}
			break;

		case 'a':
			OUT("Testing vmem_aligned_alloc...");
			if (!sigsetjmp(Jmp, 1)) {
				ptr = vmem_aligned_alloc(vmp, 128, 128);
				if (ptr != NULL)
					OUT("\tvmem_aligned_alloc succeeded");
				else
					OUT("\tvmem_aligned_alloc"
							" returned NULL");
			}
			break;

		case 's':
			OUT("Testing vmem_strdup...");
			if (!sigsetjmp(Jmp, 1)) {
				ptr = vmem_strdup(vmp, "Test string");
				if (ptr != NULL)
					OUT("\tvmem_strdup succeeded");
				else
					OUT("\tvmem_strdup returned NULL");
			}
			break;

		case 'd':
			OUT("Testing vmem_delete...");
			if (!sigsetjmp(Jmp, 1)) {
				vmem_delete(vmp);
				OUT("\tvmem_delete succeeded");
			}
			break;
		}
	}

	DONE(NULL);
}
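The signal_handler installed above is not part of the snippet. A plausible definition, paired with the sigsetjmp(Jmp, 1) calls in the test loop (assumed; Jmp is the file-scope jump buffer the test already uses):

/* Sketch: return control to the test loop instead of dying on SIGSEGV. */
static sigjmp_buf Jmp;

static void
signal_handler(int sig)
{
	siglongjmp(Jmp, 1);
}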
Example #22
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_check_allocations");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	size_t object_size;
	for (object_size = 8; object_size <= TEST_MAX_ALLOCATION_SIZE;
							object_size *= 2) {
		size_t i;
		size_t j;

		if (dir == NULL) {
			mem_pool = MMAP(NULL, VMEM_MIN_POOL,
					PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

			vmp = vmem_pool_create_in_region(mem_pool,
				VMEM_MIN_POOL);
			if (vmp == NULL)
				FATAL("!vmem_pool_create_in_region");
		} else {
			vmp = vmem_pool_create(dir, VMEM_MIN_POOL);
			if (vmp == NULL)
				FATAL("!vmem_pool_create");
		}

		memset(allocs, 0, TEST_ALLOCS_SIZE);

		for (i = 0; i < TEST_ALLOCS_SIZE; ++i) {
			allocs[i] =  vmem_malloc(vmp, object_size);
			if (allocs[i] == NULL) {
				/* out of memory in pool */
				break;
			}

			/* check that pointer came from mem_pool */
			if (dir == NULL) {
				ASSERTrange(allocs[i],
					mem_pool, VMEM_MIN_POOL);
			}

			/* fill each allocation with a unique value */
			memset(allocs[i], (char)i, object_size);
		}

		ASSERT((i > 0) && (i + 1 < TEST_MAX_ALLOCATION_SIZE));

		/* check for unexpected modifications of the data */
		for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) {
			char *buffer = allocs[i];
			for (j = 0; j < object_size; ++j) {
				if (buffer[j] != (char)i)
					FATAL("Content of data object was "
						"modified unexpectedly for "
						"object size: %zu, id: %zu",
						object_size, j);
			}
		}

		vmem_pool_delete(vmp);
	}

	DONE(NULL);
}
Example #23
int
main(int argc, char *argv[])
{
	int fd;
	struct stat stbuf;
	void *dest;
	void *src;
	off_t dest_off = 0;
	off_t src_off = 0;
	uint64_t bytes = 0;
	int who = 0;
	off_t overlap = 0;

	START(argc, argv, "pmem_memmove");

	fd = OPEN(argv[1], O_RDWR);
	FSTAT(fd, &stbuf);

	if (argc < 3)
		USAGE();

	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsboS",
		    argv[arg][0]) == NULL || argv[arg][1] != ':')
			FATAL("op must be d: or s: or b: or o: or S:");

		off_t val = strtoul(&argv[arg][2], NULL, 0);

		switch (argv[arg][0]) {
		case 'd':
			if (val <= 0)
				FATAL("bad offset (%lu) with d: option", val);
			dest_off = val;
			break;

		case 's':
			if (val <= 0)
				FATAL("bad offset (%lu) with s: option", val);
			src_off = val;
			break;

		case 'b':
			if (val <= 0)
				FATAL("bad length (%lu) with b: option", val);
			bytes = val;
			break;

		case 'o':
			if (val != 1 && val != 2)
				FATAL("bad val (%lu) with o: option", val);
			who = (int)val;
			break;

		case 'S':
			overlap = val;
			break;
		}
	}

	if (who == 0 && overlap != 0)
		USAGE();

	/* for overlap the src and dest must be created differently */
	if (who == 0) {
		/* src > dest */
		dest = pmem_map(fd);
		if (dest == NULL)
			FATAL("!could not mmap dest file %s", argv[1]);

		src = MMAP(dest + stbuf.st_size, stbuf.st_size,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
			-1, 0);
		/*
		 * It's very unlikely that src would not be > dest. pmem_map
		 * chooses the first unused address >= 1TB, large
		 * enough to hold the given range, and 1GB aligned. Log
		 * the error if the mapped addresses cannot be swapped
		 * but allow the test to continue.
		 */
		if (src <= dest) {
			swap_mappings(&dest, &src, stbuf.st_size, fd);
			if (src <= dest)
				ERR("cannot map files in memory order");
		}

		do_memmove(fd, dest, src, argv[1], dest_off, src_off,
			0, bytes);

		/* dest > src */
		swap_mappings(&dest, &src, stbuf.st_size, fd);

		if (dest <= src)
			ERR("cannot map files in memory order");

		do_memmove(fd, dest, src, argv[1], dest_off, src_off, 0,
			bytes);
		MUNMAP(dest, stbuf.st_size);
		MUNMAP(src, stbuf.st_size);
	} else if (who == 1) {
		/* src overlap with dest */
		dest = pmem_map(fd);
		if (dest == NULL)
			FATAL("!Could not mmap %s: \n", argv[1]);

		src = dest + overlap;
		memset(dest, 0, bytes);
		do_memmove(fd, dest, src, argv[1], dest_off, src_off,
			overlap, bytes);
		MUNMAP(dest, stbuf.st_size);
	} else {
		/* dest overlap with src */
		dest = pmem_map(fd);
		if (dest == NULL) {
			FATAL("!Could not mmap %s: \n", argv[1]);
		}
		src = dest;
		dest = src + overlap;
		memset(src, 0, bytes);
		do_memmove(fd, dest, src, argv[1], dest_off, src_off,
			overlap, bytes);
		MUNMAP(src, stbuf.st_size);
	}

	CLOSE(fd);

	DONE(NULL);
}
Example #24
/*
 * Alias ld.so entry point -- receives a bootstrap structure and a vector
 * of strings.  The vector is "well-known" to us, and consists of pointers
 * to string constants.  This aliasing bootstrap requires no relocation in
 * order to run, save for the pointers of constant strings.  This second
 * parameter provides this.  Note that this program is carefully coded in
 * order to maintain the "no bootstrapping" requirement -- it calls only
 * local functions, uses no intrinsics, etc.
 */
void *
__rtld(Elf32_Boot *ebp, const char *strings[], int (*funcs[])())
{
	int i, j, p;			/* working */
	int page_size = 0;		/* size of a page */
	const char *program_name = EMPTY; /* our name */
	int ldfd;			/* fd assigned to ld.so */
	int dzfd = 0;			/* fd assigned to /dev/zero */
	Elf32_Ehdr *ehdr;		/* ELF header of ld.so */
	Elf32_Phdr *phdr;		/* first Phdr in file */
	Elf32_Phdr *pptr;		/* working Phdr */
	Elf32_Phdr *lph;		/* last loadable Phdr */
	Elf32_Phdr *fph = 0;		/* first loadable Phdr */
	caddr_t	maddr;			/* pointer to mapping claim */
	Elf32_Off mlen;			/* total mapping claim */
	caddr_t faddr;			/* first program mapping of ld.so */
	Elf32_Off foff;			/* file offset for segment mapping */
	Elf32_Off flen;			/* file length for segment mapping */
	caddr_t addr;			/* working mapping address */
	caddr_t zaddr;			/* /dev/zero working mapping addr */
	struct stat sb;			/* stat buffer for sizing */
	auxv_t *ap;			/* working aux pointer */

	/*
	 * Discover things about our environment: auxiliary vector (if
	 * any), arguments, program name, and the like.
	 */
	while (ebp->eb_tag != NULL) {
		switch (ebp->eb_tag) {
		case EB_ARGV:
			program_name = *((char **)ebp->eb_un.eb_ptr);
			break;
		case EB_AUXV:
			for (ap = (auxv_t *)ebp->eb_un.eb_ptr;
			    ap->a_type != AT_NULL; ap++)
				if (ap->a_type == AT_PAGESZ) {
					page_size = ap->a_un.a_val;
					break;
				}
			break;
		}
		ebp++;
	}

	/*
	 * If we didn't get a page size from looking in the auxiliary
	 * vector, we need to get one now.
	 */
	if (page_size == 0) {
		page_size = SYSCONFIG(_CONFIG_PAGESIZE);
		ebp->eb_tag = EB_PAGESIZE, (ebp++)->eb_un.eb_val =
		    (Elf32_Word)page_size;
	}

	/*
	 * Map in the real ld.so.  Note that we're mapping it as
	 * an ELF database, not as a program -- we just want to walk its
	 * data structures.  Further mappings will actually establish the
	 * program in the address space.
	 */
	if ((ldfd = OPEN(LDSO, O_RDONLY)) == -1)
		PANIC(program_name);
/* NEEDSWORK (temp kludge to use xstat so we can run on G6) */
	if (FSTAT(2, ldfd, &sb) == -1)
		PANIC(program_name);
	ehdr = (Elf32_Ehdr *)MMAP(0, sb.st_size, PROT_READ | PROT_EXEC,
	    MAP_SHARED, ldfd, 0);
	if (ehdr == (Elf32_Ehdr *)-1)
		PANIC(program_name);

	/*
	 * Validate the file we're looking at, ensure it has the correct
	 * ELF structures, such as: ELF magic numbers, coded for 386,
	 * is a ".so", etc.
	 */
	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
	    ehdr->e_ident[EI_MAG3] != ELFMAG3)
		PANIC(program_name);
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
		PANIC(program_name);
	if (ehdr->e_type != ET_DYN)
		PANIC(program_name);
	if (ehdr->e_machine != EM_386)
		PANIC(program_name);
	if (ehdr->e_version > EV_CURRENT)
		PANIC(program_name);

	/*
	 * Point at program headers and start figuring out what to load.
	 */
	phdr = (Elf32_Phdr *)((caddr_t)ehdr + ehdr->e_phoff);
	for (p = 0, pptr = phdr; p < (int)ehdr->e_phnum; p++,
	    pptr = (Elf32_Phdr *)((caddr_t)pptr + ehdr->e_phentsize))
		if (pptr->p_type == PT_LOAD) {
			if (fph == 0) {
				fph = pptr;
			} else if (pptr->p_vaddr <= lph->p_vaddr)
				PANIC(program_name);
			lph = pptr;
		}

	/*
	 * We'd better have at least one loadable segment.
	 */
	if (fph == 0)
		PANIC(program_name);

	/*
	 * Map enough address space to hold the program (as opposed to the
	 * file) represented by ld.so.  The amount to be assigned is the
	 * range between the end of the last loadable segment and the
	 * beginning of the first PLUS the alignment of the first segment.
	 * mmap() can assign us any page-aligned address, but the relocations
	 * assume the alignments included in the program header.  As an
	 * optimization, however, let's assume that mmap() will actually
	 * give us an aligned address -- since if it does, we can save
	 * an munmap() later on.  If it doesn't -- then go try it again.
	 */
	mlen = ROUND((lph->p_vaddr + lph->p_memsz) -
	    ALIGN(fph->p_vaddr, page_size), page_size);
	maddr = (caddr_t)MMAP(0, mlen, PROT_READ | PROT_EXEC,
	    MAP_SHARED, ldfd, 0);
	if (maddr == (caddr_t)-1)
		PANIC(program_name);
	faddr = (caddr_t)ROUND(maddr, fph->p_align);

	/*
	 * Check to see whether alignment skew was really needed.
	 */
	if (faddr != maddr) {
		(void) MUNMAP(maddr, mlen);
		mlen = ROUND((lph->p_vaddr + lph->p_memsz) -
		    ALIGN(fph->p_vaddr, fph->p_align) + fph->p_align,
		    page_size);
		maddr = (caddr_t)MMAP(0, mlen, PROT_READ | PROT_EXEC,
		    MAP_SHARED, ldfd, 0);
		if (maddr == (caddr_t)-1)
			PANIC(program_name);
		faddr = (caddr_t)ROUND(maddr, fph->p_align);
	}

	/*
	 * We have the address space reserved, so map each loadable segment.
	 */
	for (p = 0, pptr = phdr; p < (int)ehdr->e_phnum; p++,
	    pptr = (Elf32_Phdr *)((caddr_t)pptr + ehdr->e_phentsize)) {

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if ((pptr->p_type != PT_LOAD) || (pptr->p_memsz == 0))
			continue;

		/*
		 * Determine the file offset to which the mapping will
		 * directed (must be aligned) and how much to map (might
		 * be more than the file in the case of .bss.)
		 */
		foff = ALIGN(pptr->p_offset, page_size);
		flen = pptr->p_memsz + (pptr->p_offset - foff);

		/*
		 * Set address of this segment relative to our base.
		 */
		addr = (caddr_t)ALIGN(faddr + pptr->p_vaddr, page_size);

		/*
		 * If this is the first program header, record our base
		 * address for later use.
		 */
		if (pptr == phdr) {
			ebp->eb_tag = EB_LDSO_BASE;
			(ebp++)->eb_un.eb_ptr = (Elf32_Addr)addr;
		}

		/*
		 * Unmap anything from the last mapping address to this
		 * one.
		 */
		if (addr - maddr) {
			(void) MUNMAP(maddr, addr - maddr);
			mlen -= addr - maddr;
		}

		/*
		 * Determine the mapping protection from the section
		 * attributes.
		 */
		i = 0;
		if (pptr->p_flags & PF_R)
			i |= PROT_READ;
		if (pptr->p_flags & PF_W)
			i |= PROT_WRITE;
		if (pptr->p_flags & PF_X)
			i |= PROT_EXEC;
		if ((caddr_t)MMAP((caddr_t)addr, flen, i,
		    MAP_FIXED | MAP_PRIVATE, ldfd, foff) == (caddr_t)-1)
			PANIC(program_name);

		/*
		 * If the memory occupancy of the segment overflows the
		 * definition in the file, we need to "zero out" the
		 * end of the mapping we've established, and if necessary,
		 * map some more space from /dev/zero.
		 */
		if (pptr->p_memsz > pptr->p_filesz) {
			foff = (int)faddr + pptr->p_vaddr + pptr->p_filesz;
			zaddr = (caddr_t)ROUND(foff, page_size);
			for (j = 0; j < (int)(zaddr - foff); j++)
				*((char *)foff + j) = 0;
			j = (faddr + pptr->p_vaddr + pptr->p_memsz) - zaddr;
			if (j > 0) {
				if (dzfd == 0) {
					dzfd = OPEN(ZERO, O_RDWR);
					if (dzfd == -1)
						PANIC(program_name);
				}
				if ((caddr_t)MMAP((caddr_t)zaddr, j, i,
				    MAP_FIXED | MAP_PRIVATE, dzfd,
				    0) == (caddr_t)-1)
					PANIC(program_name);
			}
		}

		/*
		 * Update the mapping claim pointer.
		 */
		maddr = addr + ROUND(flen, page_size);
		mlen -= maddr - addr;
	}

	/*
	 * Unmap any final reservation.
	 */
	if (mlen > 0)
		(void) MUNMAP(maddr, mlen);

	/*
	 * Clean up file descriptor space we've consumed.  Pass along
	 * the /dev/zero file descriptor we got -- every cycle counts.
	 */
	(void) CLOSE(ldfd);
	if (dzfd != 0)
		ebp->eb_tag = EB_DEVZERO, (ebp++)->eb_un.eb_val = dzfd;

	ebp->eb_tag = EB_NULL, ebp->eb_un.eb_val = 0;

	/* The two bytes before _rt_boot is for the alias entry point */
	return (void *) (ehdr->e_entry + faddr - 2);
}
Example #25
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct");

	if (argc != 3)
		UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);

	int npools = atoi(argv[2]);
	const char *dir = argv[1];
	int r;

	PMEMobjpool *pops[npools];
	void *guard_after[npools];

	char path[MAX_PATH_LEN];
	for (int i = 0; i < npools; ++i) {
		snprintf(path, MAX_PATH_LEN, "%s/testfile%d", dir, i);
		pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
				S_IWUSR | S_IRUSR);

		/*
		 * Reserve a page after the pool for address checks, if it
		 * doesn't map precisely at that address - it's OK.
		 */
		guard_after[i] =
			MMAP((char *)pops[i] + PMEMOBJ_MIN_POOL, Ut_pagesize,
				PROT_READ | PROT_WRITE,
				MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

		UT_ASSERTne(guard_after[i], NULL);

		if (pops[i] == NULL)
			UT_FATAL("!pmemobj_create");
	}

	PMEMoid oids[npools];

	for (int i = 0; i < npools; ++i) {
		r = pmemobj_alloc(pops[i], &oids[i], ALLOC_SIZE, 1, NULL, NULL);
		UT_ASSERTeq(r, 0);
	}

	PMEMoid invalid = {123, 321};

	UT_ASSERTeq(pmemobj_pool_by_oid(OID_NULL), NULL);
	UT_ASSERTeq(pmemobj_pool_by_oid(invalid), NULL);

	for (int i = 0; i < npools; ++i) {
		UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]);
	}

	UT_ASSERTeq(pmemobj_pool_by_ptr(NULL), NULL);
	UT_ASSERTeq(pmemobj_pool_by_ptr((void *)0xCBA), NULL);

	for (int i = 0; i < npools; ++i) {
		void *before_pool = (char *)pops[i] - 1;
		void *after_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL + 1;
		void *edge = (char *)pops[i] + PMEMOBJ_MIN_POOL;
		void *middle = (char *)pops[i] + (PMEMOBJ_MIN_POOL / 2);
		void *in_oid = (char *)pmemobj_direct(oids[i]) +
			(ALLOC_SIZE / 2);
		UT_ASSERTeq(pmemobj_pool_by_ptr(before_pool), NULL);
		UT_ASSERTeq(pmemobj_pool_by_ptr(after_pool), NULL);
		UT_ASSERTeq(pmemobj_pool_by_ptr(edge), NULL);
		UT_ASSERTeq(pmemobj_pool_by_ptr(middle), pops[i]);
		UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), pops[i]);
		pmemobj_close(pops[i]);
		UT_ASSERTeq(pmemobj_pool_by_ptr(middle), NULL);
		UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), NULL);

		MUNMAP(guard_after[i], Ut_pagesize);
	}

	DONE(NULL);
}
Example #26
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	VMEM *vmp;
	START(argc, argv, "vmem_freespace");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_pool_create_in_region() */
		void *mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_pool_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_pool_create_in_region");
	} else {
		vmp = vmem_pool_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_pool_create");
	}

	size_t total_space = vmem_pool_freespace(vmp);
	size_t free_space = total_space;

	/* allocate all memory */
	void *prev = NULL;
	void **next;
	while ((next = vmem_malloc(vmp, 128)) != NULL) {
		*next = prev;
		prev = next;
		size_t space = vmem_pool_freespace(vmp);
		/* free space can only decrease */
		ASSERT(space <= free_space);
		free_space = space;
	}

	ASSERTne(prev, NULL);
	/* for small allocations use all memory */
	ASSERTeq(free_space, 0);

	while (prev != NULL) {
		void **act = prev;
		prev = *act;
		vmem_free(vmp, act);
		size_t space = vmem_pool_freespace(vmp);
		/* free space can only increase */
		ASSERT(space >= free_space);
		free_space = space;
	}

	free_space = vmem_pool_freespace(vmp);

	/*
	 * Depending on the distance of the 'mem_pool' from the
	 * chunk alignment (4MB) a different size of free memory
	 * will be wasted on base_alloc inside jemalloc.
	 * Rest of the internal data should not waste more than 10% of space.
	 */
	ASSERT(free_space > ((total_space - 4L * MB) * 9) / 10);

	vmem_pool_delete(vmp);

	DONE(NULL);
}
Example #27
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_movnt_align");

	if (argc != 2)
		FATAL("usage: %s type", argv[0]);

	char type = argv[1][0];

	/*
	 * mmap twice the number of bytes to transfer and
	 * separators after each region
	 */
	size_t mmap_size = N_BYTES * 2 + SEPARATOR * 2;

	void *buff = MMAP(NULL, mmap_size,
			PROT_READ|PROT_WRITE,
			MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (buff == NULL)
		FATAL("!mmap");


	void *src;
	void *dst;
	size_t s;
	switch (type) {
	case 'C': /* memcpy */
		src = buff + N_BYTES + SEPARATOR;
		dst = buff;

		/* unmap separators */
		MUNMAP(dst + N_BYTES, SEPARATOR);
		MUNMAP(src + N_BYTES, SEPARATOR);


		/* check memcpy with 0 size */
		check_memcpy(dst, src, 0);

		/* check memcpy with unaligned size */
		for (s = 0; s < CACHELINE; s++)
			check_memcpy(dst, src, N_BYTES - s);

		/* check memcpy with unaligned begin */
		for (s = 0; s < CACHELINE; s++)
			check_memcpy(dst + s, src, N_BYTES - s);

		/* check memcpy with unaligned begin and end */
		for (s = 0; s < CACHELINE; s++)
			check_memcpy(dst + s, src + s, N_BYTES - 2 * s);

		break;
	case 'B': /* memmove backward */
		MUNMAP(buff, SEPARATOR);
		MUNMAP(buff + 2 * N_BYTES, 2 * SEPARATOR);
		src = buff + SEPARATOR;
		dst = buff + N_BYTES;


		/* check memmove in backward direction with 0 size */
		check_memmove(dst, src, 0);

		/* check memmove in backward direction with unaligned size */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst, src, N_BYTES - s);

		/* check memmove in backward direction with unaligned begin */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst + s, src, N_BYTES - s);

		/*
		 * check memmove in backward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst + s, src + s, N_BYTES - 2 * s);

		break;
	case 'F': /* memmove forward */
		MUNMAP(buff, SEPARATOR);
		MUNMAP(buff + 2 * N_BYTES, 2 * SEPARATOR);
		src = buff + N_BYTES;
		dst = buff + SEPARATOR;

		/* check memmove in forward direction with 0 size */
		check_memmove(dst, src, 0);

		/* check memmove in forward direction with unaligned size */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst, src, N_BYTES - s);

		/* check memmove in forward direction with unaligned begin */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst + s, src, N_BYTES - s);

		/*
		 * check memmove in forward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE; s++)
			check_memmove(dst + s, src + s, N_BYTES - 2 * s);

		break;
	case 'S': /* memset */
		MUNMAP(buff, SEPARATOR);
		MUNMAP(buff + N_BYTES + SEPARATOR, N_BYTES + SEPARATOR);
		dst = buff + SEPARATOR;

		/* check memset with 0 size */
		check_memset(dst, 0);

		/* check memset with unaligned size */
		for (s = 0; s < CACHELINE; s++)
			check_memset(dst, N_BYTES - s);

		/* check memset with unaligned begin */
		for (s = 0; s < CACHELINE; s++)
			check_memset(dst + s, N_BYTES - s);

		/* check memset with unaligned begin and end */
		for (s = 0; s < CACHELINE; s++)
			check_memset(dst + s, N_BYTES - 2 * s);

		break;
	default:
		FATAL("!wrong type of test");
		break;
	}

	MUNMAP(buff, mmap_size);

	DONE(NULL);
}