Example 1
/* create the ring */
struct odp_ring *odp_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags)
{
	char mz_name[ODP_MEMZONE_NAMESIZE];
	struct odp_ring *r;
	struct odp_tailq_entry *te;
	const struct odp_mm_district *mz;
	ssize_t ring_size;
	int mz_flags = 0;
	struct odp_ring_list *ring_list = NULL;

	ring_list = ODP_TAILQ_CAST(odp_ring_tailq.head, odp_ring_list);

	ring_size = odp_ring_get_memsize(count);
	if (ring_size < 0) {
		odp_err = ring_size;
		return NULL;
	}

	te = malloc(sizeof(*te));
	if (te == NULL) {
		ODP_PRINT("Cannot reserve memory for tailq\n");
		odp_err = ENOMEM;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), "%s%s", ODP_RING_MZ_PREFIX, name);

	odp_rwlock_write_lock(ODP_TAILQ_RWLOCK);

	/* reserve a memory zone for this ring. If we can't get odp_config or
	 * we are a secondary process, the mm_district_reserve function will
	 * set odp_err for us appropriately, hence no check in this function */
	mz = odp_mm_district_reserve(mz_name, mz_name, ring_size,
				     socket_id, mz_flags);
	if (mz != NULL) {
		r = mz->addr;

		/* no need to check return value here, we already checked the
		 * arguments above */

		odp_ring_init(r, name, count, flags);

		te->data = (void *)r;

		TAILQ_INSERT_TAIL(ring_list, te, next);
	} else {
		r = NULL;
		ODP_PRINT("Cannot reserve memory\n");
		free(te);
	}

	odp_rwlock_write_unlock(ODP_TAILQ_RWLOCK);

	return r;
}
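
A minimal usage sketch for the ring creation API above (hypothetical names and values; it assumes the ODP ring headers are included, that odp_err is an int-compatible error code, and that count is a power of two, as ring implementations typically require):

	struct odp_ring *r;

	/* create a 1024-slot ring on socket 0 with default flags */
	r = odp_ring_create("test_ring", 1024, 0, 0);
	if (r == NULL)
		ODP_PRINT("ring create failed, odp_err = %d\n", odp_err);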
Example 2
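/* reserve a named block of shared memory */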
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
			  uint32_t flags)
{
	uint32_t i;
	odp_shm_block_t *block;
	void *addr;
	int   fd = -1;
	int   map_flag = MAP_SHARED;

	/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
	int oflag = O_RDWR | O_CREAT | O_TRUNC;
	uint64_t alloc_size;
	uint64_t page_sz;

#ifdef MAP_HUGETLB
	uint64_t huge_sz;
	int need_huge_page = 0;
	uint64_t alloc_hp_size;
#endif

	const struct odp_mm_district *zone = NULL;
	char memdistrict_name[ODP_SHM_NAME_LEN + 8];

	page_sz = odp_sys_page_size();
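	/* extra 'align' bytes of slack let the mapped address be rounded up
	 * to the requested alignment later (ODP_ALIGN_ROUNDUP_PTR below) */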
	alloc_size = size + align;

#ifdef MAP_HUGETLB
	huge_sz = odp_sys_huge_page_size();
	need_huge_page = (huge_sz && alloc_size > page_sz);

	/* munmap of a huge page mapping requires the size to be rounded up
	 * to a multiple of the huge page size (huge_sz is assumed to be a
	 * power of two, so the mask below rounds up) */
	alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
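	/* e.g. with huge_sz = 2 MB, size + align = 3 MB rounds up to 4 MB */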
#endif

	if (flags & ODP_SHM_PROC) {
		/* Creates a file in /dev/shm */
		fd = shm_open(name, oflag,
			      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
		if (fd == -1) {
			ODP_DBG("%s: shm_open failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		snprintf(memdistrict_name, sizeof(memdistrict_name),
			 "%s_%d", name, pid);
		zone = odp_mm_district_reserve(memdistrict_name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		zone = odp_mm_district_reserve(name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else {
		map_flag |= MAP_ANONYMOUS;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	if (find_block(name, NULL)) {
		/* Found a block with the same name */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("name %s already used.\n", name);
		return ODP_SHM_INVALID;
	}

	for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++)
		if (odp_shm_tbl->block[i].addr == NULL)
			/* Found free block */
			break;

	if (i >= ODP_CONFIG_SHM_BLOCKS) {
		/* Table full */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("%s: no more blocks.\n", name);
		return ODP_SHM_INVALID;
	}

	block = &odp_shm_tbl->block[i];

	block->hdl = to_handle(i);
	addr = MAP_FAILED;

#ifdef MAP_HUGETLB

	/* Try huge pages first */
	if (need_huge_page) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_hp_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s: ftruncate huge pages failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_hp_size, PROT_READ | PROT_WRITE,
			    map_flag | MAP_HUGETLB, fd, 0);
		if (addr == MAP_FAILED) {
			ODP_DBG(" %s: No huge pages, fall back to normal pages,"
				"check: /proc/sys/vm/nr_hugepages.\n",
				name);
		} else {
			block->alloc_size = alloc_hp_size;
			block->huge = 1;
			block->page_sz = huge_sz;
		}
	}
#endif

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY)
		addr = zone->addr;

	/* Use normal pages for small or failed huge page allocations */
	if (addr == MAP_FAILED) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_ERR("%s: ftruncate failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
			    map_flag, fd, 0);
		if (addr == MAP_FAILED) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s mmap failed.\n", name);
			return ODP_SHM_INVALID;
		}

		block->alloc_size = alloc_size;
		block->huge = 0;
		block->page_sz = page_sz;
	}

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		block->alloc_size = alloc_size;
		block->huge = 1;
		block->page_sz = ODP_MEMZONE_2MB;
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(zone->addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = -1;
		block->addr = addr;
	} else {
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = fd;
		block->addr = addr;
	}

	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return block->hdl;
}
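
A minimal usage sketch for odp_shm_reserve (hypothetical name and size; ODP_CACHE_LINE_SIZE is an assumed platform-provided alignment constant):

	odp_shm_t shm;

	/* reserve a 64 kB process-local block, cache-line aligned */
	shm = odp_shm_reserve("my_block", 64 * 1024, ODP_CACHE_LINE_SIZE, 0);
	if (shm == ODP_SHM_INVALID)
		ODP_ERR("shared memory reservation failed\n");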