static void test_access(int fd) { uint32_t handle, flink, handle2; struct drm_i915_gem_mmap_gtt mmap_arg; int fd2; handle = gem_create(fd, OBJECT_SIZE); igt_assert(handle); fd2 = drm_open_driver(DRIVER_INTEL); /* Check that fd1 can mmap. */ mmap_arg.handle = handle; do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_arg.offset)); /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED); igt_assert(errno == EACCES); flink = gem_flink(fd, handle); igt_assert(flink); handle2 = gem_open(fd2, flink); igt_assert(handle2); /* Recheck that it works after flink. */ /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset)); }
/*
 * Verify that partial ("short") GTT mmaps of an object work: map 1, 2, 4,
 * ... pages of the object both writably and read-only, and touch the first
 * and last byte of every mapped page.
 */
static void
test_short(int fd)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	int npages, i;

	mmap_arg.handle = gem_create(fd, OBJECT_SIZE);
	igt_assert(mmap_arg.handle);
	igt_assert(drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0);

	for (npages = 1; npages <= OBJECT_SIZE / PAGE_SIZE; npages <<= 1) {
		uint8_t *ro, *rw;

		rw = mmap64(0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, mmap_arg.offset);
		igt_assert(rw != MAP_FAILED);

		ro = mmap64(0, npages * PAGE_SIZE, PROT_READ,
			    MAP_SHARED, fd, mmap_arg.offset);
		igt_assert(ro != MAP_FAILED);

		/* Touch the first and last byte of each mapped page. */
		for (i = 0; i < npages; i++) {
			rw[i * PAGE_SIZE] = ro[i * PAGE_SIZE];
			rw[i * PAGE_SIZE + (PAGE_SIZE - 1)] =
				ro[i * PAGE_SIZE + (PAGE_SIZE - 1)];
		}

		munmap(ro, npages * PAGE_SIZE);
		munmap(rw, npages * PAGE_SIZE);
	}

	gem_close(fd, mmap_arg.handle);
}
/*
 * map the object into both client and server at the same virtual address
 *
 * Strategy: map locally (server side) near a hint address, then ask the
 * peer process (client) to map the same file range at that address via
 * mmap64_peer().  If the two addresses disagree, retry the local mapping
 * at the client's address; on any remaining mismatch both mappings are
 * torn down and the hint advances one page, wrapping until it returns to
 * the starting hint.  Returns the common address, or MAP_FAILED.
 */
void *
mmap64_join(pid_t pid, void *addr, size_t len, int prot, int flags, int fd, off64_t off)
{
	void *svaddr, *cvaddr = MAP_FAILED;
	uintptr_t hint = (uintptr_t)addr;
	uintptr_t start_hint = hint;

	/* With no caller-supplied address, start the search at the
	 * server-side hint region. */
	if ( hint == 0 )
		hint = (uintptr_t)CPU_VADDR_SERVER_HINT;

	do {
		/* Map in this (server) process near the current hint. */
		svaddr = mmap64( (void *)hint, len, prot, flags, fd, off );
		if ( svaddr == MAP_FAILED ) {
			break;
		}
		/* NOTE(review): on the first pass cvaddr is MAP_FAILED so this
		 * never matches; on later passes cvaddr holds the PREVIOUS
		 * iteration's peer address, which was already unmapped below —
		 * a coincidental match here would return with no live peer
		 * mapping.  TODO: confirm intended semantics. */
		if ( svaddr == cvaddr ) {
			return svaddr;
		}
		/* Ask the client process to map the same range at svaddr. */
		cvaddr = mmap64_peer( pid, svaddr, len, prot, flags, fd, off );
		if ( cvaddr == MAP_FAILED ) {
			break;
		}
		if ( svaddr == cvaddr ) {
			return svaddr;
		}
		/* Addresses differ: drop our mapping and retry at the
		 * client's address. */
		if ( munmap( svaddr, len ) == -1 ) {
			svaddr = MAP_FAILED;
			break;
		}
		svaddr = mmap64( cvaddr, len, prot, flags, fd, off );
		if ( svaddr == MAP_FAILED ) {
			break;
		}
		if ( svaddr == cvaddr ) {
			return svaddr;
		}
		/* Still no agreement: unwind both sides and advance the hint. */
		if ( munmap( svaddr, len ) == -1 ) {
			svaddr = MAP_FAILED;
			break;
		}
		if ( munmap_peer( pid, cvaddr, len ) == -1 ) {
			cvaddr = MAP_FAILED;
			break;
		}
		hint += __PAGESIZE;
	} while(hint != start_hint); /* do we really want to wrap all the way */

	/* Failure: clean up whichever mapping is still live. */
	if ( svaddr != MAP_FAILED ) {
		munmap( svaddr, len );
	}
	if ( cvaddr != MAP_FAILED ) {
		munmap_peer( pid, cvaddr, len );
	}
	return MAP_FAILED;
}
int ShmTree::setShm(const char* shmName) { unsigned int size = sizeof(MEM_BUF)+1024*1024*1024; // create shared memory if((m_iFd = shm_open(shmName, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) == -1) { if(errno == EEXIST) { struct stat sb; if((m_iFd = shm_open(shmName, O_RDWR, 0)) == -1) { perror("[pIPC] <IPCGet> shm_open"); return -1; } while (true) { if (fstat(m_iFd, &sb) == -1) { perror("fstat"); return -1; } if (sb.st_size != 0) { break; } } flock(m_iFd, LOCK_EX); m_pBuf = (MEM_BUF *)mmap64(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, m_iFd, 0); if(m_pBuf == MAP_FAILED) { perror("[pIPC] <IPCGet> mmap64"); return -1; } printf("[pIPC] Shared memory is opened [name: %s] [size: %d bytes]\n", shmName, size); flock(m_iFd, LOCK_UN); } else { perror("[pIPC] <IPCGet> shm_create"); return -1; } } else { flock(m_iFd, LOCK_EX); if (ftruncate64(m_iFd, size) == -1) { perror("[pIPC] <IPCGet> ftruncate64"); return -1; } m_pBuf = (MEM_BUF *)mmap64(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, m_iFd, 0); if(m_pBuf == MAP_FAILED) { perror("[pIPC] <IPCGet> mmap64"); return -1; } printf("[pIPC] <IPCGet> Shared memory is created [name: %s] [size=%d bytes]\n", shmName, size); flock(m_iFd, LOCK_UN); } return 0; }
/*
 * Map "length" bytes of the file behind "handle", starting at "offset",
 * read-only.  The offset need not be page aligned: the mapping is widened
 * down to the page boundary and *p is set to the requested byte.
 * Returns OSAL_OK on success, OSAL_ERR if mmap fails (with *mm reset to
 * NULL), or OSAL_NO_MEM if the descriptor allocation fails.
 */
int osal_mmap (osal_mmap_t **mm, void **p, osal_handle_t handle,
	       u_int64_t offset, u_int32_t length)
{
	int align;
	void *addr;

	*mm = osal_alloc (sizeof (osal_mmap_t));
	if (*mm == NULL)
		return (OSAL_NO_MEM); /* malloc failed */

	/* Round the offset down to a page boundary, widening the length to
	 * keep the requested range covered. */
	align = offset % getpagesize ();
	offset -= align;
	length += align;

	addr = mmap64 (NULL, length, PROT_READ, MAP_SHARED,
		       handle.desc, offset);
	if (addr == MAP_FAILED)
	{ /* mmap failed */
		osal_free (*mm);
		*mm = NULL;
		return (OSAL_ERR);
	}

	/* success */
	(*mm)->start = addr;
	(*mm)->length = length;
	*p = (u_int8_t *) addr + align;
	return (OSAL_OK);
}
/*
 * Map a file (from fd's current offset) into a private, read-only memory
 * segment. The file offset must be a multiple of the page size.
 *
 * On success, returns 0 and fills out "pMap". On failure, returns a nonzero
 * value and does not disturb "pMap".
 */
static int sysMapFD(int fd, MemMapping* pMap)
{
	loff_t start;
	size_t length;
	void* memPtr;
	MappedRange* range;

	assert(pMap != NULL);

	if (getFileStartAndLength(fd, &start, &length) < 0)
		return -1;

	memPtr = mmap64(NULL, length, PROT_READ, MAP_PRIVATE, fd, start);
	if (memPtr == MAP_FAILED) {
		LOGW("mmap(%d, R, PRIVATE, %d, %d) failed: %s\n", (int) length,
			fd, (int) start, strerror(errno));
		return -1;
	}

	/* BUG FIX: malloc() was dereferenced unchecked; also allocate before
	 * touching pMap so a failure leaves it undisturbed as documented. */
	range = malloc(sizeof(MappedRange));
	if (range == NULL) {
		LOGW("allocation of MappedRange failed\n");
		munmap(memPtr, length);
		return -1;
	}

	pMap->addr = memPtr;
	pMap->length = length;
	pMap->range_count = 1;
	pMap->ranges = range;
	pMap->ranges[0].addr = memPtr;
	pMap->ranges[0].length = length;

	return 0;
}
/*
 * Re-window section "id" so that TOC entry "idx" (and the run of entries
 * through find_last()) is covered by a read-only mapping.  Any previous
 * window for the section is unmapped first.  Dies on a mapping failure or
 * an out-of-range index.
 */
void _mmap_page(enum dex_sect_id id, uint idx)
{
	struct dex_section_s *s = &dex_section[id];
	u64 offs = 0;
	uint o_align = 0;

	/* Drop the previous window, if any. */
	if (s->mmap_page)
		munmap(s->mmap_page, s->cur_size);

	if (idx > s->nof_entries)
		dub_die("Dex paging fault. Idx %u > %u.", idx, s->nof_entries);

	/* Absolute file offset of the entry; mmap needs it page aligned,
	 * so remember the within-page remainder. */
	offs = s->toc[idx].offs + s->sect_offs;
	o_align = offs & (getpagesize() - 1);

	s->first_item = idx;
	s->cur_offs = s->toc[idx].offs;
	s->last_item = find_last(s, idx);

	/* +4096 tries to make sure that our slack bit-twidling routines
	 * in pcode handling can't cause a segmentation fault */
	s->cur_size = s->toc[s->last_item + 1].offs - s->cur_offs + o_align + 4096;

	s->mmap_page = mmap64(0, s->cur_size, PROT_READ, MAP_SHARED,
			s->sect_fd, offs - o_align);

	if (s->mmap_page == MAP_FAILED)
		dub_sysdie("Couldn't mmap page (offs: %llu size: %llu)",
			offs - o_align, s->cur_size);

	/* cur_start points at the requested item, past the alignment slack. */
	s->cur_start = s->mmap_page + o_align;
}
/*
 * Open the on-disk index "name", read its header, map the TOC region and
 * initialize the global dex_section[] table.  Dies on any failure.
 */
void open_index_i(const char *name, uint psize)
{
	uint i, fd = 0;

	page_size = psize;

	if ((fd = open(name, O_RDONLY | O_LARGEFILE, 0)) == -1)
		dub_sysdie("Couldn't open index %s", name);

	/* BUG FIX: the read() result was ignored; a failed or short read
	 * would leave dex_header (partially) uninitialized. */
	if (read(fd, (struct header_s*)&dex_header, sizeof(struct header_s))
			!= (ssize_t)sizeof(struct header_s))
		dub_sysdie("Couldn't read header of index %s", name);

	/* The TOCs live before the first section's data. */
	dex_base_size = dex_header.data_offs[0];
	dex_base_addr = mmap64(0, dex_base_size, PROT_READ, MAP_SHARED, fd, 0);
	if (dex_base_addr == MAP_FAILED)
		dub_sysdie("Couldn't map TOCs in %s", name);

	memset(dex_section, 0, DEX_NOF_SECT * sizeof(struct dex_section_s));

	for (i = 0; i < DEX_NOF_SECT; i++){
		dex_section[i].sect_fd = fd;
		dex_section[i].sect_offs = dex_header.data_offs[i];
		dex_section[i].toc = dex_base_addr + dex_header.toc_offs[i];
		/* Entry count = bytes between this TOC and the next region,
		 * minus the sentinel entry. */
		if (i == DEX_NOF_SECT - 1)
			dex_section[i].nof_entries = (dex_header.data_offs[0] -
				dex_header.toc_offs[i]) / sizeof(toc_e) - 1;
		else{
			dex_section[i].nof_entries = (dex_header.toc_offs[i + 1] -
				dex_header.toc_offs[i]) / sizeof(toc_e) - 1;
		}
	}
}
/*
 * Resolve the user address for a target command: if *uaddr is already set,
 * just advance it by "offset"; otherwise mmap the covering page range of
 * the backing device and point *uaddr at the in-page position.
 * Returns 0 on success or -EINVAL if the mapping failed.
 * NOTE(review): "rw", "async" and "key" are unused here — presumably part
 * of a common submit-callback signature; confirm against callers.
 */
static int bd_mmap_cmd_submit(struct tgt_device *dev, int rw, uint32_t datalen,
			      unsigned long *uaddr, uint64_t offset, int *async,
			      void *key)
{
	int fd = dev->fd;
	void *p;
	int err = 0;

	if (*uaddr)
		*uaddr = *uaddr + offset;
	else {
		/* Map whole pages covering [offset, offset+datalen); the file
		 * offset handed to mmap64 is rounded down to a page boundary. */
		p = mmap64(NULL, pgcnt(datalen, offset) << PAGE_SHIFT,
			   PROT_READ | PROT_WRITE, MAP_SHARED, fd,
			   offset & ~((1ULL << PAGE_SHIFT) - 1));
		/* Add back the within-page offset (assumes PAGE_MASK is the
		 * kernel-style ~(PAGE_SIZE-1) — TODO confirm macro). */
		*uaddr = (unsigned long) p + (offset & ~PAGE_MASK);
		if (p == MAP_FAILED) {
			err = -EINVAL;
			eprintf("%lx %u %" PRIu64 "\n", *uaddr, datalen, offset);
		}
	}
	/* NOTE(review): unconditional debug print — likely leftover tracing. */
	printf("%lx %u %" PRIu64 "\n", *uaddr, datalen, offset);
	return err;
}
/*
 * Probe whether the host kernel supports MADV_REMOVE by mapping a small
 * test memory file and trying to punch a hole in it.
 * Returns 1 if supported, 0 otherwise.
 */
int __init can_drop_memory(void)
{
	int supported = 0, fd;
	void *page;

	printk("Checking host MADV_REMOVE support...");

	fd = create_mem_file(UM_KERN_PAGE_SIZE);
	if (fd < 0) {
		printk("Creating test memory file failed, err = %d\n", -fd);
		return 0;
	}

	page = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED, fd, 0);
	if (page == MAP_FAILED) {
		printk("Mapping test memory file failed, err = %d\n", -errno);
		close(fd);
		return 0;
	}

	if (madvise(page, UM_KERN_PAGE_SIZE, MADV_REMOVE) == 0) {
		printk("OK\n");
		supported = 1;
	} else {
		printk("MADV_REMOVE failed, err = %d\n", -errno);
	}

	munmap(page, UM_KERN_PAGE_SIZE);
	close(fd);
	return supported;
}
/* To prepare for enlargements of the mmaped area reserve some address
   space.  On some machines, being a file mapping rather than an anonymous
   mapping affects the address selection.  So do this mapping from the
   actual file, even though it's only a dummy to reserve address space.

   On success returns the MAP_FIXED_ALIGNMENT-aligned start of the
   reservation, records the raw mapping in *MMAP_BASE/*MMAP_LEN so the
   caller can munmap it later, and sets *XFLAGS to MAP_FIXED.  If the file
   is already large or the reservation fails, falls back to no reservation
   (NULL return, *XFLAGS == 0).  */
static void *
prepare_address_space (int fd, size_t total, size_t *reserved, int *xflags,
		       void **mmap_base, size_t *mmap_len)
{
  if (total < RESERVE_MMAP_SIZE)
    {
      /* PROT_NONE: the pages are never touched, this only claims a
	 contiguous range of virtual addresses.  */
      void *p = mmap64 (NULL, RESERVE_MMAP_SIZE, PROT_NONE, MAP_SHARED, fd, 0);
      if (p != MAP_FAILED)
	{
	  /* Align the usable start; the bytes before it are wasted but
	     remain part of the mapping that gets unmapped later.  */
	  void *aligned_p = PTR_ALIGN_UP (p, MAP_FIXED_ALIGNMENT);
	  size_t align_adjust = aligned_p - p;
	  *mmap_base = p;
	  *mmap_len = RESERVE_MMAP_SIZE;
	  assert (align_adjust < RESERVE_MMAP_SIZE);
	  *reserved = RESERVE_MMAP_SIZE - align_adjust;
	  *xflags = MAP_FIXED;
	  return aligned_p;
	}
    }

  *reserved = total;
  *xflags = 0;
  *mmap_base = NULL;
  *mmap_len = 0;
  return NULL;
}
/********************************************* SkypeOpenFile(): Memory map a file. This sets the globals. *********************************************/ void SkypeOpenFile (char *Filename) { mystat Stat; /* block re-opens */ SkypeCloseFile(); /* Open file */ FileIn = open(Filename,O_RDONLY|O_LARGEFILE); if (FileIn == -1) { fprintf(stderr,"ERROR: Unable to open file (%s)\n",Filename); exit(-1); } if (fstat64(FileIn,&Stat) == -1) { fprintf(stderr,"ERROR: Unable to stat file (%s)\n",Filename); close(FileIn); exit(-1); } MemorySize=Stat.st_size; if (MemorySize > 0) { Memory=mmap64(0,MemorySize,PROT_READ,MAP_PRIVATE,FileIn,0); if (Memory == MAP_FAILED) { fprintf(stderr,"ERROR: Unable to mmap file (%s)\n",Filename); close(FileIn); exit(-1); } } } /* SkypeOpenFile() */
/* Return true if the archive bytes [OFFSET, OFFSET+SIZE) are covered by
   the current mapping, growing the MAP_FIXED mapping into the reserved
   address range first if the file itself has grown.  Returns false if the
   file now exceeds the reservation or remapping fails.  */
static bool
file_data_available_p (struct locarhandle *ah, uint32_t offset, uint32_t size)
{
  /* Fast path: the requested range is already mapped.  */
  if (offset < ah->mmaped && offset + size <= ah->mmaped)
    return true;

  struct stat64 st;
  if (fstat64 (ah->fd, &st) != 0)
    return false;

  /* The file outgrew the reserved address space; caller must remap.  */
  if (st.st_size > ah->reserved)
    return false;

  const size_t pagesz = getpagesize ();

  /* Extend the mapping from the last mapped page boundary up to the new
     end of file.  MAP_FIXED is safe here because the target range lies
     inside the reservation made at open time.  */
  size_t start = ah->mmaped & ~(pagesz - 1);
  void *p = mmap64 (ah->addr + start, st.st_size - start,
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		    ah->fd, start);
  if (p == MAP_FAILED)
    {
      /* Record the page-aligned extent that is still known-good.  */
      ah->mmaped = start;
      return false;
    }

  ah->mmaped = st.st_size;
  return true;
}
static datastore_descriptor do_mmap(long block_size, long num_blocks, char* filename){ int i; // If it isn't inited, let's fill in the table with empty entries if (!xordatastoreinited) { // start the table as entry for (i=0; i<STARTING_XORDATASTORE_TABLESIZE; i++) { xordatastoretable[i].numberofblocks = 0; xordatastoretable[i].sizeofablock = 0; xordatastoretable[i].datastore = NULL; } // We've initted now! xordatastoreinited = 1; } for (i=0; i<xordatastorestablesize; i++) { // Look for an empty entry if (!is_table_entry_used(i)) { xordatastoretable[i].numberofblocks = num_blocks; xordatastoretable[i].sizeofablock = block_size; int dbfd = open(filename, O_RDONLY, 0); if (dbfd < 0){ printf("error opening db %s!\n", filename); exit(1); } xordatastoretable[i].datastore = (__m128i *) mmap64(NULL, num_blocks * block_size, PROT_READ, MAP_SHARED, dbfd, 0); if (xordatastoretable[i].datastore == MAP_FAILED) { printf("mmap failed!\n"); exit(1); } // we can close dbfd here already, mmap still works fine close(dbfd); // check for valid header if (strncmp((char*) xordatastoretable[i].datastore, "RAIDPIRDB_v0.9.5", 16) != 0){ printf("%s is not a valid RAID-PIR db!\n", filename); exit(1); } // skip header, if it was correct xordatastoretable[i].datastore++; return i; } } // The table is full! I should expand it... printf("Internal Error: I need to expand the table size (unimplemented)\n"); return -1; }
/********************************************** SumOpenFile(): Open and mmap a file. Returns structure, or NULL on failure. **********************************************/ CksumFile * SumOpenFile (char *Fname) { CksumFile *CF; struct stat64 Stat; CF=(CksumFile *)calloc(1,sizeof(CksumFile)); if (!CF) return(NULL); /* open the file (memory map) */ #ifdef O_LARGEFILE CF->FileHandle = open(Fname,O_RDONLY|O_LARGEFILE); #else /** BSD does not need nor use O_LARGEFILE **/ CF->FileHandle = open(Fname,O_RDONLY); #endif if (CF->FileHandle == -1) { fprintf(stderr,"ERROR: Unable to open file (%s)\n",Fname); free(CF); return(NULL); } if (fstat64(CF->FileHandle,&Stat) == -1) { fprintf(stderr,"ERROR: Unable to stat file (%s)\n",Fname); close(CF->FileHandle); free(CF); return(NULL); } CF->MmapSize = Stat.st_size; CF->MmapOffset = 0; /* reject files that are too long */ if (CF->MmapSize >= (uint32_t)(-1)) { close(CF->FileHandle); free(CF); return(NULL); } if (CF->MmapSize > 0) { CF->Mmap = mmap64(0,CF->MmapSize,PROT_READ,MAP_PRIVATE,CF->FileHandle,0); if (CF->Mmap == MAP_FAILED) { fprintf(stderr,"ERROR: Unable to mmap file (%s)\n",Fname); close(CF->FileHandle); free(CF); return(NULL); } } return(CF); } /* SumOpenFile() */
/*
 * In order to mmap a section from the ELF file, we must round down sh_offset
 * to the previous page boundary, and mmap the surrounding page.  We store
 * the pointer to the start of the actual section data back into sp->cts_data.
 * Returns the raw mapping base (MAP_FAILED on error) so the caller can
 * munmap it later.
 */
const void *
ctf_sect_mmap(ctf_sect_t *sp, int fd)
{
	size_t in_page = sp->cts_offset & ~_PAGEMASK;	/* offset within page */
	caddr_t base;

	base = mmap64(NULL, sp->cts_size + in_page, PROT_READ, MAP_PRIVATE,
	    fd, sp->cts_offset & _PAGEMASK);

	if (base == MAP_FAILED)
		return (base);

	sp->cts_data = base + in_page;
	return (base);
}
/*
 * Smoke test: mmap our own executable read-only and unmap it again.
 * Always returns 0.
 */
int main(int argc, char* argv[])
{
	struct stat fs;
	int fd = open(argv[0], O_RDONLY);
	if (fd == -1)
		return 0;

	/* BUG FIX: fstat()'s result was ignored; on failure fs.st_size was
	 * uninitialized when handed to mmap64().  Also skip empty files,
	 * for which a zero-length mmap would fail anyway. */
	if (fstat(fd, &fs) == 0 && fs.st_size > 0) {
		void* ptr = mmap64(NULL, fs.st_size, PROT_READ, MAP_SHARED, fd, 0);
		if (ptr != MAP_FAILED) {
			munmap(ptr, fs.st_size);
		}
	}
	close(fd);
	return 0;
}
int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len, int r, int w, int x) { void *loc; int prot; prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0); loc = mmap64((void *) virt, len, prot, MAP_SHARED | MAP_FIXED, fd, off); if (loc == MAP_FAILED) return -errno; return 0; }
/*---------------------------------------------------------------------------*/
/* Allocate "size" bytes backed by huge pages when possible.
 *
 * Layout: one extra HUGE_PAGE_SZ page is prepended to hold the real
 * allocation size (needed by the matching free routine, which must call
 * munmap with a length); the caller receives the address just past it.
 * If huge pages are disabled or mmap fails, falls back to
 * posix_memalign() and records real_size = 0 so the free side knows not
 * to munmap.  Returns NULL on total failure.
 */
void *malloc_huge_pages(size_t size)
{
	int retval;
	size_t real_size;
	void *ptr = NULL;

	if (disable_huge_pages) {
		/* Plain page-aligned heap allocation, no metadata page. */
		int page_size = sysconf(_SC_PAGESIZE);
		real_size = ALIGN(size, page_size);
		retval = posix_memalign(&ptr, page_size, real_size);
		if (retval) {
			ERROR_LOG("posix_memalign failed sz:%zu. %s\n",
				  real_size, strerror(retval));
			return NULL;
		}
		return ptr;
	}

	/* Use 1 extra page to store allocation metadata */
	/* (libhugetlbfs is more efficient in this regard) */
	real_size = ALIGN(size + HUGE_PAGE_SZ, HUGE_PAGE_SZ);
	ptr = mmap64(NULL, real_size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS |
		     MAP_POPULATE | MAP_HUGETLB, -1, 0);
	if (ptr == MAP_FAILED) {
		/* The mmap() call failed. Try to malloc instead */
		int page_size = sysconf(_SC_PAGESIZE);
		WARN_LOG("mmap64 rdma pool sz:%zu failed (errno=%d %m)\n",
			 real_size, errno);
		real_size = ALIGN(size + HUGE_PAGE_SZ, page_size);
		retval = posix_memalign(&ptr, page_size, real_size);
		if (retval) {
			ERROR_LOG("posix_memalign failed sz:%zu. %s\n",
				  real_size, strerror(retval));
			return NULL;
		}
		/* Zero marks "not mmap'ed" for the free path. */
		real_size = 0;
	} else {
		DEBUG_LOG("Allocated huge page sz:%zu\n", real_size);
	}
	/* Save real_size since mmunmap() requires a size parameter */
	*((size_t *)ptr) = real_size;
	/* Skip the page with metadata (void* arithmetic: GCC extension). */
	return ptr + HUGE_PAGE_SZ;
}
int write_fd_chunk(struct output_file *out, unsigned int len, int fd, int64_t offset) { int ret; int64_t aligned_offset; int aligned_diff; int buffer_size; char *ptr; aligned_offset = offset & ~(4096 - 1); aligned_diff = offset - aligned_offset; buffer_size = len + aligned_diff; #ifndef USE_MINGW char *data = mmap64(NULL, buffer_size, PROT_READ, MAP_SHARED, fd, aligned_offset); if (data == MAP_FAILED) { return -errno; } ptr = data + aligned_diff; #else off64_t pos; char *data = malloc(len); if (!data) { return -errno; } pos = lseek64(fd, offset, SEEK_SET); if (pos < 0) { return -errno; } ret = read_all(fd, data, len); if (ret < 0) { return ret; } ptr = data; #endif ret = out->sparse_ops->write_data_chunk(out, len, ptr); #ifndef USE_MINGW munmap(data, buffer_size); #else free(data); #endif return ret; }
/*
 * Regression test: race a page-dirtying mmap writer (parent) against an
 * O_DIRECT pwrite to the same high file offset (child).
 */
int main(int argc, char **argv)
{
	int fd, fd1, ret;
	char *buf;
	char wbuf[8192];
	unsigned long long offset = 0xffffff000ULL;
	char *p = wbuf;

	fd = open(FILENAME, O_RDWR|O_CREAT|O_LARGEFILE/*|O_TRUNC*/, 0644);
	if (fd < 0) {
		perror(FILENAME);
		return -1;
	}
	ftruncate64(fd, offset + 4096*4);

	buf = mmap64(NULL, 4096*4, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return -1;
	}

	fd1 = open(FILENAME, O_RDWR|O_DIRECT|O_LARGEFILE, 0644);
	/* BUG FIX: this re-checked "fd" (already validated above) instead
	 * of the freshly opened "fd1". */
	if (fd1 < 0) {
		perror(FILENAME);
		return -1;
	}

	/* Round the stack buffer up to a 4096-byte boundary: O_DIRECT
	 * requires an aligned user buffer. */
	p = (char *)((unsigned long) p | 4095) + 1;

	if (fork()) {
		while (1) {
			/* map in the page */
			buf[10] = 1;
		}
	} else {
		ret = pwrite64(fd1, p, 4096, offset);
		if (ret < 4096) {
			printf("write: %d %p\n", ret, p);
			perror("write");
			return -1;
		}
	}
	return 0;
}
/*
 * Construct a reader over "cur_fd".  When USE_MMAP is enabled and the fd
 * refers to a regular (non-FIFO) file, the whole file is mapped read-only;
 * otherwise the reader falls back to buffered read() with sequential
 * fadvise.
 */
buffered_reader::buffered_reader(int cur_fd, int cur_block_size)
{
#ifdef USE_MMAP
	struct stat64 finfo;
#endif

	fd = cur_fd;
	block_size = cur_block_size;
	buffer = NULL;
	buffer_length = buffer_pointer = 0;
	mmap_addr = NULL;

	/* try do mmap */
#ifdef USE_MMAP
	if (fstat64(cur_fd, &finfo) == 0) {
		if (!S_ISFIFO(finfo.st_mode)) {
			/* mmap */
			size_of_file = finfo.st_size;
			cur_offset = mmap_addr = (char *)mmap64(NULL, size_of_file,
					PROT_READ, MAP_SHARED, cur_fd, 0);
			/* BUG FIX: mmap64() returns MAP_FAILED ((void*)-1), not
			 * NULL, on failure; the old "!mmap_addr" test never fired
			 * and madvise() was then called on MAP_FAILED. */
			if (mmap_addr == (char *)MAP_FAILED) {
				fprintf(stderr, "mmap64 failed: %d/%s\n",
					errno, strerror(errno));
				cur_offset = mmap_addr = NULL;
			} else {
				/* advise the kernel how to treat the mmaped region */
				/* FIXME: change to madvise64 as soon as it comes available */
				(void)madvise(mmap_addr, size_of_file, MADV_SEQUENTIAL);
			}
			// fprintf(stderr, "*using mmap*\n");
		}
	} else {
		fprintf(stderr, "Error obtaining information on fd %d: %d/%s\n",
			cur_fd, errno, strerror(errno));
	}
#endif

	if (!mmap_addr) {
#if (_XOPEN_VERSION >= 600)
		(void)posix_fadvise(cur_fd, 0, 0, POSIX_FADV_SEQUENTIAL); // or POSIX_FADV_NOREUSE?
#endif
	}
}
void open_section_i(const char *basename, enum dex_sect_id sect, uint iblock_no) { int toc_fd = 0; int data_fd = 0; off64_t fpos = 0; char *dname = NULL; char *tname = NULL; PPARM_INT(page_size, VM_PAGE_SIZE); asprintf(&dname, "%s.%u.sect", basename, iblock_no); asprintf(&tname, "%s.%u.toc.sect", basename, iblock_no); if ((data_fd = open(dname, O_RDONLY | O_LARGEFILE, 0)) == -1) dub_sysdie("Couldn't open section %s", dname); if ((toc_fd = open(tname, O_RDONLY)) == -1) dub_sysdie("Couldn't open toc %s", tname); memset(&dex_section[sect], 0, sizeof(struct dex_section_s)); dex_section[sect].sect_fd = data_fd; dex_section[sect].sect_offs = 0; if ((fpos = lseek64(toc_fd, 0, SEEK_END)) == (off64_t)-1) dub_sysdie("Couldn't seek toc %s", tname); dex_section[sect].nof_entries = fpos / sizeof(toc_e) - 1; if (!dex_section[sect].nof_entries) dub_die("TOC %s empty.", tname); dub_dbg("Mmapping %u bytes of toc.", fpos); dex_section[sect].toc = mmap64(0, fpos, PROT_READ, MAP_SHARED, toc_fd, 0); if (dex_section[sect].toc == MAP_FAILED) dub_sysdie("Couldn't mmap toc %s", tname); free(dname); free(tname); }
/*
 * Frees up memory mapped file region of supplied size. The
 * file descriptor "fd" indicates which memory mapped file.
 * If successful, returns 0. Otherwise returns -1 if "size"
 * is zero, or -1 times the number of times msync() (or the
 * mapping itself) failed.
 */
static int
fileset_freemem(int fd, off64_t size)
{
	off64_t left;
	int ret = 0;

	for (left = size; left > 0; left -= MMAP_SIZE) {
		off64_t thismapsize;
		caddr_t addr;

		thismapsize = MIN(MMAP_SIZE, left);
		addr = mmap64(0, thismapsize, PROT_READ|PROT_WRITE,
		    MAP_SHARED, fd, size - left);
		/* BUG FIX: mmap64() can fail; calling msync()/munmap() on
		 * MAP_FAILED is undefined.  Count a failed map as one
		 * failure and move on. */
		if (addr == MAP_FAILED) {
			ret += -1;
			continue;
		}
		ret += msync(addr, thismapsize, MS_INVALIDATE);
		(void) munmap(addr, thismapsize);
	}
	return (ret);
}
/* To prepare for enlargements of the mmaped area reserve some address
   space.  On some machines, being a file mapping rather than an anonymous
   mapping affects the address selection.  So do this mapping from the
   actual file, even though it's only a dummy to reserve address space.

   Returns the start of the reservation with *XFLAGS set to MAP_FIXED, or
   NULL (no reservation, *RESERVED = TOTAL, *XFLAGS = 0) when the file is
   already too large or the reservation cannot be made.  */
static void *
prepare_address_space (int fd, size_t total, size_t *reserved, int *xflags)
{
  void *p = MAP_FAILED;

  if (total < RESERVE_MMAP_SIZE)
    /* PROT_NONE: never touched, this only claims virtual addresses.  */
    p = mmap64 (NULL, RESERVE_MMAP_SIZE, PROT_NONE, MAP_SHARED, fd, 0);

  if (p == MAP_FAILED)
    {
      /* Fall back to mapping exactly what is needed, unreserved.  */
      *reserved = total;
      *xflags = 0;
      return NULL;
    }

  *reserved = RESERVE_MMAP_SIZE;
  *xflags = MAP_FIXED;
  return p;
}
/*
 * uClibc LFS smoke test: anonymous 4 KiB mmap64 must succeed when large
 * file support is compiled in; a no-op otherwise.
 */
int main(int argc, char **argv)
{
#ifdef __UCLIBC_HAS_LFS__
	void *ptr;

	/* BUG FIX: portable use of MAP_ANONYMOUS requires fd == -1; the
	 * old code passed fd 0, which some implementations reject. */
	ptr = mmap64(NULL, 4096, PROT_READ|PROT_WRITE,
		     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	printf("mmap returned %p\n", ptr);
	exit(0);
#else
	exit(0);
#endif
}
/*
 * Open "filename" read-write and map it whole.  On success data_ points at
 * the mapping and size_ holds the file length; on any failure data_ stays
 * null (file_handle_ == -1 indicates the open itself failed).  The
 * descriptor is always closed before returning: the mapping, once
 * established, does not need it.
 */
mmfile::mmfile(const std::string& filename)
{
    file_handle_ = open64(filename.c_str(), O_RDWR,
        S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    if (file_handle_ == -1)
        return;

    struct stat64 sbuf;
    if (fstat64(file_handle_, &sbuf) == -1)
    {
        // BUG FIX: the descriptor leaked when fstat64 failed.
        close(file_handle_);
        file_handle_ = -1;
        return;
    }
    size_ = (long long)sbuf.st_size;

    // You can static_cast void* pointers.
    data_ = static_cast<uint8_t*>(mmap64(
        0, size_, PROT_READ | PROT_WRITE, MAP_SHARED, file_handle_, 0));
    if (data_ == MAP_FAILED)
        data_ = nullptr;

    // The mapping (if any) stays valid after the descriptor is closed.
    close(file_handle_);
}
/* * Create a protection domain */ int usd_ib_cmd_alloc_pd( struct usd_device *dev, uint32_t *handle_o) { uint32_t vfid = 0; uint32_t grp_vect_buf_len = 0; int err; /* Issue IB alloc_pd command, get assigned VF id and group vector size */ err = _usd_ib_cmd_alloc_pd(dev, handle_o, &vfid, &grp_vect_buf_len); if (err) { return err; } /* MAP group vector address to userspace * Kernel module then maps group vector user address to IOMMU and * program VIC HW register */ if (dev->ud_ctx->ucx_caps[USD_CAP_GRP_INTR] > 0) { void *va; off64_t offset; offset = USNIC_ENCODE_PGOFF(vfid, USNIC_MMAP_GRPVECT, 0); va = mmap64(NULL, grp_vect_buf_len, PROT_READ + PROT_WRITE, MAP_SHARED, dev->ud_ctx->ucx_ib_dev_fd, offset); if (va == MAP_FAILED) { usd_err("Failed to map group vector for vf %u, grp_vect_size %u, " "error %d\n", vfid, grp_vect_buf_len, errno); _usd_ib_cmd_dealloc_pd(dev, *handle_o); return -errno; } dev->grp_vect_map.va = va; dev->grp_vect_map.len = grp_vect_buf_len; dev->grp_vect_map.vfid = vfid; } return 0; }
/*
 * Map a DRM dumb buffer into this process.  On success stores the mapping
 * address in *ptr and returns GI_SUCCESS; returns GI_ERROR on a bad
 * argument, a failed MAP_DUMB ioctl, or a failed mmap.
 */
int MapBuffer(IBuffer *ibuffer, void **ptr)
{
	if ((!ibuffer) || (!ibuffer->handle)) {
		return GI_ERROR;
	}

	struct drm_mode_map_dumb req;
	memset(&req, 0, sizeof(req));
	req.handle = ibuffer->handle;

	if (ioctl(ibuffer->fd, DRM_IOCTL_MODE_MAP_DUMB, &req)) {
		return GI_ERROR;
	}

	/* BUG FIX: mmap64() reports failure with MAP_FAILED ((void *)-1),
	 * not NULL, so the old "!data" test never caught a failed map. */
	void *data = mmap64(NULL, ibuffer->size, PROT_READ|PROT_WRITE,
			    MAP_SHARED, ibuffer->fd, req.offset);
	if (data == MAP_FAILED) {
		return GI_ERROR;
	}

	(*ptr) = data;
	return GI_SUCCESS;
}
/*
 * Stress test: mmap a large (256 MiB) MAP_AUTOGROW region of the given
 * file and copy the "junk" pattern into it forever.  The child of the
 * copy loop never terminates normally.
 */
int main(int argc, char * argv[])
{
	/* BUG FIX: the implicit-int return type ("main(...)") is invalid
	 * since C99; declare it explicitly. */
	int i;

	for (i = 0; i < 512; i++)
		junk[i] = -1;

	if ((progname = strrchr(argv[0], '/')) == NULL)
		progname = argv[0];
	else
		progname++;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s filename\n", progname);
		exit(1);
	}

	fd = open(argv[1], O_RDWR|O_CREAT, 0644);
	if (fd < 0) {
		fprintf(stderr, "%s: cannot open %s\n", progname, argv[1]);
		perror(argv[1]);
		exit(3);
	}

	ptr = mmap64(NULL, (size_t)(0x10000000), PROT_WRITE,
		     MAP_SHARED|MAP_AUTOGROW, fd, 0);
	if (ptr == MAP_FAILED) {
		fprintf(stderr, "%s: cannot mmap64 %s\n", progname, argv[1]);
		perror(argv[1]);
		exit(3);
	}

	/* NOTE: this loop never exits; the trailing printf is unreachable
	 * and kept only for symmetry with similar tests. */
	for (counter = 0; ; counter++) {
		junk[0] = counter;
		bcopy(junk, ptr, sizeof(junk));
		ptr += sizeof(junk);
	}

	printf("%s complete.\n", progname);
	return 0;
}