/*
 * Emulate a file-backed mmap() by mapping preallocated anonymous memory and
 * filling it from the file with pread(). The requested protection is ignored;
 * the mapping is always read/write and private.
 */
static void *
minix_mmap_emulator(void *addrhint, size_t size, int prot, int flags,
	int fd, off_t off)
{
	void *ret;
	int mapflags;
	ssize_t s;

	/* Only MAP_FIXED is passed through to the real mapping. */
	mapflags = flags & MAP_FIXED;

#ifdef MINIXVERBOSE
	if(addrhint) {
		fprintf(stderr, "0x%lx-0x%lx requested\n",
			(unsigned long) addrhint,
			(unsigned long) ((char *) addrhint + size));
	}
#endif

	if((ret = minix_mmap(addrhint, size, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE|MAP_PREALLOC|mapflags, -1, 0)) == MAP_FAILED) {
		return ret;
	}

	/* For file-backed requests, copy the file contents into the anonymous
	 * mapping. (Check the caller's flags; mapflags only holds MAP_FIXED.)
	 */
	if(!(flags & MAP_ANON)) {
		if((s = pread(fd, ret, size, off)) <= 0) {
			munmap(ret, size);
			return MAP_FAILED;
		}
	}

#ifdef MINIXVERBOSE
	fprintf(stderr, "0x%lx-0x%lx mapped\n",
		(unsigned long) ret, (unsigned long) ((char *) ret + size));
#endif

	return ret;
}
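A minimal caller sketch (not from the original source) showing how the emulator stands in for a private, file-backed mmap(): the file contents are simply copied into anonymous memory. The helper name map_file_page and the PAGE_SIZE constant are assumptions for illustration.

/* Hypothetical caller: snapshot the first page of an open file. */
static void *map_file_page(int fd)
{
	void *p;

	/* The emulator ignores PROT_READ here and always maps the memory
	 * read/write; the result is a private copy, not a shared mapping. */
	p = minix_mmap_emulator(NULL, PAGE_SIZE, PROT_READ,
		MAP_PRIVATE | MAP_FILE, fd, 0);
	if (p == MAP_FAILED)
		return NULL;

	return p;
}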
/* Allocate backing memory for a cache block; on failure, evict unused blocks
 * and retry once before giving up.
 */
static void
lmfs_alloc_block(struct buf *bp)
{
	ASSERT(!bp->data);
	ASSERT(bp->lmfs_bytes == 0);

	if((bp->data = minix_mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
	    MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
		free_unused_blocks();
		if((bp->data = minix_mmap(0, fs_block_size,
		    PROT_READ|PROT_WRITE, MAP_PREALLOC|MAP_ANON,
		    -1, 0)) == MAP_FAILED) {
			panic("libminixfs: could not allocate block");
		}
	}

	assert(bp->data);
	bp->lmfs_bytes = fs_block_size;
	bp->lmfs_needsetcache = 1;
}
void *alloc_contig(size_t len, int flags, phys_bytes *phys)
{
	vir_bytes buf;
	int mmapflags = MAP_PREALLOC|MAP_CONTIG|MAP_ANON;

	if(flags & AC_LOWER16M)
		mmapflags |= MAP_LOWER16M;
	if(flags & AC_LOWER1M)
		mmapflags |= MAP_LOWER1M;
	if(flags & AC_ALIGN64K)
		mmapflags |= MAP_ALIGN64K;

	/* First try to get memory with minix_mmap. This is guaranteed
	 * to be page-aligned, and we can tell VM it has to be
	 * pre-allocated and contiguous.
	 */
	errno = 0;
	buf = (vir_bytes) minix_mmap(0, len, PROT_READ|PROT_WRITE,
		mmapflags, -1, 0);

	/* If that failed, maybe we're not running in paged mode.
	 * If that's the case, ENXIO will be returned.
	 * Memory returned with malloc() will be preallocated and
	 * contiguous, so fall back on that, and ask for a little extra
	 * so we can page-align it ourselves.
	 */
	if(buf == (vir_bytes) MAP_FAILED) {
		u32_t align = 0;

		if(errno != (_SIGN ENXIO)) {
			return NULL;
		}
		if(flags & AC_ALIGN4K)
			align = 4*1024;
		if(flags & AC_ALIGN64K)
			align = 64*1024;
		if(len + align < len)	/* overflow check */
			return NULL;
		len += align;
		if(!(buf = (vir_bytes) malloc(len))) {
			return NULL;
		}
		if(align)
			buf += align - (buf % align);
	}

	/* Get physical address, if requested. */
	if(phys != NULL && sys_umap_data_fb(SELF, buf, len, phys) != OK)
		panic("sys_umap_data_fb failed");

	return (void *) buf;
}
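A hedged usage sketch for alloc_contig(): a driver allocating a DMA buffer that must sit below 16 MB and learning its physical address. The helper name and buffer size are illustrative; the AC_* flags are the ones handled above.

/* Hypothetical driver setup: a 64 KB, page-aligned DMA buffer below 16 MB. */
static int setup_dma_buffer(void **vaddr, phys_bytes *paddr)
{
	/* alloc_contig() returns preallocated, physically contiguous,
	 * page-aligned memory; *paddr receives its physical address. */
	*vaddr = alloc_contig(64 * 1024, AC_LOWER16M | AC_ALIGN4K, paddr);
	if (*vaddr == NULL)
		return ENOMEM;

	return OK;
}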
void basic_regression(void)
{
	void *block;
#define BLOCKSIZE (PAGE_SIZE*10)
	block = minix_mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANON, -1, 0);

	if(block == MAP_FAILED) { e(1); exit(1); }

	memset(block, 0, BLOCKSIZE);

	/* shrink from bottom */
	minix_munmap(block, PAGE_SIZE);
}
/* Read a block both with pread() and through a private file mapping, in
 * random order, and verify that the two copies agree.
 */
int readblock(int b, int blocksize, u32_t seed, char *data)
{
	u64_t offset;
	int fd;
	char *mmapdata;
	int pread_first = random() % 2;

	get_fd_offset(b, blocksize, &offset, &fd);

	if(pread_first) {
		if(pread(fd, data, blocksize, offset) < blocksize) {
			perror("pread");
			return -1;
		}
	}

	if((mmapdata = minix_mmap(NULL, blocksize, PROT_READ,
		MAP_PRIVATE | MAP_FILE, fd, offset)) == MAP_FAILED) {
		perror("mmap");
		return -1;
	}

	if(!pread_first) {
		if(pread(fd, data, blocksize, offset) < blocksize) {
			perror("pread");
			return -1;
		}
	}

	if(memcmp(mmapdata, data, blocksize)) {
		fprintf(stderr, "readblock: mmap, pread mismatch\n");
		return -1;
	}

	if(minix_munmap(mmapdata, blocksize) < 0) {
		perror("munmap");
		return -1;
	}

	return blocksize;
}
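A possible driver loop around readblock(), sketched here for illustration: it walks a range of blocks and lets readblock() cross-check pread() against the file mapping. MAXBLOCKSIZE and the loop itself are assumptions, not part of the original test; the seed is simply passed through.

static int check_blocks(int nblocks, int blocksize, u32_t seed)
{
	static char data[MAXBLOCKSIZE];	/* assumed upper bound */
	int b;

	for (b = 0; b < nblocks; b++) {
		if (readblock(b, blocksize, seed, data) != blocksize)
			return -1;	/* mismatch or I/O error */
	}
	return 0;
}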
int main(int argc, char *argv[])
{
#define CHUNKSIZE 8192
#define CHUNKS1 3
#define CHUNKS2 2
#define CHUNKS (CHUNKS1+CHUNKS2)
#define LARGESIZE 262144
	int i, fd;
	char *v[CHUNKS];
#define STARTV 0x90000000
	char *vaddr = (char *) STARTV;
	ssize_t l;
	pid_t f;

	printf("Test 44 ");
	fflush(stdout);

	/* Map CHUNKS fixed-address, adjacent anonymous regions. */
	for(i = 0; i < CHUNKS; i++) {
		v[i] = minix_mmap(vaddr, CHUNKSIZE, PROT_READ|PROT_WRITE, 0,
			-1, 0);
		if(v[i] == MAP_FAILED) {
			perror("minix_mmap");
			fprintf(stderr, "minix_mmap failed\n");
			exit(1);
		}
		if(v[i] != vaddr) {
			fprintf(stderr,
				"minix_mmap said %p but I wanted %p\n",
				v[i], vaddr);
			exit(1);
		}
		vaddr += CHUNKSIZE;
	}

#define DEV_ZERO "/dev/zero"
	if((fd = open(DEV_ZERO, O_RDONLY)) < 0) {
		perror("open");
		fprintf(stderr, "open failed for %s\n", DEV_ZERO);
		exit(1);
	}

#define TOTAL1 (CHUNKS1*CHUNKSIZE)
	/* Make a single read cross a region boundary. */
	if((l = read(fd, v[0], TOTAL1)) != TOTAL1) {
		fprintf(stderr, "read %d but expected %d\n", (int) l, TOTAL1);
		exit(1);
	}

	/* Force a single copy to cross a region boundary. */
	{
		char *t;
		t = v[CHUNKS1] + CHUNKSIZE - 2;
		if((l = read(fd, t, CHUNKSIZE)) != CHUNKSIZE) {
			fprintf(stderr, "read %d but expected %d\n",
				(int) l, CHUNKSIZE);
			exit(1);
		}
	}

	/* Now start a child to test bogus memory access. */
	if((f = fork()) == -1) {
		perror("fork");
		exit(1);
	}

	if(f > 0) {
		int st;
		/* Parent waits. */
		if(waitpid(f, &st, 0) < 0) {
			perror("waitpid");
			exit(1);
		}
		if(!WIFEXITED(st)) {
			fprintf(stderr, "child did not exit normally\n");
			exit(1);
		}
		if(WEXITSTATUS(st) != 0) {
			fprintf(stderr, "child exited with nonzero status\n");
			exit(1);
		}
	} else {
		/* Child performs a bogus read that must fail with EFAULT. */
		int res;
		char *buf = v[CHUNKS-1];
		errno = 0;
		res = read(fd, buf, LARGESIZE);
		if(res >= 0) {
			fprintf(stderr, "res %d\n", res);
			exit(1);
		}
		if(errno != EFAULT) {
			fprintf(stderr, "errno %d\n", errno);
			exit(1);
		}
		exit(0);
	}

	printf("ok\n");
	exit(0);
}
/* minix-without-mmap version of _rtld_map_object() */
Obj_Entry *
_rtld_map_object_fallback(const char *path, int fd, const struct stat *sb)
{
	Obj_Entry	*obj;
	Elf_Ehdr	*ehdr;
	Elf_Phdr	*phdr;
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	Elf_Phdr	*phtls;
#endif
	size_t		 phsize;
	Elf_Phdr	*phlimit;
	Elf_Phdr	*segs[2];
	int		 nsegs;
	caddr_t		 mapbase = MAP_FAILED;
	size_t		 mapsize = 0;
	int		 mapflags;
	Elf_Off		 base_offset;
#ifdef MAP_ALIGNED
	Elf_Addr	 base_alignment;
#endif
	Elf_Addr	 base_vaddr;
	Elf_Addr	 base_vlimit;
	Elf_Addr	 text_vlimit;
	int		 text_flags;
	caddr_t		 base_addr;
	Elf_Off		 data_offset;
	Elf_Addr	 data_vaddr;
	Elf_Addr	 data_vlimit;
	int		 data_flags;
	caddr_t		 data_addr;
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	Elf_Addr	 tls_vaddr = 0; /* Noise GCC */
#endif
	Elf_Addr	 phdr_vaddr;
	size_t		 phdr_memsz;
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	caddr_t		 gap_addr;
	size_t		 gap_size;
#endif
	int i;
#ifdef RTLD_LOADER
	Elf_Addr	 clear_vaddr;
	caddr_t		 clear_addr;
	size_t		 nclear;
#endif

	if (sb != NULL && sb->st_size < (off_t)sizeof (Elf_Ehdr)) {
		_rtld_error("%s: not ELF file (too short)", path);
		return NULL;
	}

	obj = _rtld_obj_new();
	obj->path = xstrdup(path);
	obj->pathlen = strlen(path);
	if (sb != NULL) {
		obj->dev = sb->st_dev;
		obj->ino = sb->st_ino;
	}

#ifdef __minix
	/* Without a working file mmap, read the first page into an
	 * anonymous mapping instead.
	 */
	ehdr = minix_mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
	    MAP_PREALLOC|MAP_ANON, -1, (off_t)0);
	Pread(ehdr, _rtld_pagesz, fd, 0);
#if MINIXVERBOSE
	fprintf(stderr, "minix mmap for header: 0x%lx\n",
	    (unsigned long) ehdr);
#endif
#else
	ehdr = mmap(NULL, _rtld_pagesz, PROT_READ, MAP_FILE | MAP_SHARED, fd,
	    (off_t)0);
#endif
	obj->ehdr = ehdr;
	if (ehdr == MAP_FAILED) {
		_rtld_error("%s: read error: %s", path, xstrerror(errno));
		goto bad;
	}

	/* Make sure the file is valid */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0) {
		_rtld_error("%s: not ELF file (magic number bad)", path);
		goto bad;
	}
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS) {
		_rtld_error("%s: invalid ELF class %x; expected %x", path,
		    ehdr->e_ident[EI_CLASS], ELFCLASS);
		goto bad;
	}

	/* Elf_e_ident includes class */
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_version != EV_CURRENT ||
	    ehdr->e_ident[EI_DATA] != ELFDEFNNAME(MACHDEP_ENDIANNESS)) {
		_rtld_error("%s: unsupported file version", path);
		goto bad;
	}
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		_rtld_error("%s: unsupported file type", path);
		goto bad;
	}
	switch (ehdr->e_machine) {
		ELFDEFNNAME(MACHDEP_ID_CASES)
	default:
		_rtld_error("%s: unsupported machine", path);
		goto bad;
	}

	/*
	 * We rely on the program header being in the first page. This is
	 * not strictly required by the ABI specification, but it seems to
	 * always be true in practice. And, it simplifies things considerably.
	 */
	assert(ehdr->e_phentsize == sizeof(Elf_Phdr));
	assert(ehdr->e_phoff + ehdr->e_phnum * sizeof(Elf_Phdr) <=
	    _rtld_pagesz);

	/*
	 * Scan the program header entries, and save key information.
	 *
	 * We rely on there being exactly two load segments, text and data,
	 * in that order.
	 */
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	phtls = NULL;
#endif
	phsize = ehdr->e_phnum * sizeof(phdr[0]);
	obj->phdr = NULL;
	phdr_vaddr = EA_UNDEF;
	phdr_memsz = 0;
	phlimit = phdr + ehdr->e_phnum;
	nsegs = 0;

	while (phdr < phlimit) {
		switch (phdr->p_type) {
		case PT_INTERP:
			obj->interp = (void *)(uintptr_t)phdr->p_vaddr;
			dbg(("%s: PT_INTERP %p", obj->path, obj->interp));
			break;

		case PT_LOAD:
			if (nsegs < 2)
				segs[nsegs] = phdr;
			++nsegs;

#if ELFSIZE == 64
#define PRImemsz	PRIu64
#else
#define PRImemsz	PRIu32
#endif
			dbg(("%s: %s %p phsize %" PRImemsz, obj->path,
			    "PT_LOAD", (void *)(uintptr_t)phdr->p_vaddr,
			    phdr->p_memsz));
			break;

		case PT_PHDR:
			phdr_vaddr = phdr->p_vaddr;
			phdr_memsz = phdr->p_memsz;
			dbg(("%s: %s %p phsize %" PRImemsz, obj->path,
			    "PT_PHDR", (void *)(uintptr_t)phdr->p_vaddr,
			    phdr->p_memsz));
			break;

		case PT_DYNAMIC:
			obj->dynamic = (void *)(uintptr_t)phdr->p_vaddr;
			dbg(("%s: %s %p phsize %" PRImemsz, obj->path,
			    "PT_DYNAMIC", (void *)(uintptr_t)phdr->p_vaddr,
			    phdr->p_memsz));
			break;

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		case PT_TLS:
			phtls = phdr;
			dbg(("%s: %s %p phsize %" PRImemsz, obj->path,
			    "PT_TLS", (void *)(uintptr_t)phdr->p_vaddr,
			    phdr->p_memsz));
			break;
#endif
		}

		++phdr;
	}
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	obj->entry = (void *)(uintptr_t)ehdr->e_entry;
	if (!obj->dynamic) {
		_rtld_error("%s: not dynamically linked", path);
		goto bad;
	}
	if (nsegs != 2) {
		_rtld_error("%s: wrong number of segments (%d != 2)", path,
		    nsegs);
		goto bad;
	}

	/*
	 * Map the entire address space of the object as a file
	 * region to stake out our contiguous region and establish a
	 * base for relocation. We use a file mapping so that
	 * the kernel will give us whatever alignment is appropriate
	 * for the platform we're running on.
	 *
	 * We map it using the text protection, map the data segment
	 * into the right place, then map an anon segment for the bss
	 * and unmap the gaps left by padding to alignment.
	 */
#ifdef MAP_ALIGNED
	base_alignment = segs[0]->p_align;
#endif
	base_offset = round_down(segs[0]->p_offset);
	base_vaddr = round_down(segs[0]->p_vaddr);
	base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
	text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
	text_flags = protflags(segs[0]->p_flags);
	data_offset = round_down(segs[1]->p_offset);
	data_vaddr = round_down(segs[1]->p_vaddr);
	data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
	data_flags = protflags(segs[1]->p_flags);
#ifdef RTLD_LOADER
	clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
#endif

	obj->textsize = text_vlimit - base_vaddr;
	obj->vaddrbase = base_vaddr;
	obj->isdynamic = ehdr->e_type == ET_DYN;

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	if (phtls != NULL) {
		++_rtld_tls_dtv_generation;
		obj->tlsindex = ++_rtld_tls_max_index;
		obj->tlssize = phtls->p_memsz;
		obj->tlsalign = phtls->p_align;
		obj->tlsinitsize = phtls->p_filesz;
		tls_vaddr = phtls->p_vaddr;
	}
#endif

	obj->phdr_loaded = false;
	for (i = 0; i < nsegs; i++) {
		if (phdr_vaddr != EA_UNDEF &&
		    segs[i]->p_vaddr <= phdr_vaddr &&
		    segs[i]->p_memsz >= phdr_memsz) {
			obj->phdr_loaded = true;
			break;
		}
		if (segs[i]->p_offset <= ehdr->e_phoff &&
		    segs[i]->p_memsz >= phsize) {
			phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
			phdr_memsz = phsize;
			obj->phdr_loaded = true;
			break;
		}
	}
	if (obj->phdr_loaded) {
		obj->phdr = (void *)(uintptr_t)phdr_vaddr;
		obj->phsize = phdr_memsz;
	} else {
		Elf_Phdr *buf;
		buf = xmalloc(phsize);
		if (buf == NULL) {
			_rtld_error("%s: cannot allocate program header",
			    path);
			goto bad;
		}
		memcpy(buf, phdr, phsize);
		obj->phdr = buf;
		obj->phsize = phsize;
	}
	dbg(("%s: phdr %p phsize %zu (%s)", obj->path, obj->phdr, obj->phsize,
	     obj->phdr_loaded ? "loaded" : "allocated"));

	/* Unmap header if it overlaps the first load section. */
	if (base_offset < _rtld_pagesz) {
		munmap(ehdr, _rtld_pagesz);
		obj->ehdr = MAP_FAILED;
	}

	/*
	 * Calculate log2 of the base section alignment.
	 */
	mapflags = 0;
#ifdef MAP_ALIGNED
	if (base_alignment > _rtld_pagesz) {
		unsigned int log2 = 0;
		for (; base_alignment > 1; base_alignment >>= 1)
			log2++;
		mapflags = MAP_ALIGNED(log2);
	}
#endif

#ifdef RTLD_LOADER
	base_addr = obj->isdynamic ? NULL : (caddr_t)base_vaddr;
#else
	base_addr = NULL;
#endif
	mapsize = base_vlimit - base_vaddr;

#ifndef __minix
	mapbase = mmap(base_addr, mapsize, text_flags,
	    mapflags | MAP_FILE | MAP_PRIVATE, fd, base_offset);
#else
	/* Without file mmap, reserve the whole region as preallocated
	 * anonymous memory and read the text in by hand.
	 */
	mapbase = minix_mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
	    MAP_ANON | MAP_PREALLOC, -1, 0);
#if MINIXVERBOSE
	fprintf(stderr, "minix mmap for whole block: 0x%lx-0x%lx\n",
	    (unsigned long) mapbase, (unsigned long) (mapbase + mapsize));
#endif
	Pread(mapbase, obj->textsize, fd, 0);
#endif
	if (mapbase == MAP_FAILED) {
		_rtld_error("mmap of entire address space failed: %s",
		    xstrerror(errno));
		goto bad;
	}

	/* Overlay the data segment onto the proper region. */
	data_addr = mapbase + (data_vaddr - base_vaddr);
#ifdef __minix
	Pread(data_addr, data_vlimit - data_vaddr, fd, data_offset);
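Pread() is used throughout the fallback above but its definition is not part of this excerpt. A minimal sketch of its assumed behavior: read exactly size bytes at the given offset, treating errors and short reads as fatal. The error handling shown is an assumption, not the original implementation.

static void Pread(void *addr, size_t size, int fd, off_t off)
{
	ssize_t r;

	if ((r = pread(fd, addr, size, off)) < 0 || (size_t)r != size) {
		/* Assumed error path: report and abort, so callers never
		 * see a partially filled buffer. */
		_rtld_error("pread failed");
		abort();
	}
}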
/*===========================================================================*
 *				m_block_ioctl				     *
 *===========================================================================*/
static int m_block_ioctl(dev_t minor, unsigned int request, endpoint_t endpt,
	cp_grant_id_t grant)
{
/* I/O controls for the block devices of the memory driver. Currently there
 * is one I/O control specific to the memory driver:
 * - MIOCRAMSIZE: to set the size of the RAM disk.
 */
  struct device *dv;
  u32_t ramdev_size;
  int s;
  void *mem;

  if (request != MIOCRAMSIZE)
	return EINVAL;

  /* Someone wants to create a new RAM disk with the given size.
   * A RAM disk can be created only once, and only on a RAM disk device.
   */
  if ((dv = m_block_part(minor)) == NULL) return ENXIO;
  if((minor < RAM_DEV_FIRST || minor > RAM_DEV_LAST) &&
	minor != RAM_DEV_OLD) {
	printf("MEM: MIOCRAMSIZE: %d not a ramdisk\n", minor);
	return EINVAL;
  }

  /* Get request structure */
  s = sys_safecopyfrom(endpt, grant, 0, (vir_bytes)&ramdev_size,
	sizeof(ramdev_size));
  if (s != OK)
	return s;
  if(m_vaddrs[minor] && !cmp64(dv->dv_size, cvul64(ramdev_size))) {
	return(OK);
  }

  /* openct is 1 for the ioctl(). */
  if(openct[minor] != 1) {
	printf("MEM: MIOCRAMSIZE: %d in use (count %d)\n",
		minor, openct[minor]);
	return(EBUSY);
  }
  if(m_vaddrs[minor]) {
	u32_t size;
	if(ex64hi(dv->dv_size)) {
		panic("huge old ramdisk");
	}
	size = ex64lo(dv->dv_size);
	minix_munmap((void *) m_vaddrs[minor], size);
	m_vaddrs[minor] = (vir_bytes) NULL;
  }

#if DEBUG
  printf("MEM:%d: allocating ramdisk of size 0x%x\n", minor, ramdev_size);
#endif

  /* Try to allocate a piece of memory for the RAM disk. */
  if((mem = minix_mmap(NULL, ramdev_size, PROT_READ|PROT_WRITE,
	MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
	printf("MEM: failed to get memory for ramdisk\n");
	return(ENOMEM);
  }

  m_vaddrs[minor] = (vir_bytes) mem;
  dv->dv_size = cvul64(ramdev_size);

  return(OK);
}
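For context, a hedged userland sketch of the request this handler serves: sizing a RAM disk through the MIOCRAMSIZE ioctl. The device path, helper name, and headers shown are illustrative assumptions.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical helper: grow the RAM disk at 'dev' to 'size' bytes. */
static int make_ramdisk(const char *dev, u32_t size)
{
	int fd, r;

	if ((fd = open(dev, O_RDWR)) < 0)
		return -1;

	/* The driver copies the u32_t size in via the grant, frees any old
	 * backing memory, and allocates a fresh preallocated region. */
	r = ioctl(fd, MIOCRAMSIZE, &size);
	close(fd);
	return r;
}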
/*===========================================================================*
 *				do_shmget				     *
 *===========================================================================*/
int do_shmget(message *m)
{
	struct shm_struct *shm;
	long key, size, old_size;
	int flag;
	int id;

	key = m->SHMGET_KEY;
	old_size = size = m->SHMGET_SIZE;
	flag = m->SHMGET_FLAG;

	if ((shm = shm_find_key(key))) {
		if (!check_perm(&shm->shmid_ds.shm_perm, who_e, flag))
			return EACCES;
		if ((flag & IPC_CREAT) && (flag & IPC_EXCL))
			return EEXIST;
		if (size && shm->shmid_ds.shm_segsz < size)
			return EINVAL;
		id = shm->id;
	} else { /* no key found */
		if (!(flag & IPC_CREAT))
			return ENOENT;
		if (size <= 0)
			return EINVAL;
		/* round up to a multiple of PAGE_SIZE */
		if (size % I386_PAGE_SIZE)
			size += I386_PAGE_SIZE - size % I386_PAGE_SIZE;
		if (size <= 0)
			return EINVAL;

		if (shm_list_nr == MAX_SHM_NR)
			return ENOMEM;
		/* TODO: shmmni should be changed... */
		if (identifier == SHMMNI)
			return ENOSPC;
		shm = &shm_list[shm_list_nr];
		memset(shm, 0, sizeof(struct shm_struct));
		shm->page = (vir_bytes) minix_mmap(0, size,
			PROT_READ|PROT_WRITE,
			MAP_CONTIG|MAP_PREALLOC|MAP_ANON|MAP_IPC_SHARED,
			-1, 0);
		if (shm->page == (vir_bytes) MAP_FAILED)
			return ENOMEM;
		shm->phys = vm_getphys(SELF_E, (void *) shm->page);
		memset((void *)shm->page, 0, size);

		shm->shmid_ds.shm_perm.cuid =
			shm->shmid_ds.shm_perm.uid = getnuid(who_e);
		shm->shmid_ds.shm_perm.cgid =
			shm->shmid_ds.shm_perm.gid = getngid(who_e);
		shm->shmid_ds.shm_perm.mode = flag & 0777;
		shm->shmid_ds.shm_segsz = old_size;
		shm->shmid_ds.shm_atime = 0;
		shm->shmid_ds.shm_dtime = 0;
		shm->shmid_ds.shm_ctime = time(NULL);
		shm->shmid_ds.shm_cpid = getnpid(who_e);
		shm->shmid_ds.shm_lpid = 0;
		shm->shmid_ds.shm_nattch = 0;
		shm->id = id = identifier++;
		shm->key = key;

		shm_list_nr++;
	}

	m->SHMGET_RETID = id;
	return OK;
}
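The request above is what the C library's shmget() ends up sending to the IPC server. A minimal userland sketch, with an illustrative size, assuming the standard System V API:

#include <sys/ipc.h>
#include <sys/shm.h>

static int get_segment(void)
{
	/* The server rounds the 6000 bytes up to whole pages internally,
	 * but shm_segsz keeps the size as requested. */
	return shmget(IPC_PRIVATE, 6000, IPC_CREAT | 0600);
}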