Example #1
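/*
 * Decommit and/or release a range of virtual memory. File-mapped pages are
 * returned to their owning filemap, which is freed once its last page is
 * gone; with MEM_RELEASE, anonymous ranges are also returned to vmap.
 */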
int vmfree(void *addr, unsigned long size, int type) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    if (type & (MEM_DECOMMIT | MEM_RELEASE)) {
        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_directory_mapped(vaddr)) {
                pte_t flags = get_page_flags(vaddr);
                unsigned long pfn = BTOP(virt2phys(vaddr));

                if (flags & PT_FILE) {
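                    // For file pages that are not present, the PTE holds the
                    // filemap handle itself rather than a page frame number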
                    handle_t h = (flags & PT_PRESENT) ? pfdb[pfn].owner : pfn;
                    struct filemap *newfm = (struct filemap *) hlookup(h);
                    if (newfm != fm) {
                        if (fm) {
                            if (fm->pages == 0) {
                                rc = free_filemap(fm);
                            } else {
                                rc = unlock_filemap(fm);
                            }
                            if (rc < 0) return rc;
                        }
                        fm = newfm;
                        rc = wait_for_object(fm, INFINITE);
                        if (rc < 0) return rc;
                    }
                    fm->pages--;
                    unmap_page(vaddr);
                    if (flags & PT_PRESENT) free_pageframe(pfn);
                } else if (flags & PT_PRESENT) {
                    unmap_page(vaddr);
                    free_pageframe(pfn);
                }
            }

            vaddr += PAGESIZE;
        }
    }

    if (fm) {
        if (fm->pages == 0) {
            rc = free_filemap(fm);
        } else {
            rc = unlock_filemap(fm);
        }
        if (rc < 0) return rc;
    } else if (type & MEM_RELEASE) {
        rmap_free(vmap, BTOP(addr), pages);
    }

    return 0;
}
Example #2
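/*
 * Map a view of a file into virtual memory: reserve a page-aligned region
 * (caller-supplied or allocated from vmap), create a filemap object for it,
 * and tag each page PT_FILE so its contents can be loaded on first access.
 */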
void *vmmap(void *addr, unsigned long size, int protect, struct file *filp, off64_t offset, int *rc) {
    int pages = PAGES(size);
    unsigned long flags = pte_flags_from_protect(protect);
    struct filemap *fm;
    int i;
    char *vaddr;

    if (rc) *rc = 0;
    if (size == 0 || flags == 0xFFFFFFFF) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    addr = (void *) PAGEADDR(addr);
    if (addr == NULL) {
        addr = (void *) PTOB(rmap_alloc(vmap, pages));
        if (addr == NULL) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    } else {
        if (rmap_reserve(vmap, BTOP(addr), pages)) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    }

    fm = (struct filemap *) kmalloc(sizeof(struct filemap));
    if (!fm) {
        rmap_free(vmap, BTOP(addr), pages);
        if (rc) *rc = -ENOMEM;
        return NULL;
    }
    init_object(&fm->object, OBJECT_FILEMAP);
    fm->self = halloc(&fm->object);
    fm->file = halloc(&filp->iob.object);
    if (fm->self < 0 || fm->file < 0) {
        rmap_free(vmap, BTOP(addr), pages);  // undo the reservation on failure
        if (rc) *rc = -ENFILE;
        return NULL;
    }
    hprotect(fm->self);
    hprotect(fm->file);
    fm->offset = offset;
    fm->pages = pages;
    fm->object.signaled = 1;
    fm->addr = addr;
    fm->size = size;
    fm->protect = flags | PT_FILE;

    vaddr = (char *) addr;
    flags = (flags & ~PT_USER) | PT_FILE;
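    // Each page is mapped not-present with the filemap handle in place of a
    // page frame number, so the page can be filled from the file when touched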
    for (i = 0; i < pages; i++) {
        map_page(vaddr, fm->self, flags);
        vaddr += PAGESIZE;
    }

    return addr;
}
Example #3
void *kmalloc_tag(int size, unsigned long tag) {
  struct bucket *b;
  int bucket;
  void *addr;

  // Handle large allocations by allocating whole pages
  if (size > PAGESIZE / 2) {
    // Allocate pages
    addr = alloc_pages(PAGES(size), tag ? tag : 'ALOC');
    if (!addr) return NULL;

    // Record the size in the pfn entry (page count offset by PAGESHIFT, so
    // kfree() can tell page allocations apart from bucket numbers)
    pfdb[BTOP(virt2phys(addr))].size = PAGES(size) + PAGESHIFT;

    return addr;
  }

  // Otherwise allocate from one of the buckets
  bucket = BUCKET(size);
  b = &buckets[bucket];

  // If the bucket is empty, allocate one more page for it
  if (b->mem == 0) {
    char *p;
    int i;

    // Allocate new page
    addr = alloc_pages(1, 'HEAP');
    if (!addr) return NULL;

    // Set bucket number in pfn entry
    pfdb[BTOP(virt2phys(addr))].size = bucket;

    // Split page into chunks
    p = (char *) addr;
    for (i = 0; i < PAGESIZE; i += b->size) {
      *(void **)(p + i) = b->mem;
      b->mem = p + i;
    }

    // Update count of pages used for this bucket
    b->pages++;
  }

  // Allocate chunk from bucket
  addr = b->mem;
  b->mem = *(void **) addr;

  // Return allocated chunk
  return addr;
}
Example #4
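/* Unmap a memory-mapped I/O range and return its pages to vmap. */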
void miounmap(void *addr, int size) {
    int i;
    int pages = PAGES(size);

    for (i = 0; i < pages; i++) unmap_page((char *) addr + PTOB(i));
    rmap_free(vmap, BTOP(addr), pages);
}
Example #5
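/*
 * Check that [addr, addr + size) lies within user virtual memory and that
 * the whole range is currently allocated in vmap.
 */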
static int valid_range(void *addr, int size) {
    int pages = PAGES(size);

    if ((unsigned long) addr < VMEM_START) return 0;
    if (KERNELSPACE((unsigned long) addr + pages * PAGESIZE)) return 0;
    if (rmap_status(vmap, BTOP(addr), pages) != 1) return 0;
    return 1;
}
Example #6
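/*
 * mmap entry point for the memory special files: translate an offset into
 * /dev/mem to a page frame number; /dev/kmem, /dev/allkmem and /dev/zero
 * mappings are rejected here.
 */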
/*ARGSUSED2*/
static int
mmmmap(dev_t dev, off_t off, int prot)
{
	pfn_t pf;
	struct memlist *pmem;
	minor_t minor = getminor(dev);

	switch (minor) {
	case M_MEM:
		pf = btop(off);
		memlist_read_lock();
		for (pmem = phys_install; pmem != NULL; pmem = pmem->next) {
			if (pf >= BTOP(pmem->address) &&
			    pf < BTOP(pmem->address + pmem->size)) {
				memlist_read_unlock();
				return (impl_obmem_pfnum(pf));
			}
		}
		memlist_read_unlock();
		break;

	case M_KMEM:
	case M_ALLKMEM:
		/* no longer supported with KPR */
		return (-1);

	case M_ZERO:
		/*
		 * We shouldn't be mmap'ing to /dev/zero here as
		 * mmsegmap() should have already converted
		 * a mapping request for this device to a mapping
		 * using seg_vn for anonymous memory.
		 */
		break;

	}
	return (-1);
}
Example #7
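/*
 * Release a filemap: drop its file handle, return the mapped address range
 * to vmap, and free the filemap's own handle.
 */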
static int free_filemap(struct filemap *fm) {
    int rc;

    hunprotect(fm->file);
    rc = hfree(fm->file);
    if (rc < 0) return rc;

    rmap_free(vmap, BTOP(fm->addr), PAGES(fm->size));

    hunprotect(fm->self);
    rc = hfree(fm->self);
    if (rc < 0) return rc;

    return 0;
}
Example #8
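/*
 * Map a range of physical (device) memory into virtual address space and
 * return the virtual address, or NULL if no address space is available.
 */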
void *miomap(unsigned long addr, int size, int protect) {
    char *vaddr;
    int i;
    unsigned long flags = pte_flags_from_protect(protect);
    int pages = PAGES(size);

    vaddr = (char *) PTOB(rmap_alloc(vmap, pages));
    if (vaddr == NULL) return NULL;

    for (i = 0; i < pages; i++) {
        map_page(vaddr + PTOB(i), BTOP(addr) + i, flags | PT_PRESENT);
    }

    return vaddr;
}
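
/*
 * Illustrative usage sketch, not part of the original source: map one page
 * of a device register window, read a register, and unmap it again. The
 * physical address and the PAGE_READWRITE protection constant are
 * assumptions made for the example.
 */
static unsigned long read_device_status(void) {
    volatile unsigned long *regs;
    unsigned long status;

    regs = (volatile unsigned long *) miomap(0xFEC00000, PAGESIZE, PAGE_READWRITE);
    if (regs == NULL) return 0;
    status = regs[0];
    miounmap((void *) regs, PAGESIZE);
    return status;
}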
Example #9
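/*
 * Write dirty file-mapped pages in [addr, addr + size) back to their
 * backing files, locking each filemap while its pages are saved.
 */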
int vmsync(void *addr, unsigned long size) {
    struct filemap *fm = NULL;
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    vaddr = (char *) addr;
    for (i = 0; i < pages; i++) {
        if (page_directory_mapped(vaddr)) {
            pte_t flags = get_page_flags(vaddr);
            if ((flags & (PT_FILE | PT_PRESENT | PT_DIRTY)) == (PT_FILE | PT_PRESENT | PT_DIRTY)) {
                unsigned long pfn = BTOP(virt2phys(vaddr));
                struct filemap *newfm = (struct filemap *) hlookup(pfdb[pfn].owner);
                if (newfm != fm) {
                    if (fm) {
                        rc = unlock_filemap(fm);
                        if (rc < 0) return rc;
                    }
                    fm = newfm;
                    rc = wait_for_object(fm, INFINITE);
                    if (rc < 0) return rc;
                }

                rc = save_file_page(fm, vaddr);
                if (rc < 0) return rc;
            }
        }
        vaddr += PAGESIZE;
    }

    if (fm) {
        rc = unlock_filemap(fm);
        if (rc < 0) return rc;
    }

    return 0;
}
Example #10
void kfree(void *addr) {
  unsigned long bucket;
  struct bucket *b;

  // Check for NULL
  if (!addr) return;

  // Get page information
  bucket = pfdb[BTOP(virt2phys(addr))].size;

  // If a whole page or more, free directly
  if (bucket >= PAGESHIFT) {
    free_pages(addr, bucket - PAGESHIFT);
    return;
  }

  // Get bucket
  b = &buckets[bucket];

  // Free chunk to bucket
  *(void **) addr = b->mem;
  b->mem = addr;
}
Example #11
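/* Return the page frame number a virtual address maps to, from its PTE. */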
unsigned long virt2pfn(void *vaddr) {
  return BTOP(GET_PTE(vaddr) & PT_PFNMASK);
}
Example #12
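/*
 * read/write entry point for the memory special files: /dev/mem performs
 * physical memory I/O, /dev/kmem and /dev/allkmem perform kernel virtual
 * memory I/O, /dev/zero reads back zeroes, and /dev/null discards writes.
 */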
/*ARGSUSED3*/
static int
mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
{
	pfn_t v;
	struct iovec *iov;
	int error = 0;
	size_t c;
	ssize_t oresid = uio->uio_resid;
	minor_t minor = getminor(dev);

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor) {

		case M_MEM:
			memlist_read_lock();
			if (!address_in_memlist(phys_install,
			    (uint64_t)uio->uio_loffset, 1)) {
				memlist_read_unlock();
				error = EFAULT;
				break;
			}
			memlist_read_unlock();

			v = BTOP((u_offset_t)uio->uio_loffset);
			error = mmio(uio, rw, v,
			    uio->uio_loffset & PAGEOFFSET, 0, NULL);
			break;

		case M_KMEM:
		case M_ALLKMEM:
			{
			page_t **ppp = NULL;
			caddr_t vaddr = (caddr_t)uio->uio_offset;
			int try_lock = NEED_LOCK_KVADDR(vaddr);
			int locked = 0;

			if ((error = plat_mem_do_mmio(uio, rw)) != ENOTSUP)
				break;

			/*
			 * If vaddr does not map a valid page, as_pagelock()
			 * will return failure. Hence we can't check the
			 * return value and return EFAULT here as we'd like.
			 * seg_kp and seg_kpm do not properly support
			 * as_pagelock() for this context so we avoid it
			 * using the try_lock set check above.  Some day when
			 * the kernel page locking gets redesigned all this
			 * muck can be cleaned up.
			 */
			if (try_lock)
				locked = (as_pagelock(&kas, &ppp, vaddr,
				    PAGESIZE, S_WRITE) == 0);

			v = hat_getpfnum(kas.a_hat,
			    (caddr_t)(uintptr_t)uio->uio_loffset);
			if (v == PFN_INVALID) {
				if (locked)
					as_pageunlock(&kas, ppp, vaddr,
					    PAGESIZE, S_WRITE);
				error = EFAULT;
				break;
			}

			error = mmio(uio, rw, v, uio->uio_loffset & PAGEOFFSET,
			    minor == M_ALLKMEM || mm_kmem_io_access,
			    (locked && ppp) ? *ppp : NULL);
			if (locked)
				as_pageunlock(&kas, ppp, vaddr, PAGESIZE,
				    S_WRITE);
			}

			break;

		case M_ZERO:
			if (rw == UIO_READ) {
				label_t ljb;

				if (on_fault(&ljb)) {
					no_fault();
					error = EFAULT;
					break;
				}
				uzero(iov->iov_base, iov->iov_len);
				no_fault();
				uio->uio_resid -= iov->iov_len;
				uio->uio_loffset += iov->iov_len;
				break;
			}
			/* else it's a write, fall through to NULL case */
			/*FALLTHROUGH*/

		case M_NULL:
			if (rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			iov->iov_base += c;
			iov->iov_len -= c;
			uio->uio_loffset += c;
			uio->uio_resid -= c;
			break;

		}
	}
	return (uio->uio_resid == oresid ? error : 0);
}
Example #13
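/* List the user virtual memory map for the proc file system. */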
int vmem_proc(struct proc_file *pf, void *arg) {
    return list_memmap(pf, vmap, BTOP(VMEM_START));
}
Example #14
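/*
 * Reserve and/or commit a range of virtual memory. MEM_RESERVE allocates
 * address space from vmap (optionally 64K-aligned); MEM_COMMIT backs each
 * page of the range with a zeroed page frame.
 */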
void *vmalloc(void *addr, unsigned long size, int type, int protect, unsigned long tag, int *rc) {
    int pages = PAGES(size);
    unsigned long flags = pte_flags_from_protect(protect);
    int i;

    if (rc) *rc = 0;
    if (size == 0) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    if ((type & MEM_COMMIT) != 0 && flags == 0xFFFFFFFF) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }
    addr = (void *) PAGEADDR(addr);
    if (!addr && (type & MEM_COMMIT) != 0) type |= MEM_RESERVE;
    if (!tag) tag = 'VM';

    if (type & MEM_RESERVE) {
        if (addr == NULL) {
            if (type & MEM_ALIGN64K) {
                addr = (void *) PTOB(rmap_alloc_align(vmap, pages, 64 * 1024 / PAGESIZE));
            } else {
                addr = (void *) PTOB(rmap_alloc(vmap, pages));
            }

            if (addr == NULL) {
                if (rc) *rc = -ENOMEM;
                return NULL;
            }
        } else {
            if (rmap_reserve(vmap, BTOP(addr), pages)) {
                if (rc) *rc = -ENOMEM;
                return NULL;
            }
        }
    } else {
        if (!valid_range(addr, size)) {
            if (rc) *rc = -EFAULT;
            return NULL;
        }
    }

    if (type & MEM_COMMIT) {
        char *vaddr;
        unsigned long pfn;

        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_mapped(vaddr)) {
                set_page_flags(vaddr, flags | PT_PRESENT);
            } else {
                pfn = alloc_pageframe(tag);
                if (pfn == 0xFFFFFFFF) {
                    if (rc) *rc = -ENOMEM;
                    return NULL;
                }

                map_page(vaddr, pfn, flags | PT_PRESENT);
                memset(vaddr, 0, PAGESIZE);
            }
            vaddr += PAGESIZE;
        }
    }

    return addr;
}
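
/*
 * Illustrative usage sketch, not part of the original source: reserve and
 * commit one page, use it, then decommit and release it. PAGE_READWRITE is
 * an assumed protection constant understood by pte_flags_from_protect().
 */
static int vmalloc_example(void) {
    int rc;
    char *p;

    p = (char *) vmalloc(NULL, PAGESIZE, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE, 0, &rc);
    if (p == NULL) return rc;
    p[0] = 42;
    return vmfree(p, PAGESIZE, MEM_DECOMMIT | MEM_RELEASE);
}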
Example #15
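/*
 * Initialize the virtual memory manager: allocate the virtual memory
 * resource map and mark the range from VMEM_START up to the kernel
 * (OSBASE) as free.
 */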
void init_vmm() {
    vmap = (struct rmap *) kmalloc(VMAP_ENTRIES * sizeof(struct rmap));
    rmap_init(vmap, VMAP_ENTRIES);
    rmap_free(vmap, BTOP(VMEM_START), BTOP(OSBASE - VMEM_START));
}
Example #16
/* 
 *  Dump physical memory to an ELF-format dumpfile;
 *  just pass in an unused filename.
 */
void
cmd_snap(void)
{
        int c, fd, n;
	physaddr_t paddr;
	size_t offset;
	char *buf;
	char *filename;
	struct node_table *nt;
	int type;
	char *elf_header;
	Elf64_Phdr *load;
	int load_index;

	if (!supported)
		error(FATAL, "command not supported on the %s architecture\n",
			pc->machine_type);

	filename = NULL;
	buf = GETBUF(PAGESIZE()); 
	type = KDUMP_ELF64;

        while ((c = getopt(argcnt, args, "n")) != EOF) {
                switch(c)
                {
		case 'n':
			if (machine_type("X86_64"))
				option_not_supported('n');
			else
				type = NETDUMP_ELF64;
			break;
                default:
                        argerrs++;
                        break;
                }
        }

        if (argerrs || !args[optind])
                cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		if (filename)
                	cmd_usage(pc->curcmd, SYNOPSIS);

		if (file_exists(args[optind], NULL))
			error(FATAL, "%s: file already exists\n", args[optind]);
		else if ((fd = open(args[optind], O_RDWR|O_CREAT, 0644)) < 0)
			error(FATAL, args[optind]);

		filename = args[optind];
		optind++;
	}

	if (!filename)
                cmd_usage(pc->curcmd, SYNOPSIS);

	init_ram_segments();

	if (!(elf_header = generate_elf_header(type, fd, filename)))
		error(FATAL, "cannot generate ELF header\n");

	load = (Elf64_Phdr *)(elf_header + sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr));
	load_index = machine_type("X86_64") || machine_type("IA64") ? 1 : 0;

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		paddr = nt->start_paddr;
		offset = load[load_index + n].p_offset;

		for (c = 0; c < nt->size; c++, paddr += PAGESIZE()) {
			if (!verify_paddr(paddr))
				continue;
			if (!readmem(paddr, PHYSADDR, &buf[0], PAGESIZE(), 
			    "memory page", QUIET|RETURN_ON_ERROR))
				continue;

			lseek(fd, (off_t)(paddr + offset - nt->start_paddr), SEEK_SET);
			if (write(fd, &buf[0], PAGESIZE()) != PAGESIZE())
				error(FATAL, "write to dumpfile failed\n");

			if (!print_progress(filename, BTOP(paddr)))
				return;
		}
	}

        fprintf(stderr, "\r%s: [100%%] ", filename);
	fprintf(fp, "\n");
	sprintf(buf, "/bin/ls -l %s\n", filename);
	system(buf);

	FREEBUF(elf_header);
	FREEBUF(buf);
}