Example #1
static int
elf64_exec(struct preloaded_file *fp)
{
	struct file_metadata	*md;
	Elf_Ehdr		*hdr;
	struct ia64_pte		pte;
	struct bootinfo		*bi;

	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
		return (EFTYPE);		/* XXX no better errno available */
	hdr = (Elf_Ehdr *)&(md->md_data);

	/*
	 * Ugly hack, similar to linux. Dump the bootinfo into a
	 * special page reserved in the link map.
	 */
	bi = &bootinfo;
	bzero(bi, sizeof(struct bootinfo));
	bi_load(bi, fp);

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WB. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

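	/*
	 * Build the translation for region 7: present, write-back
	 * cacheable, accessed, dirty, kernel-only, RWX, with physical
	 * page number 0 (an identity mapping of physical memory).
	 */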
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RWX;
	pte.pte_ppn = 0;

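	/*
	 * Pin the region 7 mapping into instruction and data
	 * translation register 0: cr.ifa holds the mapped address,
	 * cr.itir the page size (ps = 28, i.e. a 256M page), with an
	 * instruction serialize after each change.
	 */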
	__asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7)));
	__asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
	__asm __volatile("srlz.i;;");
	__asm __volatile("itr.i itr[%0]=%1;;"
			 :: "r"(0), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.i;;");
	__asm __volatile("itr.d dtr[%0]=%1;;"
			 :: "r"(0), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.i;;");

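	/* enter_kernel() does not return. */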
	enter_kernel(fp->f_name, hdr->e_entry, bi);
}
Example #2
static int
elf64_exec(struct preloaded_file *fp)
{
    struct file_metadata	*md;
    Elf_Ehdr		*hdr;
    pt_entry_t		pte;
    uint64_t		bi_addr;

    md = file_findmetadata(fp, MODINFOMD_ELFHDR);
    if (md == NULL)
        return (EINVAL);
    hdr = (Elf_Ehdr *)&(md->md_data);

    bi_load(fp, &bi_addr);

    printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);

    ldr_enter(fp->f_name);

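    /*
     * Disable interrupt delivery (psr.i) and interruption collection
     * (psr.ic) before replacing the translation registers.
     */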
    __asm __volatile("rsm psr.ic|psr.i;;");
    __asm __volatile("srlz.i;;");

    /*
     * Region 6 is direct mapped UC and region 7 is direct mapped
     * WB. The details of this are controlled by the Alt {I,D}TLB
     * handlers. Here we just make sure that they have the largest
     * possible page size to minimise TLB usage.
     */
    ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
    ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

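    /*
     * Build a kernel-only, write-back, RWX translation with physical
     * page number 0; PTE_ED defers exceptions on speculative loads.
     */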
    pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
          PTE_PL_KERN | PTE_AR_RWX | PTE_ED;

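    /*
     * Set up cr.ifa/cr.itir, purge any translations that overlap
     * region 7, then insert the new mapping into instruction and
     * data translation register 0, serializing after each insert.
     */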
    __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7)));
    __asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
    __asm __volatile("ptr.i %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28<<2));
    __asm __volatile("ptr.d %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28<<2));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");

    enter_kernel(hdr->e_entry, bi_addr);

    /* NOTREACHED */
    return (0);
}
Example #3
/*
 * Allow user processes to mmap(2) some memory sections
 * instead of going through read/write.
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	void *ptr;
	u_long limit;
	int error;

	/*
	 * /dev/mem is the only one that makes sense through this
	 * interface.  For /dev/kmem any physaddr we return here
	 * could be transient and hence incorrect or invalid at
	 * a later time.
	 */
	if (dev2unit(dev) != CDEV_MINOR_MEM)
		return (ENXIO);

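	/*
	 * Translate the physical offset to a kernel virtual address;
	 * the region the VA falls in tells us which memory attribute
	 * to use.
	 */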
	error = mem_phys2virt(offset, prot, &ptr, &limit);
	if (error)
		return (error);

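	/*
	 * Addresses in region 7 (the cacheable direct map) get
	 * write-back; anything else is mapped uncacheable.
	 */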
	*paddr = offset;
	*memattr = ((uintptr_t)ptr >= IA64_RR_BASE(7)) ?
	    VM_MEMATTR_WRITE_BACK : VM_MEMATTR_UNCACHEABLE;
	return (0);
}
Example #4
static void *
va2pa(vm_offset_t va, size_t *len)
{
	uint64_t pa;

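	/*
	 * Region 7 direct-maps physical memory, so masking off the
	 * region bits yields the physical address.
	 */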
	if (va >= IA64_RR_BASE(7)) {
		pa = IA64_RR_MASK(va);
		return ((void *)pa);
	}

	printf("\n%s: va=%lx, *len=%lx\n", __func__, va, *len);
	*len = 0;
	return (NULL);
}
Example #5
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	off_t ofs;
	vm_offset_t addr;
	void *ptr;
	u_long limit;
	int count, error, phys, rw;

	error = 0;
	rw = (uio->uio_rw == UIO_READ) ? VM_PROT_READ : VM_PROT_WRITE;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		ofs = uio->uio_offset;

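		/*
		 * /dev/mem offsets are physical. For /dev/kmem, offsets
		 * in the direct-mapped regions (6 and 7) are converted
		 * to physical and handled the same way.
		 */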
		phys = (dev2unit(dev) == CDEV_MINOR_MEM) ? 1 : 0;
		if (phys == 0 && ofs >= IA64_RR_BASE(6)) {
			ofs = IA64_RR_MASK(ofs);
			phys++;
		}

		if (phys) {
			error = mem_phys2virt(ofs, rw, &ptr, &limit);
			if (error)
				return (error);

			count = min(uio->uio_resid, limit);
			error = uiomove(ptr, count, uio);
		} else {
			ptr = (void *)ofs;
			count = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			limit = round_page(ofs + count);
			addr = trunc_page(ofs);
			if (addr < VM_MAXUSER_ADDRESS)
				return (EINVAL);
			for (; addr < limit; addr += PAGE_SIZE) {
				if (pmap_kextract(addr) == 0)
					return (EFAULT);
			}
			if (!kernacc(ptr, count, rw))
				return (EFAULT);
			error = uiomove(ptr, count, uio);
		}
	}
	return (error);
}
Example #6
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t addr, eaddr, o, v;
	int c, error, rw;

	error = 0;
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			/* Allow only access permitted by ia64_pa_access(). */
			rw = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;
			if ((ia64_pa_access(v) & rw) != rw) {
				error = EFAULT;
				c = 0;
				break;
			}

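			/*
			 * Copy at most to the end of the current page,
			 * going through the region 7 direct map.
			 */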
			o = uio->uio_offset & PAGE_MASK;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((caddr_t)IA64_PHYS_TO_RR7(v), c, uio);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

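			/*
			 * Direct-mapped addresses are really physical;
			 * strip the region bits and handle them like
			 * /dev/mem.
			 */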
			if (v >= IA64_RR_BASE(6)) {
				v = IA64_RR_MASK(v);
				goto kmemphys;
			}

			c = min(iov->iov_len, MAXPHYS);

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			}
			if (!kernacc((caddr_t)v, c, (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}