Example no. 1
/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	vn_open() asserts that the cred must match the process's cred.
 *
 *	NOTE! when fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
    struct nlookupdata nd;
    struct thread *td;
    struct file *fp;
    int error;

    if ((error = falloc(NULL, fpp, NULL)) != 0)
	return (error);
    fp = *fpp;
    td = curthread;
    if (td->td_proc)
	fsetcred(fp, td->td_proc->p_ucred);
    error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
    flags = FFLAGS(flags);
    if (error == 0)
	error = vn_open(&nd, fp, flags, mode);
    nlookup_done(&nd);
    if (error) {
	fdrop(fp);
	*fpp = NULL;
    }
    return(error);
}
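The helper above follows the usual DragonFly pattern: allocate the file with falloc(), attach the caller's credentials, resolve the path, and hand the result to vn_open().  A minimal caller sketch, not taken from the source above: it assumes fp_close() from the same kern_fp.c API is the matching release helper, and the path, flags, and mode are purely illustrative.

/*
 * Hypothetical kernel-context caller: open (creating if needed) a file
 * with the O_* flag convention noted in the fp_open() comment, then
 * release the file pointer again via the assumed fp_close() helper.
 */
static int
example_touch_file(const char *path)
{
	file_t fp;
	int error;

	error = fp_open(path, O_RDWR | O_CREAT, 0600, &fp);
	if (error)
		return (error);

	/* ... fp could now be passed to the other fp_*() helpers ... */

	return (fp_close(fp));
}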
Example no. 2
/*
 * MPALMOSTSAFE
 */
int
sys_otruncate(struct otruncate_args *uap)
{
    struct nlookupdata nd;
    int error;

    get_mplock();
    error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
    if (error == 0)
        error = kern_truncate(&nd, uap->length);
    nlookup_done(&nd);
    rel_mplock();
    return (error);
}
Example no. 3
/*
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return(error);
}
Example no. 4
/*
 * MPALMOSTSAFE
 */
int
sys_ocreat(struct ocreat_args *uap)
{
    struct nlookupdata nd;
    int error;

    get_mplock();
    error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
    if (error == 0) {
        error = kern_open(&nd, O_WRONLY | O_CREAT | O_TRUNC,
                          uap->mode, &uap->sysmsg_iresult);
    }
    rel_mplock();
    return (error);
}
Example no. 5
/*
 * MPALMOSTSAFE
 */
int
sys_olstat(struct olstat_args *uap)
{
	struct nlookupdata nd;
	struct stat st;
	int error;

	get_mplock();
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0) {
		error = kern_stat(&nd, &st);
		if (error == 0)
			error = compat_43_copyout_stat(&st, uap->ub);
		nlookup_done(&nd);
	}
	rel_mplock();
	return (error);
}
Example no. 6
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}
Example no. 7
/*
 * nlookup_init() for "at" family of syscalls.
 *
 * Works similarly to nlookup_init() but if path is relative and fd is not
 * AT_FDCWD, path is interpreted relative to the directory pointed to by fd.
 * In this case, the file entry pointed to by fd is ref'ed and returned in
 * *fpp. 
 *
 * If the call succeeds, nlookup_done_at() must be called to clean up the nd
 * and release the ref to the file entry.
 */
int
nlookup_init_at(struct nlookupdata *nd, struct file **fpp, int fd, 
		const char *path, enum uio_seg seg, int flags)
{
	struct thread *td = curthread;
	struct file* fp;
	struct vnode *vp;
	int error;

	*fpp = NULL;

	if ((error = nlookup_init(nd, path, seg, flags)) != 0) {
		return (error);
	}

	if (nd->nl_path[0] != '/' && fd != AT_FDCWD) {
		if ((error = holdvnode(td, fd, &fp)) != 0)
			goto done;
		vp = (struct vnode*)fp->f_data;
		if (vp->v_type != VDIR || fp->f_nchandle.ncp == NULL) {
			fdrop(fp);
			fp = NULL;
			error = ENOTDIR;
			goto done;
		}
		if (nd->nl_flags & NLC_NCDIR) {
			cache_drop_ncdir(&nd->nl_nch);
			nd->nl_flags &= ~NLC_NCDIR;
		} else {
			cache_drop(&nd->nl_nch);
		}
		cache_copy(&fp->f_nchandle, &nd->nl_nch);
		*fpp = fp;
	}

done:
	if (error)
		nlookup_done(nd);
	return (error);
}
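A sketch of the calling pattern the comment above describes, loosely modeled on an *at()-style syscall.  It is illustrative only: the helper name is hypothetical, kern_stat() is borrowed from the other examples in this section, and nlookup_done_at() is assumed to take the nlookupdata plus the file pointer returned in *fpp.

/*
 * Hypothetical *at()-style caller: resolve a user path relative to fd
 * (or to the CWD when fd == AT_FDCWD), stat the result, and clean up
 * with the companion nlookup_done_at() named in the comment above.
 */
static int
example_stat_at(int fd, const char *upath, struct stat *st)
{
	struct nlookupdata nd;
	struct file *fp;
	int error;

	error = nlookup_init_at(&nd, &fp, fd, upath, UIO_USERSPACE,
				NLC_FOLLOW);
	if (error == 0) {
		error = kern_stat(&nd, st);
		nlookup_done_at(&nd, fp);
	}
	return (error);
}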
Example no. 8
/*
 * Simple all-in-one nlookup.  Returns a locked namecache structure or NULL
 * if an error occurred.
 *
 * Note that the returned ncp is not checked for permissions, though VEXEC
 * is checked on the directory path leading up to the result.  The caller
 * must call naccess() to check the permissions of the returned leaf.
 */
struct nchandle
nlookup_simple(const char *str, enum uio_seg seg,
	       int niflags, int *error)
{
    struct nlookupdata nd;
    struct nchandle nch;

    *error = nlookup_init(&nd, str, seg, niflags);
    if (*error == 0) {
	    if ((*error = nlookup(&nd)) == 0) {
		    nch = nd.nl_nch;	/* keep hold ref from structure */
		    cache_zero(&nd.nl_nch); /* and NULL out */
	    } else {
		    cache_zero(&nch);
	    }
	    nlookup_done(&nd);
    } else {
	    cache_zero(&nch);
    }
    return(nch);
}
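An illustrative caller for nlookup_simple(), not taken from the source: it simply checks whether a kernel-space path resolves and then releases the handle again.  cache_put() is assumed to be the matching release for the locked, referenced nchandle the comment above describes; the path argument is hypothetical.

/*
 * Hypothetical existence check built on nlookup_simple().  On success
 * the returned nchandle is locked and referenced, so it is released
 * with the assumed cache_put() before returning.
 */
static int
example_path_exists(const char *path)
{
	struct nchandle nch;
	int error;

	nch = nlookup_simple(path, UIO_SYSSPACE, NLC_FOLLOW, &error);
	if (error == 0 && nch.ncp != NULL)
		cache_put(&nch);
	return (error);
}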
Example no. 9
/*
 * stat_args(char *path, struct dfbsd12_stat *ub)
 *
 * Get file status; this version follows links.
 *
 * MPALMOSTSAFE
 */
int
sys_dfbsd12_stat(struct dfbsd12_stat_args *uap)
{
	struct nlookupdata nd;
	struct dfbsd12_stat ost;
	struct stat st;
	int error;

	get_mplock();
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0) {
		error = kern_stat(&nd, &st);
		if (error == 0) {
			cvtstat(&ost, &st);
			error = copyout(&ost, uap->ub, sizeof(ost));
		}
	}
	nlookup_done(&nd);
	rel_mplock();
	return (error);
}
Example no. 10
				if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
					pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
			}
#endif
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &pmp->pm_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	devvp = NULL;
	error = nlookup_init(&nd, args.fspec, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);
	if (error)
		return (error);

	if (!vn_isdisk(devvp, &error)) {
		vrele(devvp);
		return (error);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
Example no. 11
static int
link_elf_load_file(const char* filename, linker_file_t* result)
{
    struct nlookupdata nd;
    struct thread *td = curthread;	/* XXX */
    struct proc *p = td->td_proc;
    struct vnode *vp;
    Elf_Ehdr *hdr;
    caddr_t firstpage;
    int nbytes, i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr *segs[2];
    int nsegs;
    Elf_Phdr *phdyn;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    int error = 0;
    int resid;
    elf_file_t ef;
    linker_file_t lf;
    char *pathname;
    Elf_Shdr *shdr;
    int symtabindex;
    int symstrindex;
    int symcnt;
    int strcnt;

    /* XXX Hack for firmware loading where p == NULL */
    if (p == NULL) {
	p = &proc0;
    }

    KKASSERT(p != NULL);
    if (p->p_ucred == NULL) {
	kprintf("link_elf_load_file: cannot load '%s' from filesystem"
		" this early\n", filename);
	return ENOENT;
    }
    shdr = NULL;
    lf = NULL;
    pathname = linker_search_path(filename);
    if (pathname == NULL)
	return ENOENT;

    error = nlookup_init(&nd, pathname, UIO_SYSSPACE, NLC_FOLLOW|NLC_LOCKVP);
    if (error == 0)
	error = vn_open(&nd, NULL, FREAD, 0);
    kfree(pathname, M_LINKER);
    if (error) {
	nlookup_done(&nd);
	return error;
    }
    vp = nd.nl_open_vp;
    nd.nl_open_vp = NULL;
    nlookup_done(&nd);

    /*
     * Read the elf header from the file.
     */
    firstpage = kmalloc(PAGE_SIZE, M_LINKER, M_WAITOK);
    hdr = (Elf_Ehdr *)firstpage;
    error = vn_rdwr(UIO_READ, vp, firstpage, PAGE_SIZE, 0,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    nbytes = PAGE_SIZE - resid;
    if (error)
	goto out;

    if (!IS_ELF(*hdr)) {
	error = ENOEXEC;
	goto out;
    }

    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS
      || hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
	link_elf_error("Unsupported file layout");
	error = ENOEXEC;
	goto out;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT
      || hdr->e_version != EV_CURRENT) {
	link_elf_error("Unsupported file version");
	error = ENOEXEC;
	goto out;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
	error = ENOSYS;
	goto out;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
	link_elf_error("Unsupported machine");
	error = ENOEXEC;
	goto out;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always true in practice.  And, it simplifies things considerably.
     */
    if (!((hdr->e_phentsize == sizeof(Elf_Phdr)) &&
	  (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= PAGE_SIZE) &&
	  (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= nbytes)))
	link_elf_error("Unreadable program headers");

    /*
     * Scan the program header entries, and save key information.
     *
     * We rely on there being exactly two load segments, text and data,
     * in that order.
     */
    phdr = (Elf_Phdr *) (firstpage + hdr->e_phoff);
    phlimit = phdr + hdr->e_phnum;
    nsegs = 0;
    phdyn = NULL;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_LOAD:
	    if (nsegs == 2) {
		link_elf_error("Too many sections");
		error = ENOEXEC;
		goto out;
	    }
	    segs[nsegs] = phdr;
	    ++nsegs;
	    break;

	case PT_PHDR:
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_INTERP:
	    error = ENOSYS;
	    goto out;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	link_elf_error("Object is not dynamically-linked");
	error = ENOEXEC;
	goto out;
    }

    /*
     * Allocate the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_memsz);
    mapsize = base_vlimit - base_vaddr;

    ef = kmalloc(sizeof(struct elf_file), M_LINKER, M_WAITOK | M_ZERO);
    ef->address = kmalloc(mapsize, M_LINKER, M_WAITOK);
    mapbase = ef->address;

    /*
     * Read the text and data sections and zero the bss.
     */
    for (i = 0; i < 2; i++) {
	caddr_t segbase = mapbase + segs[i]->p_vaddr - base_vaddr;
	error = vn_rdwr(UIO_READ, vp,
			segbase, segs[i]->p_filesz, segs[i]->p_offset,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error) {
	    kfree(ef->address, M_LINKER);
	    kfree(ef, M_LINKER);
	    goto out;
	}
	bzero(segbase + segs[i]->p_filesz,
	      segs[i]->p_memsz - segs[i]->p_filesz);
    }

    ef->dynamic = (const Elf_Dyn *) (mapbase + phdyn->p_vaddr - base_vaddr);

    lf = linker_make_file(filename, ef, &link_elf_file_ops);
    if (lf == NULL) {
	kfree(ef->address, M_LINKER);
	kfree(ef, M_LINKER);
	error = ENOMEM;
	goto out;
    }
    lf->address = ef->address;
    lf->size = mapsize;

    error = parse_dynamic(lf);
    if (error)
	goto out;
    link_elf_reloc_local(lf);
    error = linker_load_dependencies(lf);
    if (error)
	goto out;
    error = relocate_file(lf);
    if (error)
	goto out;

    /* Try to load the symbol table if it's present.  (You can strip it!) */
    nbytes = hdr->e_shnum * hdr->e_shentsize;
    if (nbytes == 0 || hdr->e_shoff == 0)
	goto nosyms;
    shdr = kmalloc(nbytes, M_LINKER, M_WAITOK | M_ZERO);
    error = vn_rdwr(UIO_READ, vp,
		    (caddr_t)shdr, nbytes, hdr->e_shoff,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;
    symtabindex = -1;
    symstrindex = -1;
    for (i = 0; i < hdr->e_shnum; i++) {
	if (shdr[i].sh_type == SHT_SYMTAB) {
	    symtabindex = i;
	    symstrindex = shdr[i].sh_link;
	}
    }
    if (symtabindex < 0 || symstrindex < 0)
	goto nosyms;

    symcnt = shdr[symtabindex].sh_size;
    ef->symbase = kmalloc(symcnt, M_LINKER, M_WAITOK);
    strcnt = shdr[symstrindex].sh_size;
    ef->strbase = kmalloc(strcnt, M_LINKER, M_WAITOK);
    error = vn_rdwr(UIO_READ, vp,
		    ef->symbase, symcnt, shdr[symtabindex].sh_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;
    error = vn_rdwr(UIO_READ, vp,
		    ef->strbase, strcnt, shdr[symstrindex].sh_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;

    ef->ddbsymcnt = symcnt / sizeof(Elf_Sym);
    ef->ddbsymtab = (const Elf_Sym *)ef->symbase;
    ef->ddbstrcnt = strcnt;
    ef->ddbstrtab = ef->strbase;

nosyms:

    *result = lf;

out:
    if (error && lf)
	linker_file_unload(lf);
    if (shdr)
	kfree(shdr, M_LINKER);
    if (firstpage)
	kfree(firstpage, M_LINKER);
    vn_unlock(vp);
    vn_close(vp, FREAD, NULL);

    return error;
}
Example no. 12
static int
link_elf_obj_load_file(const char *filename, linker_file_t * result)
{
	struct nlookupdata nd;
	struct thread  *td = curthread;	/* XXX */
	struct proc    *p = td->td_proc;
	char           *pathname;
	struct vnode   *vp;
	Elf_Ehdr       *hdr;
	Elf_Shdr       *shdr;
	Elf_Sym        *es;
	int		nbytes, i, j;
	vm_offset_t	mapbase;
	size_t		mapsize;
	int		error = 0;
	int		resid;
	elf_file_t	ef;
	linker_file_t	lf;
	int		symtabindex;
	int		symstrindex;
	int		shstrindex;
	int		nsym;
	int		pb, rl, ra;
	int		alignmask;

	/* XXX Hack for firmware loading where p == NULL */
	if (p == NULL) {
		p = &proc0;
	}

	KKASSERT(p != NULL);
	if (p->p_ucred == NULL) {
		kprintf("link_elf_obj_load_file: cannot load '%s' from filesystem"
			" this early\n", filename);
		return ENOENT;
	}
	shdr = NULL;
	lf = NULL;
	mapsize = 0;
	hdr = NULL;
	pathname = linker_search_path(filename);
	if (pathname == NULL)
		return ENOENT;

	error = nlookup_init(&nd, pathname, UIO_SYSSPACE, NLC_FOLLOW | NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	kfree(pathname, M_LINKER);
	if (error) {
		nlookup_done(&nd);
		return error;
	}
	vp = nd.nl_open_vp;
	nd.nl_open_vp = NULL;
	nlookup_done(&nd);

	/*
	 * Read the elf header from the file.
	 */
	hdr = kmalloc(sizeof(*hdr), M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, vp, (void *)hdr, sizeof(*hdr), 0,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error)
		goto out;
	if (resid != 0) {
		error = ENOEXEC;
		goto out;
	}
	if (!IS_ELF(*hdr)) {
		error = ENOEXEC;
		goto out;
	}

	if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS
	    || hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
		link_elf_obj_error(filename, "Unsupported file layout");
		error = ENOEXEC;
		goto out;
	}
	if (hdr->e_ident[EI_VERSION] != EV_CURRENT
	    || hdr->e_version != EV_CURRENT) {
		link_elf_obj_error(filename, "Unsupported file version");
		error = ENOEXEC;
		goto out;
	}
	if (hdr->e_type != ET_REL) {
		error = ENOSYS;
		goto out;
	}
	if (hdr->e_machine != ELF_TARG_MACH) {
		link_elf_obj_error(filename, "Unsupported machine");
		error = ENOEXEC;
		goto out;
	}

	ef = kmalloc(sizeof(struct elf_file), M_LINKER, M_WAITOK | M_ZERO);
	lf = linker_make_file(filename, ef, &link_elf_obj_file_ops);
	if (lf == NULL) {
		kfree(ef, M_LINKER);
		error = ENOMEM;
		goto out;
	}
	ef->nprogtab = 0;
	ef->e_shdr = NULL;
	ef->nreltab = 0;
	ef->nrelatab = 0;

	/* Allocate and read in the section header */
	nbytes = hdr->e_shnum * hdr->e_shentsize;
	if (nbytes == 0 || hdr->e_shoff == 0 ||
	    hdr->e_shentsize != sizeof(Elf_Shdr)) {
		error = ENOEXEC;
		goto out;
	}
	shdr = kmalloc(nbytes, M_LINKER, M_WAITOK);
	ef->e_shdr = shdr;
	error = vn_rdwr(UIO_READ, vp, (caddr_t) shdr, nbytes, hdr->e_shoff,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error)
		goto out;
	if (resid) {
		error = ENOEXEC;
		goto out;
	}
	/* Scan the section header for information and table sizing. */
	nsym = 0;
	symtabindex = -1;
	symstrindex = -1;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
			ef->nprogtab++;
			break;
		case SHT_SYMTAB:
			nsym++;
			symtabindex = i;
			symstrindex = shdr[i].sh_link;
			break;
		case SHT_REL:
			ef->nreltab++;
			break;
		case SHT_RELA:
			ef->nrelatab++;
			break;
		case SHT_STRTAB:
			break;
		}
	}
	if (ef->nprogtab == 0) {
		link_elf_obj_error(filename, "file has no contents");
		error = ENOEXEC;
		goto out;
	}
	if (nsym != 1) {
		/* Only allow one symbol table for now */
		link_elf_obj_error(filename, "file has no valid symbol table");
		error = ENOEXEC;
		goto out;
	}
	if (symstrindex < 0 || symstrindex > hdr->e_shnum ||
	    shdr[symstrindex].sh_type != SHT_STRTAB) {
		link_elf_obj_error(filename, "file has invalid symbol strings");
		error = ENOEXEC;
		goto out;
	}
	/* Allocate space for tracking the load chunks */
	if (ef->nprogtab != 0)
		ef->progtab = kmalloc(ef->nprogtab * sizeof(*ef->progtab),
				      M_LINKER, M_WAITOK | M_ZERO);
	if (ef->nreltab != 0)
		ef->reltab = kmalloc(ef->nreltab * sizeof(*ef->reltab),
				     M_LINKER, M_WAITOK | M_ZERO);
	if (ef->nrelatab != 0)
		ef->relatab = kmalloc(ef->nrelatab * sizeof(*ef->relatab),
				      M_LINKER, M_WAITOK | M_ZERO);
	if ((ef->nprogtab != 0 && ef->progtab == NULL) ||
	    (ef->nreltab != 0 && ef->reltab == NULL) ||
	    (ef->nrelatab != 0 && ef->relatab == NULL)) {
		error = ENOMEM;
		goto out;
	}
	if (symtabindex == -1)
		panic("lost symbol table index");
	/* Allocate space for and load the symbol table */
	ef->ddbsymcnt = shdr[symtabindex].sh_size / sizeof(Elf_Sym);
	ef->ddbsymtab = kmalloc(shdr[symtabindex].sh_size, M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, vp, (void *)ef->ddbsymtab,
			shdr[symtabindex].sh_size, shdr[symtabindex].sh_offset,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error)
		goto out;
	if (resid != 0) {
		error = EINVAL;
		goto out;
	}
	if (symstrindex == -1)
		panic("lost symbol string index");
	/* Allocate space for and load the symbol strings */
	ef->ddbstrcnt = shdr[symstrindex].sh_size;
	ef->ddbstrtab = kmalloc(shdr[symstrindex].sh_size, M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, vp, ef->ddbstrtab,
			shdr[symstrindex].sh_size, shdr[symstrindex].sh_offset,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error)
		goto out;
	if (resid != 0) {
		error = EINVAL;
		goto out;
	}
	/* Do we have a string table for the section names?  */
	shstrindex = -1;
	if (hdr->e_shstrndx != 0 &&
	    shdr[hdr->e_shstrndx].sh_type == SHT_STRTAB) {
		shstrindex = hdr->e_shstrndx;
		ef->shstrcnt = shdr[shstrindex].sh_size;
		ef->shstrtab = kmalloc(shdr[shstrindex].sh_size, M_LINKER,
				       M_WAITOK);
		error = vn_rdwr(UIO_READ, vp, ef->shstrtab,
				shdr[shstrindex].sh_size, shdr[shstrindex].sh_offset,
				UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
		if (error)
			goto out;
		if (resid != 0) {
			error = EINVAL;
			goto out;
		}
	}
	/* Size up code/data(progbits) and bss(nobits). */
	alignmask = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
			alignmask = shdr[i].sh_addralign - 1;
			mapsize += alignmask;
			mapsize &= ~alignmask;
			mapsize += shdr[i].sh_size;
			break;
		}
	}

	/*
	 * We know how much space we need for the text/data/bss/etc. This
	 * stuff needs to be in a single chunk so that profiling etc can get
	 * the bounds and gdb can associate offsets with modules
	 */
	ef->object = vm_object_allocate(OBJT_DEFAULT,
					round_page(mapsize) >> PAGE_SHIFT);
	if (ef->object == NULL) {
		error = ENOMEM;
		goto out;
	}
	vm_object_hold(ef->object);
	vm_object_reference_locked(ef->object);
	ef->address = (caddr_t) vm_map_min(&kernel_map);
	ef->bytes = 0;

	/*
	 * In order to satisfy x86_64's architectural requirements on the
	 * location of code and data in the kernel's address space, request a
	 * mapping that is above the kernel.
	 *
	 * vkernel64's text+data is outside the managed VM space entirely.
	 */
#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
	error = vkernel_module_memory_alloc(&mapbase, round_page(mapsize));
	vm_object_drop(ef->object);
#else
	mapbase = KERNBASE;
	error = vm_map_find(&kernel_map, ef->object, NULL,
			    0, &mapbase, round_page(mapsize),
			    PAGE_SIZE,
			    TRUE, VM_MAPTYPE_NORMAL,
			    VM_PROT_ALL, VM_PROT_ALL, FALSE);
	vm_object_drop(ef->object);
	if (error) {
		vm_object_deallocate(ef->object);
		ef->object = NULL;
		goto out;
	}
	/* Wire the pages */
	error = vm_map_wire(&kernel_map, mapbase,
			    mapbase + round_page(mapsize), 0);
#endif
	if (error != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}
	/* Inform the kld system about the situation */
	lf->address = ef->address = (caddr_t) mapbase;
	lf->size = round_page(mapsize);
	ef->bytes = mapsize;

	/*
	 * Now load code/data(progbits), zero bss(nobits), allocate space for
	 * and load relocs
	 */
	pb = 0;
	rl = 0;
	ra = 0;
	alignmask = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
			alignmask = shdr[i].sh_addralign - 1;
			mapbase += alignmask;
			mapbase &= ~alignmask;
			if (ef->shstrtab && shdr[i].sh_name != 0)
				ef->progtab[pb].name =
					ef->shstrtab + shdr[i].sh_name;
			else if (shdr[i].sh_type == SHT_PROGBITS)
				ef->progtab[pb].name = "<<PROGBITS>>";
			else
				ef->progtab[pb].name = "<<NOBITS>>";
#if 0
			if (ef->progtab[pb].name != NULL &&
			    !strcmp(ef->progtab[pb].name, "set_pcpu"))
				ef->progtab[pb].addr =
					dpcpu_alloc(shdr[i].sh_size);
#ifdef VIMAGE
			else if (ef->progtab[pb].name != NULL &&
				 !strcmp(ef->progtab[pb].name, VNET_SETNAME))
				ef->progtab[pb].addr =
					vnet_data_alloc(shdr[i].sh_size);
#endif
			else
#endif
				ef->progtab[pb].addr =
					(void *)(uintptr_t) mapbase;
			if (ef->progtab[pb].addr == NULL) {
				error = ENOSPC;
				goto out;
			}
			ef->progtab[pb].size = shdr[i].sh_size;
			ef->progtab[pb].sec = i;
			if (shdr[i].sh_type == SHT_PROGBITS) {
				error = vn_rdwr(UIO_READ, vp,
						ef->progtab[pb].addr,
						shdr[i].sh_size, shdr[i].sh_offset,
						UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred,
						&resid);
				if (error)
					goto out;
				if (resid != 0) {
					error = EINVAL;
					goto out;
				}
#if 0
				/* Initialize the per-cpu or vnet area. */
				if (ef->progtab[pb].addr != (void *)mapbase &&
				    !strcmp(ef->progtab[pb].name, "set_pcpu"))
					dpcpu_copy(ef->progtab[pb].addr,
						   shdr[i].sh_size);
#ifdef VIMAGE
				else if (ef->progtab[pb].addr !=
					 (void *)mapbase &&
					 !strcmp(ef->progtab[pb].name, VNET_SETNAME))
					vnet_data_copy(ef->progtab[pb].addr,
						       shdr[i].sh_size);
#endif
#endif
			} else
				bzero(ef->progtab[pb].addr, shdr[i].sh_size);

			/* Update all symbol values with the offset. */
			for (j = 0; j < ef->ddbsymcnt; j++) {
				es = &ef->ddbsymtab[j];
				if (es->st_shndx != i)
					continue;
				es->st_value += (Elf_Addr) ef->progtab[pb].addr;
			}
			mapbase += shdr[i].sh_size;
			pb++;
			break;
		case SHT_REL:
			ef->reltab[rl].rel = kmalloc(shdr[i].sh_size, M_LINKER, M_WAITOK);
			ef->reltab[rl].nrel = shdr[i].sh_size / sizeof(Elf_Rel);
			ef->reltab[rl].sec = shdr[i].sh_info;
			error = vn_rdwr(UIO_READ, vp,
					(void *)ef->reltab[rl].rel,
					shdr[i].sh_size, shdr[i].sh_offset,
					UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
			if (error)
				goto out;
			if (resid != 0) {
				error = EINVAL;
				goto out;
			}
			rl++;
			break;
		case SHT_RELA:
			ef->relatab[ra].rela = kmalloc(shdr[i].sh_size, M_LINKER, M_WAITOK);
			ef->relatab[ra].nrela = shdr[i].sh_size / sizeof(Elf_Rela);
			ef->relatab[ra].sec = shdr[i].sh_info;
			error = vn_rdwr(UIO_READ, vp,
					(void *)ef->relatab[ra].rela,
					shdr[i].sh_size, shdr[i].sh_offset,
					UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
			if (error)
				goto out;
			if (resid != 0) {
				error = EINVAL;
				goto out;
			}
			ra++;
			break;
		}
	}
	if (pb != ef->nprogtab)
		panic("lost progbits");
	if (rl != ef->nreltab)
		panic("lost reltab");
	if (ra != ef->nrelatab)
		panic("lost relatab");
	if (mapbase != (vm_offset_t) ef->address + mapsize)
		panic("mapbase 0x%lx != address %p + mapsize 0x%lx (0x%lx)",
		      mapbase, ef->address, mapsize,
		      (vm_offset_t) ef->address + mapsize);

	/* Local intra-module relocations */
	link_elf_obj_reloc_local(lf);

	/* Pull in dependencies */
	error = linker_load_dependencies(lf);
	if (error)
		goto out;

	/* External relocations */
	error = relocate_file(lf);
	if (error)
		goto out;

	*result = lf;

out:
	if (error && lf)
		linker_file_unload(lf /*, LINKER_UNLOAD_FORCE */);
	if (hdr)
		kfree(hdr, M_LINKER);
	vn_unlock(vp);
	vn_close(vp, FREAD, NULL);

	return error;
}
Example no. 13
static int
ntfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	size_t		size;
	int		error;
	struct vnode	*devvp;
	struct ntfs_args args;
	struct nlookupdata nd;
	struct vnode *rootvp;

	error = 0;
	/*
	 * Use NULL path to flag a root mount
	 */
	if( path == NULL) {
		/*
		 ***
		 * Mounting root file system
		 ***
		 */

		/* Get vnode for root device*/
		if( bdevvp( rootdev, &rootvp))
			panic("ffs_mountroot: can't setup bdevvp for root");

		/*
		 * FS specific handling
		 */
		mp->mnt_flag |= MNT_RDONLY;	/* XXX globally applicable?*/

		/*
		 * Attempt mount
		 */
		if( ( error = ntfs_mountfs(rootvp, mp, &args, cred)) != 0) {
			/* fs specific cleanup (if any)*/
			goto error_1;
		}

		goto dostatfs;		/* success*/

	}

	/*
	 ***
	 * Mounting non-root file system or updating a file system
	 ***
	 */

	/* copy in user arguments*/
	error = copyin(data, (caddr_t)&args, sizeof (struct ntfs_args));
	if (error)
		goto error_1;		/* can't get arguments*/

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/* if not updating name...*/
		if (args.fspec == NULL) {
			/*
			 * Process export requests.  Jumping to "success"
			 * will return the vfs_export() error code.
			 */
			struct ntfsmount *ntm = VFSTONTFS(mp);
			error = vfs_export(mp, &ntm->ntm_export, &args.export);
			goto success;
		}

		kprintf("ntfs_mount(): MNT_UPDATE not supported\n");
		error = EINVAL;
		goto error_1;
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	devvp = NULL;
	error = nlookup_init(&nd, args.fspec, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);
	if (error)
		goto error_1;

	if (!vn_isdisk(devvp, &error))
		goto error_2;

	if (mp->mnt_flag & MNT_UPDATE) {
#if 0
		/*
		 ********************
		 * UPDATE
		 ********************
		 */

		if (devvp != ntmp->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if( !error) {
			/* Save "mounted from" info for mount point (NULL pad)*/
			copyinstr(	args.fspec,
					mp->mnt_stat.f_mntfromname,
					MNAMELEN - 1,
					&size);
			bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
		}
#endif
	} else {
		/*
		 ********************
		 * NEW MOUNT
		 ********************
		 */

		/* Save "mounted from" info for mount point (NULL pad)*/
		copyinstr(	args.fspec,			/* device name*/
				mp->mnt_stat.f_mntfromname,	/* save area*/
				MNAMELEN - 1,			/* max size*/
				&size);				/* real size*/
		bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);

		error = ntfs_mountfs(devvp, mp, &args, cred);
	}
	if (error) {
		goto error_2;
	}

dostatfs:
	/*
	 * Initialize FS stat information in mount struct; uses
	 * mp->mnt_stat.f_mntfromname.
	 *
	 * This code is common to root and non-root mounts
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);

	goto success;


error_2:	/* error with devvp held*/

	/* release devvp before failing*/
	vrele(devvp);

error_1:	/* no state to back out*/

success:
	return(error);
}
Example no. 14
int
ufs_quotaon(struct ucred *cred, struct mount *mp, int type, caddr_t fname)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct ufs_dquot *dq;
	int error;
	struct nlookupdata nd;
	struct scaninfo scaninfo;

	vpp = &ump->um_quotas[type];
	error = nlookup_init(&nd, fname, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD|FWRITE, 0);
	if (error == 0 && nd.nl_open_vp->v_type != VREG)
		error = EACCES;
	if (error) {
		nlookup_done(&nd);
		return (error);
	}
	vp = nd.nl_open_vp;
	nd.nl_open_vp = NULL;
	nlookup_done(&nd);

	vn_unlock(vp);
	if (*vpp != vp)
		ufs_quotaoff(mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vsetflags(vp, VSYSTEM);
	*vpp = vp;
	/* XXX release duplicate vp if *vpp == vp? */
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	ump->um_cred[type] = crhold(cred);
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (ufs_dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		ufs_dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	scaninfo.rescan = 1;
	while (scaninfo.rescan) {
		scaninfo.rescan = 0;
		error = vmntvnodescan(mp, VMSC_GETVP,
					NULL, ufs_quotaon_scan, &scaninfo);
		if (error)
			break;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		ufs_quotaoff(mp, type);
	return (error);
}
Example no. 15
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp, 
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks; 
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}
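A sketch of the mount-time usage the comment above calls for, with a hypothetical helper name: each volume named by the mount arguments is installed exactly once, passing devvp == NULL so hammer_install_volume() performs its own nlookup of the device path.

/*
 * Hypothetical mount-time loop: install every configured volume.  The
 * volume name array and count would normally come from the mount
 * arguments copied in by the HAMMER VFS mount code.
 */
static int
example_install_volumes(struct hammer_mount *hmp, char **volnames, int nvols)
{
	int error = 0;
	int i;

	for (i = 0; i < nvols && error == 0; ++i)
		error = hammer_install_volume(hmp, volnames[i], NULL);
	return (error);
}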
Example no. 16
	/*
	 * XXX: Should we process mount export info ?
	 * If not, returning zero here is enough as the actual ro/rw update is
	 * being done in sys_mount().
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		xmp = MOUNTTONULLMOUNT(mp);
		error = vfs_export(mp, &xmp->export, &args.export);
		return (error);
	}

	/*
	 * Find lower node
	 */
	rootvp = NULL;
	error = nlookup_init(&nd, args.target, UIO_USERSPACE, NLC_FOLLOW);
	if (error)
		goto fail1;
	error = nlookup(&nd);
	if (error)
		goto fail2;
	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &rootvp);
	if (error)
		goto fail2;

	xmp = (struct null_mount *) kmalloc(sizeof(struct null_mount),
				M_NULLFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save reference to underlying FS
	 *