Example no. 1
size_t character_currentAction(character_t* c, char* buffer, size_t n)
{
	size_t cur = 0;

	object_t* o = pool_get(&c->w->objects, c->go_o);
	universe_t* u = c->w->universe;
	char moving = c->go_x != c->o.x || c->go_y != c->o.y;
	if (o == NULL)
	{
		if (moving)
			cur += snprintf(buffer+cur, n-cur, "Marcher");
		else
			cur += snprintf(buffer+cur, n-cur, "Attendre");
	}
	else if (o->t == O_MINE)
	{
		mine_t* m = (mine_t*) o;

		if (moving)
		{
			cur += snprintf(buffer+cur, n-cur, "Aller à %s", m->t->name);
		}
		else
		{
			transform_t* tr = &m->t->harvest;

			if (tr->n_res == 0 || tr->res[0].is_item)
				return cur;

			int id = tr->res[0].id;
			kindOf_material_t* t = &u->materials[id];
			int skill = t->skill;

			float amount = c->inventory.materials[id];
			float max = character_maxOfMaterial(c, t);
			cur += snprintf(buffer+cur, n-cur, "%s (%.0f/%.0f)", u->skills[skill].name, floor(amount), floor(max));
		}
	}
	else if (o->t == O_CHARACTER)
	{
		character_t* t = (character_t*) o;
		const char* name = t->ai == NULL ? "ennemi" : t->ai->name;
		if (t == c)
			cur += snprintf(buffer+cur, n-cur, "S'admirer");
		else if (c->attack)
			cur += snprintf(buffer+cur, n-cur, "Attaquer %s", name);
		else
			cur += snprintf(buffer+cur, n-cur, "Suivre %s", name);
	}
	else if (o->t == O_BUILDING)
	{
		building_t* b = (building_t*) o;
		kindOf_building_t* t = b->t;
		const char* name = t->name;
		if (c->attack)
		{
			cur += snprintf(buffer+cur, n-cur, "Attaquer %s", name);
		}
		else if (moving)
		{
			cur += snprintf(buffer+cur, n-cur, "Se diriger vers %s", name);
		}
		else if (b->owner != c->o.uuid)
		{
			if (b->open)
				cur += snprintf(buffer+cur, n-cur, "Inspecter la marchandise");
			else
				cur += snprintf(buffer+cur, n-cur, "Attendre à l'abri des intempéries");
		}
		else if (b->build_progress != 1)
		{
			cur += snprintf(buffer+cur, n-cur, "%s", u->skills[SK_BUILD].name);
		}
		else if (b->work_n > 0)
		{
			transform_t* tr = &t->items[b->work_list[0]];

			if (tr->n_res == 0 || !tr->res[0].is_item)
				return cur;

			int id = tr->res[0].id;
			kindOf_item_t* it = &u->items[id];

			cur += snprintf(buffer+cur, n-cur, "%s (%i%%)", it->name, (int) floor(100*b->work_progress));
		}
		else if (t->make.n_res != 0)
		{
			transform_t* tr = &t->make;

			if (tr->n_res == 0 || tr->res[0].is_item)
				return cur;

			int id = tr->res[0].id;
			kindOf_material_t* t = &u->materials[id];
			int skill = t->skill;

			float amount = c->inventory.materials[id];
			float max = character_maxOfMaterial(c, t);
			cur += snprintf(buffer+cur, n-cur, "%s (%.0f/%.0f)", u->skills[skill].name, floor(amount), floor(max));
		}
		else
			cur += snprintf(buffer+cur, n-cur, "Se tourner les pources");
	}

	return cur;
}
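
The function above relies on the usual snprintf accumulation idiom: every call appends at offset cur and is told it may write at most n-cur bytes, and cur collects the return values. Below is a minimal, self-contained sketch of that pattern; the describe() helper and its arguments are hypothetical and only illustrate the idiom, they are not part of the game code above. Note that snprintf returns the length the full string would have needed, so the caller must keep the buffer large enough (or check cur against n) to avoid the size_t underflow of n-cur.

#include <math.h>
#include <stdio.h>

/* Hypothetical stand-in for character_currentAction(): appends fragments
 * to buffer with snprintf() and returns the accumulated length. */
static size_t describe(char* buffer, size_t n, float amount, float max)
{
	size_t cur = 0;

	cur += snprintf(buffer+cur, n-cur, "Harvest");
	cur += snprintf(buffer+cur, n-cur, " (%.0f/%.0f)", floor(amount), floor(max));
	return cur;
}

int main(void)
{
	char buf[64];
	size_t len = describe(buf, sizeof(buf), 12.7f, 20.0f);
	printf("%s (len=%zu)\n", buf, len);
	return 0;
}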
Example no. 2
/* ARGSUSED */
int
sys_pipe(struct proc *p, void *v, register_t *retval)
{
	struct sys_pipe_args /* {
		syscallarg(int *) fdp;
	} */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe = NULL;
	int fds[2], error;

	rpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(rpipe);
	if (error != 0)
		goto free1;
	wpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(wpipe);
	if (error != 0)
		goto free1;

	fdplock(fdp);

	error = falloc(p, &rf, &fds[0]);
	if (error != 0)
		goto free2;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fds[1]);
	if (error != 0)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf, p);
	FILE_SET_MATURE(wf, p);

	error = copyout(fds, SCARG(uap, fdp), sizeof(fds));
	if (error != 0) {
		fdrelease(p, fds[0]);
		fdrelease(p, fds[1]);
	}
	fdpunlock(fdp);
	return (error);

free3:
	fdremove(fdp, fds[0]);
	closef(rf, p);
	rpipe = NULL;
free2:
	fdpunlock(fdp);
free1:
	pipeclose(wpipe);
	pipeclose(rpipe);
	return (error);
}
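
sys_pipe() is the kernel side of pipe(2): it allocates the two pipe ends from pipe_pool, wires them together as peers, and copies the descriptor pair back to user space, unwinding the allocations on any failure. For context, a minimal userland program exercising the same syscall (plain POSIX, nothing OpenBSD-specific):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[16];
	ssize_t n;

	if (pipe(fds) == -1) {		/* serviced by sys_pipe() in the kernel */
		perror("pipe");
		return 1;
	}

	write(fds[1], "hello", 5);	/* fds[1] is the write end */
	n = read(fds[0], buf, sizeof(buf) - 1);	/* fds[0] is the read end */
	if (n > 0) {
		buf[n] = '\0';
		printf("read %zd bytes: %s\n", n, buf);
	}

	close(fds[0]);
	close(fds[1]);
	return 0;
}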
Example no. 3
int
udf_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct umount *ump;
	struct proc *p;
	struct vnode *vp, *nvp;
	struct unode *up;
	struct extfile_entry *xfe;
	struct file_entry *fe;
	int error, sector, size;

	p = curproc;
	bp = NULL;
	*vpp = NULL;
	ump = VFSTOUDFFS(mp);

	/* See if we already have this in the cache */
	if ((error = udf_hashlookup(ump, ino, LK_EXCLUSIVE, vpp)) != 0)
		return (error);
	if (*vpp != NULL)
		return (0);

	/*
	 * Allocate memory and check the tag id's before grabbing a new
	 * vnode, since it's hard to roll back if there is a problem.
	 */
	up = pool_get(&unode_pool, PR_WAITOK | PR_ZERO);

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino;
	devvp = ump->um_devvp;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		pool_put(&unode_pool, up);
		if (bp != NULL)
			brelse(bp);
		return (error);
	}

	xfe = (struct extfile_entry *)bp->b_data;
	fe = (struct file_entry *)bp->b_data;
	error = udf_checktag(&xfe->tag, TAGID_EXTFENTRY);
	if (error == 0) {
		size = letoh32(xfe->l_ea) + letoh32(xfe->l_ad);
	} else {
		error = udf_checktag(&fe->tag, TAGID_FENTRY);
		if (error) {
			printf("Invalid file entry!\n");
			pool_put(&unode_pool, up);
			if (bp != NULL)
				brelse(bp);
			return (ENOMEM);
		} else
			size = letoh32(fe->l_ea) + letoh32(fe->l_ad);
	}

	/* Allocate max size of FE/XFE. */
	up->u_fentry = malloc(size + UDF_EXTFENTRY_SIZE, M_UDFFENTRY, M_NOWAIT | M_ZERO);
	if (up->u_fentry == NULL) {
		pool_put(&unode_pool, up);
		if (bp != NULL)
			brelse(bp);
		return (ENOMEM); /* Cannot allocate file entry block */
	}

	if (udf_checktag(&xfe->tag, TAGID_EXTFENTRY) == 0)
		bcopy(bp->b_data, up->u_fentry, size + UDF_EXTFENTRY_SIZE);
	else
		bcopy(bp->b_data, up->u_fentry, size + UDF_FENTRY_SIZE);
	
	brelse(bp);
	bp = NULL;

	if ((error = udf_allocv(mp, &vp, p))) {
		free(up->u_fentry, M_UDFFENTRY);
		pool_put(&unode_pool, up);
		return (error); /* Error from udf_allocv() */
	}

	up->u_vnode = vp;
	up->u_ino = ino;
	up->u_devvp = ump->um_devvp;
	up->u_dev = ump->um_dev;
	up->u_ump = ump;
	vp->v_data = up;
	vref(ump->um_devvp);

	lockinit(&up->u_lock, PINOD, "unode", 0, 0);

	/*
	 * udf_hashins() will lock the vnode for us.
	 */
	udf_hashins(up);

	switch (up->u_fentry->icbtag.file_type) {
	default:
		printf("Unrecognized file type (%d)\n", vp->v_type);
		vp->v_type = VREG;
		break;
	case UDF_ICB_FILETYPE_DIRECTORY:
		vp->v_type = VDIR;
		break;
	case UDF_ICB_FILETYPE_BLOCKDEVICE:
		vp->v_type = VBLK;
		break;
	case UDF_ICB_FILETYPE_CHARDEVICE:
		vp->v_type = VCHR;
		break;
	case UDF_ICB_FILETYPE_FIFO:
		vp->v_type = VFIFO;
		break;
	case UDF_ICB_FILETYPE_SOCKET:
		vp->v_type = VSOCK;
		break;
	case UDF_ICB_FILETYPE_SYMLINK:
		vp->v_type = VLNK;
		break;
	case UDF_ICB_FILETYPE_RANDOMACCESS:
	case UDF_ICB_FILETYPE_REALTIME:
	case UDF_ICB_FILETYPE_UNKNOWN:
		vp->v_type = VREG;
		break;
	}

	/* check if this is a vnode alias */
	if ((nvp = checkalias(vp, up->u_dev, ump->um_mountp)) != NULL) {
		printf("found a vnode alias\n");
		/*
		 * Discard unneeded vnode, but save its udf_node.
		 * Note that the lock is carried over in the udf_node
		 */
		nvp->v_data = vp->v_data;
		vp->v_data = NULL;
		vp->v_op = spec_vnodeop_p;
		vrele(vp);
		vgone(vp);
		/*
		 * Reinitialize aliased inode.
		 */
		vp = nvp;
		ump->um_devvp = vp;
	}

	*vpp = vp;

	return (0);
}
Example no. 4
/*
 * Look up a EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ext2fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2fs_dinode *dp;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	if (ino > (ufsino_t)-1)
		panic("ext2fs_vget: alien ino_t %llu",
		    (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;

 retry:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_EXT2FS, mp, &ext2fs_vops, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK|PR_ZERO);
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	error = ufs_ihashins(ip);

	if (error) {
		/*
		 * Inode has not been inserted into the chain, so make sure
		 * we don't try to remove it.
		 */
		ip->i_flag |= IN_UNHASHED;

		/*
		 * VOP_INACTIVE will treat this as a stale file
		 * and recycle it quickly
		 */
		vrele(vp);

		if (error == EEXIST)
			goto retry;

		return (error);
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
	 	 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

	dp = (struct ext2fs_dinode *) ((char *)bp->b_data
	    + EXT2_DINODE_SIZE(fs) * ino_to_fsbo(fs, ino));
	
	ip->i_e2din = pool_get(&ext2fs_dinode_pool, PR_WAITOK);
	e2fs_iload(dp, ip->i_e2din);
	brelse(bp);

	ip->i_effnlink = ip->i_e2fs_nlink;

	/*
	 * The fields for storing the UID and GID of an ext2fs inode are
	 * limited to 16 bits. To overcome this limitation, Linux decided to
	 * scatter the highest bits of these values into a previously reserved
	 * area on the disk inode. We deal with this situation by having two
	 * 32-bit fields *out* of the disk inode to hold the complete values.
	 * Now that we are reading in the inode, compute these fields.
	 */
	ip->i_e2fs_uid = ip->i_e2fs_uid_low | (ip->i_e2fs_uid_high << 16);
	ip->i_e2fs_gid = ip->i_e2fs_gid_low | (ip->i_e2fs_gid_high << 16);

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = ip->i_e2fs_nblock = 0;
		(void)ext2fs_setsize(ip, 0);
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ext2fs_vinit(mp, &ext2fs_specvops, EXT2FS_FIFOOPS, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	vref(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}

	*vpp = vp;
	return (0);
}
Example no. 5
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct fs *fs;
	struct inode *ip;
	struct ufs1_dinode *dp1;
#ifdef FFS2
	struct ufs2_dinode *dp2;
#endif
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
#ifdef VFSDEBUG
	vp->v_flag |= VLOCKSWORK;
#endif
	/* XXX - we use the same pool for ffs and mfs */
	ip = pool_get(&ffs_ino_pool, PR_WAITOK);
	bzero((caddr_t)ip, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	ip->i_ump = ump;
	VREF(ip->i_devvp);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_vtbl = &ffs_vtbl;

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	error = ufs_ihashins(ip);
	
	if (error) {
		/*
		 * VOP_INACTIVE will treat this as a stale file
		 * and recycle it quickly
		 */
		vrele(vp);

		if (error == EEXIST)
			goto retry;

		return (error);
	}


	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
		      (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2) {
		ip->i_din2 = pool_get(&ffs_dinode2_pool, PR_WAITOK);
		dp2 = (struct ufs2_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din2 = *dp2;
	} else
#endif
	{
		ip->i_din1 = pool_get(&ffs_dinode1_pool, PR_WAITOK);
		dp1 = (struct ufs1_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din1 = *dp1;
	}

	brelse(bp);

	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = DIP(ip, nlink);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (DIP(ip, gen) == 0) {
		DIP_ASSIGN(ip, gen, arc4random() & INT_MAX);
		if (DIP(ip, gen) == 0 || DIP(ip, gen) == -1)
			DIP_ASSIGN(ip, gen, 1);	/* Shouldn't happen */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_ffs1_uid = ip->i_din1->di_ouid;
		ip->i_ffs1_gid = ip->i_din1->di_ogid;
	}

	*vpp = vp;

	return (0);
}
Example no. 6
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt,
    u_int tableid)
{
	int			 s = splsoftnet(); int error = 0;
	struct rtentry		*rt, *crt;
	struct radix_node	*rn;
	struct radix_node_head	*rnh;
	struct ifaddr		*ifa;
	struct sockaddr		*ndst;
	struct sockaddr_rtlabel	*sa_rl;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_gettable(info->rti_info[RTAX_DST]->sa_family, tableid)) ==
	    NULL)
		senderr(EAFNOSUPPORT);
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
#ifndef SMALL_KERNEL
		/*
		 * if we got multipath routes, we require users to specify
		 * a matching RTAX_GATEWAY.
		 */
		if (rn_mpath_capable(rnh)) {
			rt = rt_mpath_matchgate(rt,
			    info->rti_info[RTAX_GATEWAY]);
			rn = (struct radix_node *)rt;
			if (!rt)
				senderr(ESRCH);
		}
#endif
		if ((rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh, rn)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;

		/* clean up any cloned children */
		if ((rt->rt_flags & RTF_CLONING) != 0)
			rtflushclone(rnh, rt);

		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");

		if (rt->rt_gwroute) {
			rt = rt->rt_gwroute; RTFREE(rt);
			(rt = (struct rtentry *)rn)->rt_gwroute = NULL;
		}

		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}

#ifndef SMALL_KERNEL
		if (rn_mpath_capable(rnh)) {
			if ((rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
			    info->rti_info[RTAX_NETMASK], rnh)) != NULL &&
			    rn_mpath_next(rn) == NULL)
				((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
		}
#endif

		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;

		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		info->rti_flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		info->rti_flags |= RTF_CLONED;
		info->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		if ((info->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
			info->rti_flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == 0 && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
makeroute:
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | info->rti_flags;
		LIST_INIT(&rt->rt_timer);
		if (rt_setgate(rt, info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_GATEWAY], tableid)) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		ndst = rt_key(rt);
		if (info->rti_info[RTAX_NETMASK] != NULL) {
			rt_maskedcopy(info->rti_info[RTAX_DST], ndst,
			    info->rti_info[RTAX_NETMASK]);
		} else
			Bcopy(info->rti_info[RTAX_DST], ndst,
			    info->rti_info[RTAX_DST]->sa_len);
#ifndef SMALL_KERNEL
		/* do not permit exactly the same dst/mask/gw pair */
		if (rn_mpath_capable(rnh) &&
		    rt_mpath_conflict(rnh, rt, info->rti_info[RTAX_NETMASK],
		    info->rti_flags & RTF_MPATH)) {
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
#endif

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			/*
			 * Copy both metrics and a back pointer to the cloned
			 * route's parent.
			 */
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;	 /* Back ptr. to parent. */
			rt->rt_parent->rt_refcnt++;
		}
		rn = rnh->rnh_addaddr((caddr_t)ndst,
		    (caddr_t)info->rti_info[RTAX_NETMASK], rnh, rt->rt_nodes);
		if (rn == NULL && (crt = rtalloc1(ndst, 0, tableid)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt, tableid);
				rn = rnh->rnh_addaddr((caddr_t)ndst,
				    (caddr_t)info->rti_info[RTAX_NETMASK],
				    rnh, rt->rt_nodes);
			}
			RTFREE(crt);
		}
		if (rn == 0) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}

#ifndef SMALL_KERNEL
		if (rn_mpath_capable(rnh) &&
		    (rn = rnh->rnh_lookup(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], rnh)) != NULL) {
			if (rn_mpath_next(rn) == NULL)
				((struct rtentry *)rn)->rt_flags &= ~RTF_MPATH;
			else
				((struct rtentry *)rn)->rt_flags |= RTF_MPATH;
		}
#endif

		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}

		if_group_routechange(info->rti_info[RTAX_DST],
			info->rti_info[RTAX_NETMASK]);
		break;
	}
bad:
	splx(s);
	return (error);
}
Example no. 7
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap, register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
	    	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
	    		l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
Example no. 8
void
abf_policy_update (u32 policy_id,
		   u32 acl_index, const fib_route_path_t * rpaths)
{
  abf_policy_t *ap;
  u32 api;

  api = abf_policy_find (policy_id);

  if (INDEX_INVALID == api)
    {
      /*
       * create a new policy
       */
      pool_get (abf_policy_pool, ap);

      api = ap - abf_policy_pool;
      fib_node_init (&ap->ap_node, abf_policy_fib_node_type);
      ap->ap_acl = acl_index;
      ap->ap_id = policy_id;
      ap->ap_pl = fib_path_list_create ((FIB_PATH_LIST_FLAG_SHARED |
					 FIB_PATH_LIST_FLAG_NO_URPF), rpaths);

      /*
       * become a child of the path list so we get poked when
       * the forwarding changes.
       */
      ap->ap_sibling = fib_path_list_child_add (ap->ap_pl,
						abf_policy_fib_node_type,
						api);

      /*
       * add this new policy to the DB
       */
      hash_set (abf_policy_db, policy_id, api);

      /*
       * take a lock on behalf of the CLI/API creation
       */
      fib_node_lock (&ap->ap_node);
    }
  else
    {
      /*
       * update an existing policy.
       * - add the path to the path-list and swap our ancestry
       * - backwalk to poke all attachments to update
       */
      fib_node_index_t old_pl;

      ap = abf_policy_get (api);
      old_pl = ap->ap_pl;

      if (FIB_NODE_INDEX_INVALID != old_pl)
	{
	  ap->ap_pl = fib_path_list_copy_and_path_add (old_pl,
						       (FIB_PATH_LIST_FLAG_SHARED
							|
							FIB_PATH_LIST_FLAG_NO_URPF),
						       rpaths);
	  fib_path_list_child_remove (old_pl, ap->ap_sibling);
	}
      else
	{
	  ap->ap_pl = fib_path_list_create ((FIB_PATH_LIST_FLAG_SHARED |
					     FIB_PATH_LIST_FLAG_NO_URPF),
					    rpaths);
	}

      ap->ap_sibling = fib_path_list_child_add (ap->ap_pl,
						abf_policy_fib_node_type,
						api);

      fib_node_back_walk_ctx_t ctx = {
	.fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
      };

      fib_walk_sync (abf_policy_fib_node_type, api, &ctx);
    }
}
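
Unlike the kernel examples, pool_get() here is the VPP vppinfra pool macro: it returns a pointer to a free element, and the element's index is recovered by pointer subtraction (api = ap - abf_policy_pool) so that only the small index is stored in the policy hash and the FIB graph. A tiny standalone sketch of that pointer-difference idiom, using a plain fixed array instead of the real growable pool:

#include <stdio.h>

typedef struct { int acl; int id; } policy_t;

int main(void)
{
	policy_t pool[8];		/* stand-in for the growable abf_policy_pool */
	policy_t *ap = &pool[3];	/* pretend pool_get() handed out this slot */

	/* Same computation as "api = ap - abf_policy_pool": element pointer
	 * minus pool base gives the element index. */
	long api = ap - pool;
	printf("policy index = %ld\n", api);
	return 0;
}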
Example no. 9
/*
 * Read an inode from disk and initialize this vnode / inode pair.
 * Caller assures no other thread will try to load this inode.
 */
int
ext2fs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	ino_t ino;
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	dev_t dev;
	int error;

	KASSERT(key_len == sizeof(ino));
	memcpy(&ino, key, key_len);
	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_e2fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, 0, &bp);
	if (error)
		return error;

	/* Allocate and initialize inode. */
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(struct inode));
	vp->v_tag = VT_EXT2FS;
	vp->v_op = ext2fs_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;

	/* Initialize genfs node. */
	genfs_node_init(vp, &ext2fs_genfsops);

	error = ext2fs_loadvnode_content(fs, ino, bp, ip);
	brelse(bp, 0);
	if (error)
		return error;

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/* Initialize the vnode from the inode. */
	ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);

	/* Finish inode initialization. */
	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */

	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*new_key = &ip->i_number;
	return 0;
}
Example no. 10
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct pfi_dynaddr	*dyn;
	char			 tblname[PF_TABLE_NAME_SIZE];
	struct pf_ruleset	*ruleset = NULL;
	int			 s, rv = 0;

	if (aw->type != PF_ADDR_DYNIFTL)
		return (0);
	if ((dyn = pool_get(&pfi_addr_pl, PR_NOWAIT)) == NULL)
		return (1);
	bzero(dyn, sizeof(*dyn));

	s = splsoftnet();
	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kif_get(IFG_ALL);
	else
		dyn->pfid_kif = pfi_kif_get(aw->v.ifname);
	if (dyn->pfid_kif == NULL) {
		rv = 1;
		goto _bad;
	}
	pfi_kif_ref(dyn->pfid_kif, PFI_KIF_REF_RULE);

	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_ruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = 1;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = 1;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	pfi_kif_update(dyn->pfid_kif);
	splx(s);
	return (0);

_bad:
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_ruleset(ruleset);
	if (dyn->pfid_kif != NULL)
		pfi_kif_unref(dyn->pfid_kif, PFI_KIF_REF_RULE);
	pool_put(&pfi_addr_pl, dyn);
	splx(s);
	return (rv);
}
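
The middle of the function builds the dynamic table name by concatenating the interface name, the optional modifier suffix and the prefix length, producing names such as "em0:network/24". A standalone sketch of just that string assembly; strlcpy/strlcat are the <string.h> functions used above (native on OpenBSD), and the flag and mask values are made up for the example:

#include <stdio.h>
#include <string.h>

#define TBLNAME_SIZE	32		/* stand-in for PF_TABLE_NAME_SIZE */

int main(void)
{
	char tblname[TBLNAME_SIZE];
	const char *ifname = "em0";
	int network = 1;		/* pretend PFI_AFLAG_NETWORK is set */
	int net = 24;			/* prefix length from pfi_unmask() */

	strlcpy(tblname, ifname, sizeof(tblname));
	if (network)
		strlcat(tblname, ":network", sizeof(tblname));
	if (net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", net);

	printf("%s\n", tblname);	/* prints em0:network/24 */
	return 0;
}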
Example no. 11
/*
 * Look up a vnode/nfsnode by file handle and store the pointer in *npp.
 * Callers must check for mount points!!
 * An error number is returned.
 */
int
nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
{
	struct nfsmount		*nmp;
	struct nfsnode		*np, find, *np2;
	struct vnode		*vp, *nvp;
	struct proc		*p = curproc;		/* XXX */
	int			 error;

	nmp = VFSTONFS(mnt);

loop:
	rw_enter_write(&nfs_hashlock);
	find.n_fhp = fh;
	find.n_fhsize = fhsize;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		rw_exit_write(&nfs_hashlock);
		vp = NFSTOV(np);
		error = vget(vp, LK_EXCLUSIVE, p);
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}

	/*
	 * getnewvnode() could recycle a vnode, potentially formerly
	 * owned by NFS. This will cause a VOP_RECLAIM() to happen,
	 * which will cause recursive locking, so we unlock before
	 * calling getnewvnode() lock again afterwards, but must check
	 * to see if this nfsnode has been added while we did not hold
	 * the lock.
	 */
	rw_exit_write(&nfs_hashlock);
	error = getnewvnode(VT_NFS, mnt, &nfs_vops, &nvp);
	/* note that we don't have this vnode set up completely yet */
	rw_enter_write(&nfs_hashlock);
	if (error) {
		*npp = NULL;
		rw_exit_write(&nfs_hashlock);
		return (error);
	}
	nvp->v_flag |= VLARVAL;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		vgone(nvp);
		rw_exit_write(&nfs_hashlock);
		goto loop;
	}

	vp = nvp;
	np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
	vp->v_data = np;
	/* we now have an nfsnode on this vnode */
	vp->v_flag &= ~VLARVAL;
	np->n_vnode = vp;

	rw_init(&np->n_commitlock, "nfs_commitlk");

	/* 
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct 
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fh, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_flag |= VROOT;
	}

	np->n_fhp = &np->n_fh;
	bcopy(fh, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
	KASSERT(np2 == NULL);
	np->n_accstamp = -1;
	rw_exit(&nfs_hashlock);
	*npp = np;

	return (0);
}
Example no. 12
/*
 * Look up a EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ext2fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	void *cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_EXT2FS, mp, ext2fs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);

	mutex_enter(&ufs_hashlock);
	if ((*vpp = ufs_ihashget(dev, ino, 0)) != NULL) {
		mutex_exit(&ufs_hashlock);
		ungetnewvnode(vp);
		pool_put(&ext2fs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	memset(ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;
	genfs_node_init(vp, &ext2fs_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */

	ufs_ihashins(ip);
	mutex_exit(&ufs_hashlock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
	if (error) {

		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */

		vput(vp);
		*vpp = NULL;
		return (error);
	}
	cp = (char *)bp->b_data + (ino_to_fsbo(fs, ino) * EXT2_DINODE_SIZE(fs));
	ip->i_din.e2fs_din = pool_get(&ext2fs_dinode_pool, PR_WAITOK);
	e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
	ext2fs_set_inode_guid(ip);
	brelse(bp, 0);

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 */

	error = ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */

	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*vpp = vp;
	return (0);
}
Example no. 13
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
Example no. 14
File: flow.c Project: vpp-dev/vpp
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;

  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;

  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->flow_lookup_entries);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      ASSERT (*private_data < vec_len (xd->flow_entries));

      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
Example no. 15
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	register_t sp, osp;
	vaddr_t uv;

	KASSERT(round_page(sizeof(struct pcb)) <= PAGE_SIZE);

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	l2->l_md.md_astpending = 0;
	l2->l_md.md_flags = 0;

	/* Flush the parent LWP out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	pcb2->pcb_fpregs = pool_get(&hppa_fppl, PR_WAITOK);
	*pcb2->pcb_fpregs = *pcb1->pcb_fpregs;

	/* reset any of the pending FPU exceptions from parent */
	pcb2->pcb_fpregs->fpr_regs[0] =
	    HPPA_FPU_FORK(pcb2->pcb_fpregs->fpr_regs[0]);
	pcb2->pcb_fpregs->fpr_regs[1] = 0;
	pcb2->pcb_fpregs->fpr_regs[2] = 0;
	pcb2->pcb_fpregs->fpr_regs[3] = 0;

	l2->l_md.md_bpva = l1->l_md.md_bpva;
	l2->l_md.md_bpsave[0] = l1->l_md.md_bpsave[0];
	l2->l_md.md_bpsave[1] = l1->l_md.md_bpsave[1];

	uv = uvm_lwp_getuarea(l2);
	sp = (register_t)uv + PAGE_SIZE;
	l2->l_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);

	/* copy the l1's trapframe to l2 */
	memcpy(tf, l1->l_md.md_regs, sizeof(*tf));

	/* Fill out all the PAs we are going to need in locore. */
	cpu_activate_pcb(l2);

	if (__predict_true(l2->l_proc->p_vmspace != NULL)) {
		struct proc *p = l2->l_proc;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;
		pa_space_t space = pmap->pm_space;

		/* Load all of the user's space registers. */
		tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = tf->tf_sr2 = 
		tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space;
		tf->tf_iisq_head = tf->tf_iisq_tail = space;

		/* Load the protection registers */
		tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid;

		/*
		 * theoretically these could be inherited from the father,
		 * but just in case.
		 */
		tf->tf_sr7 = HPPA_SID_KERNEL;
		mfctl(CR_EIEM, tf->tf_eiem);
		tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ |
		    (curcpu()->ci_psw & PSW_O);
	}

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp;

	/* lwp_trampoline's frame */
	sp += HPPA_FRAME_SIZE;

	*(register_t *)(sp) = 0;	/* previous frame pointer */
	*(register_t *)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t *)(sp + HPPA_FRAME_CRP) = (register_t)lwp_trampoline;

	*HPPA_FRAME_CARG(2, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(3, sp) = (register_t)arg;

	/*
	 * cpu_switchto's frame
	 * 	stack usage is std frame + callee-save registers
	 */
	sp += HPPA_FRAME_SIZE + 16*4;
	pcb2->pcb_ksp = sp;
	fdcache(HPPA_SID_KERNEL, uv, sp - uv);
}
Example no. 16
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;
	
	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}
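
shmget_allocate_segment() is the allocation path behind shmget(2) when a new segment has to be created. For context, a minimal userland sketch of the corresponding SysV shared memory API (standard calls only):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* IPC_PRIVATE always allocates, so this takes the
	 * shmget_allocate_segment() path in the kernel. */
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid == -1) {
		perror("shmget");
		return 1;
	}

	char *p = shmat(shmid, NULL, 0);
	if (p == (char *)-1) {
		perror("shmat");
		return 1;
	}
	strcpy(p, "hello from shared memory");
	printf("%s\n", p);

	shmdt(p);
	shmctl(shmid, IPC_RMID, NULL);	/* mark the segment for removal */
	return 0;
}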
Example no. 17
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > NBPG)
		panic("USPACE too small for user");
#endif
	fpu_proc_save(p1);

	pcbp = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
	/* space is cached for the copy{in,out}'s pleasure */
	pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
	pcbp->pcb_fpstate = pool_get(&hppa_fppl, PR_WAITOK);
	*pcbp->pcb_fpstate = *p1->p_addr->u_pcb.pcb_fpstate;
	/* reset any of the pending FPU exceptions from parent */
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[0] =
	    HPPA_FPU_FORK(pcbp->pcb_fpstate->hfp_regs.fpr_regs[0]);
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[1] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[2] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[3] = 0;

	p2->p_md.md_bpva = p1->p_md.md_bpva;
	p2->p_md.md_bpsave[0] = p1->p_md.md_bpsave[0];
	p2->p_md.md_bpsave[1] = p1->p_md.md_bpsave[1];

	sp = (register_t)p2->p_addr + NBPG;
	p2->p_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(p1->p_md.md_regs, tf, sizeof(*tf));

	tf->tf_cr30 = (paddr_t)pcbp->pcb_fpstate;

	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
	tf->tf_iisq_head = tf->tf_iisq_tail =
		p2->p_vmspace->vm_map.pmap->pm_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited from the father,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    (curcpu()->ci_psw & PSL_O);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp + HPPA_FRAME_SIZE;
	*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
	*(register_t*)(osp + HPPA_FRAME_CRP) = (register_t)&switch_trampoline;
	*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);

	sp = osp + HPPA_FRAME_SIZE + 20*4; /* frame + callee-save registers */
	*HPPA_FRAME_CARG(0, sp) = (register_t)arg;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	pcbp->pcb_ksp = sp;
}
Example no. 18
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK)
		return;
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
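
The transmit path indexes its descriptor ring with prod & (vd_nentries - 1), the usual power-of-two ring trick: the producer and consumer counters grow monotonically, their difference is the number of in-flight descriptors, and the mask wraps them onto ring slots. A tiny standalone illustration of that arithmetic (the entry count here is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned nentries = 8;			/* must be a power of two */
	unsigned prod = 0, cons = 0;
	int i;

	for (i = 0; i < 11; i++) {
		unsigned slot = prod & (nentries - 1);	/* wraps into 0..7 */
		printf("produce into slot %u, %u in flight\n", slot, prod - cons);
		prod++;
		if (i % 3 == 2)
			cons++;			/* consumer occasionally catches up */
	}
	return 0;
}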
Example no. 19
/*
 * Convert a pathname into a pointer to a vnode.
 *
 * The FOLLOW flag is set when symbolic links are to be followed
 * when they occur at the end of the name translation process.
 * Symbolic links are always followed for all other pathname
 * components other than the last.
 *
 * If the LOCKLEAF flag is set, a locked vnode is returned.
 *
 * The segflg defines whether the name is to be copied from user
 * space or kernel space.
 *
 * Overall outline of namei:
 *
 *	copy in name
 *	get starting directory
 *	while (!done && !error) {
 *		call lookup to search path.
 *		if symbolic link, massage name in buffer and continue
 *	}
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;		/* pointer to file descriptor state */
	char *cp;			/* pointer into pathname argument */
	struct vnode *dp;		/* the directory we are searching */
	struct iovec aiov;		/* uio for reading symbolic links */
	struct uio auio;
	int error, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	struct proc *p = cnp->cn_proc;

	ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred;
#ifdef DIAGNOSTIC
	if (!cnp->cn_cred || !cnp->cn_proc)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif
	fdp = cnp->cn_proc->p_fd;

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0)
		cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
	if (ndp->ni_segflg == UIO_SYSSPACE)
		error = copystr(ndp->ni_dirp, cnp->cn_pnbuf,
			    MAXPATHLEN, &ndp->ni_pathlen);
	else
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
			    MAXPATHLEN, &ndp->ni_pathlen);

	/*
	 * Fail on null pathnames
	 */
	if (error == 0 && ndp->ni_pathlen == 1)
		error = ENOENT;

	if (error) {
		pool_put(&namei_pool, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		return (error);
	}

#ifdef KTRACE
	if (KTRPOINT(cnp->cn_proc, KTR_NAMEI))
		ktrnamei(cnp->cn_proc, cnp->cn_pnbuf);
#endif
#if NSYSTRACE > 0
	if (ISSET(cnp->cn_proc->p_flag, P_SYSTRACE))
		systrace_namei(ndp);
#endif

	/*
	 *  Strip trailing slashes, as requested
	 */
	if (cnp->cn_flags & STRIPSLASHES) {
		char *end = cnp->cn_pnbuf + ndp->ni_pathlen - 2;

		cp = end;
		while (cp >= cnp->cn_pnbuf && (*cp == '/'))
			cp--;

		/* Still some remaining characters in the buffer */
		if (cp >= cnp->cn_pnbuf) {
			ndp->ni_pathlen -= (end - cp);
			*(cp + 1) = '\0';
		}
	}

	ndp->ni_loopcnt = 0;

	/*
	 * Get starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL)
		ndp->ni_rootdir = rootvnode;
	/*
	 * Check if starting from root directory or current directory.
	 */
	if (cnp->cn_pnbuf[0] == '/') {
		dp = ndp->ni_rootdir;
		vref(dp);
	} else {
		dp = fdp->fd_cdir;
		vref(dp);
	}
	for (;;) {
		if (!dp->v_mount) {
			/* Give up if the directory is no longer mounted */
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (ENOENT);
		}
		cnp->cn_nameptr = cnp->cn_pnbuf;
		ndp->ni_startdir = dp;
		if ((error = lookup(ndp)) != 0) {
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (error);
		}
		/*
		 * If not a symbolic link, return search result.
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0)
				pool_put(&namei_pool, cnp->cn_pnbuf);
			else
				cnp->cn_flags |= HASBUF;
			return (0);
		}
		if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ndp->ni_dvp, 0, p);
		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			break;
		}
		if (ndp->ni_pathlen > 1)
			cp = pool_get(&namei_pool, PR_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_procp = cnp->cn_proc;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
badlink:
			if (ndp->ni_pathlen > 1)
				pool_put(&namei_pool, cp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen == 0) {
			error = ENOENT;
			goto badlink;
		}
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink;
		}
		if (ndp->ni_pathlen > 1) {
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			pool_put(&namei_pool, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;
		vput(ndp->ni_vp);
		dp = ndp->ni_dvp;
		/*
		 * Check if root directory should replace current directory.
		 */
		if (cnp->cn_pnbuf[0] == '/') {
			vrele(dp);
			dp = ndp->ni_rootdir;
			vref(dp);
		}
	}
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vrele(ndp->ni_dvp);
	vput(ndp->ni_vp);
	ndp->ni_vp = NULL;
	return (error);
}
Esempio n. 20
0
void
vnet_start_desc(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vnet_desc_msg dm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	u_int prod, count;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
		m_copydata(m, 0, m->m_pkthdr.len, buf);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DESC_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.desc_handle = sc->sc_tx_prod;
		dm.nbytes = max(m->m_pkthdr.len, 60);
		dm.ncookies = 1;
		dm.cookie[0].addr =
			map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		dm.cookie[0].size = 2048;
		vnet_sendmsg(sc, &dm, sizeof(dm));

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}
}
Esempio n. 21
0
/*
 * lookup an anode: check mount's hash table; if not found, create one.
 * return locked and referenced a la vget(vp, 1);
 */
int
adosfs_vget(struct mount *mp, ino_t an, struct vnode **vpp)
{
	struct adosfsmount *amp;
	struct vnode *vp;
	struct anode *ap;
	struct buf *bp;
	char *nam, *tmp;
	int namlen, error;

	error = 0;
	amp = VFSTOADOSFS(mp);
	bp = NULL;

	/*
	 * check hash table. we are done if found
	 */
	if ((*vpp = adosfs_ahashget(mp, an)) != NULL)
		return (0);

	error = getnewvnode(VT_ADOSFS, mp, adosfs_vnodeop_p, NULL, &vp);
	if (error)
		return (error);

	/*
	 * setup, insert in hash, and lock before io.
	 */
	vp->v_data = ap = pool_get(&adosfs_node_pool, PR_WAITOK);
	memset(ap, 0, sizeof(struct anode));
	ap->vp = vp;
	ap->amp = amp;
	ap->block = an;
	ap->nwords = amp->nwords;
	genfs_node_init(vp, &adosfs_genfsops);
	adosfs_ainshash(amp, ap);

	if ((error = bread(amp->devvp, an * amp->bsize / DEV_BSIZE,
			   amp->bsize, NOCRED, 0, &bp)) != 0) {
		vput(vp);
		return (error);
	}

	/*
	 * get type and fill rest in based on that.
	 */
	switch (ap->type = adosfs_getblktype(amp, bp)) {
	case AROOT:
		vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
		ap->mtimev.days = adoswordn(bp, ap->nwords - 10);
		ap->mtimev.mins = adoswordn(bp, ap->nwords - 9);
		ap->mtimev.ticks = adoswordn(bp, ap->nwords - 8);
		ap->created.days = adoswordn(bp, ap->nwords - 7);
		ap->created.mins = adoswordn(bp, ap->nwords - 6);
		ap->created.ticks = adoswordn(bp, ap->nwords - 5);
		break;
	case ALDIR:
	case ADIR:
		vp->v_type = VDIR;
		break;
	case ALFILE:
	case AFILE:
		vp->v_type = VREG;
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		break;
	case ASLINK:		/* XXX soft link */
		vp->v_type = VLNK;
		/*
		 * convert from BCPL string and
		 * from: "part:dir/file" to: "/part/dir/file"
		 */
		nam = (char *)bp->b_data + (6 * sizeof(long));
		namlen = strlen(nam);
		tmp = nam;
		while (*tmp && *tmp != ':')
			tmp++;
		if (*tmp == 0) {
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
		} else if (*nam == ':') {
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
			ap->slinkto[0] = '/';
		} else {
			ap->slinkto = malloc(namlen + 2, M_ANODE, M_WAITOK);
			ap->slinkto[0] = '/';
			memcpy(&ap->slinkto[1], nam, namlen);
			ap->slinkto[tmp - nam + 1] = '/';
			namlen++;
		}
		ap->slinkto[namlen] = 0;
		ap->fsize = namlen;
		break;
	default:
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}

	/*
	 * Get appropriate data from this block;  hard link needs
	 * to get other data from the "real" block.
	 */

	/*
	 * copy in name (from original block)
	 */
	nam = (char *)bp->b_data + (ap->nwords - 20) * sizeof(u_int32_t);
	namlen = *(u_char *)nam++;
	if (namlen > 30) {
#ifdef DIAGNOSTIC
		printf("adosfs: aget: name length too long blk %llu\n",
		    (unsigned long long)an);
#endif
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}
	memcpy(ap->name, nam, namlen);
	ap->name[namlen] = 0;

	/*
	 * if dir alloc hash table and copy it in
	 */
	if (vp->v_type == VDIR) {
		int i;

		ap->tab = malloc(ANODETABSZ(ap) * 2, M_ANODE, M_WAITOK);
		ap->ntabent = ANODETABENT(ap);
		ap->tabi = (int *)&ap->tab[ap->ntabent];
		memset(ap->tabi, 0, ANODETABSZ(ap));
		for (i = 0; i < ap->ntabent; i++)
			ap->tab[i] = adoswordn(bp, i + 6);
	}

	/*
	 * misc.
	 */
	ap->pblock = adoswordn(bp, ap->nwords - 3);
	ap->hashf = adoswordn(bp, ap->nwords - 4);
	ap->linknext = adoswordn(bp, ap->nwords - 10);
	ap->linkto = adoswordn(bp, ap->nwords - 11);

	/*
	 * setup last indirect block cache.
	 */
	ap->lastlindblk = 0;
	if (ap->type == AFILE)  {
		ap->lastindblk = ap->block;
		if (adoswordn(bp, ap->nwords - 10))
			ap->linkto = ap->block;
	} else if (ap->type == ALFILE) {
		ap->lastindblk = ap->linkto;
		brelse(bp, 0);
		bp = NULL;
		error = bread(amp->devvp, ap->linkto * amp->bsize / DEV_BSIZE,
		    amp->bsize, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		/*
		 * Should ap->block be set to the real file header block?
		 */
		ap->block = ap->linkto;
	}

	if (ap->type == AROOT) {
		ap->adprot = 15;
		ap->uid = amp->uid;
		ap->gid = amp->gid;
	} else {
		ap->adprot = adoswordn(bp, ap->nwords - 48) ^ 15;
		/*
		 * ADOS directories do not have a `x' protection bit as
		 * it is known in VFS; this functionality is fulfilled
		 * by the ADOS `r' bit.
		 *
		 * To retain the ADOS behaviour, fake execute permissions
		 * in that case.
		 */
		if ((ap->type == ADIR || ap->type == ALDIR) &&
		    (ap->adprot & 0x00000008) == 0)
			ap->adprot &= ~0x00000002;

		/*
		 * Get uid/gid from extensions in file header
		 * (really need to know if this is a muFS partition)
		 */
		ap->uid = (adoswordn(bp, ap->nwords - 49) >> 16) & 0xffff;
		ap->gid = adoswordn(bp, ap->nwords - 49) & 0xffff;
		if (ap->uid || ap->gid) {
			if (ap->uid == 0xffff)
				ap->uid = 0;
			if (ap->gid == 0xffff)
				ap->gid = 0;
			ap->adprot |= 0x40000000;	/* Kludge */
		}
		else {
			/*
			 * uid & gid extension don't exist,
			 * so use the mount-point uid/gid
			 */
			ap->uid = amp->uid;
			ap->gid = amp->gid;
		}
	}
	ap->mtime.days = adoswordn(bp, ap->nwords - 23);
	ap->mtime.mins = adoswordn(bp, ap->nwords - 22);
	ap->mtime.ticks = adoswordn(bp, ap->nwords - 21);

	*vpp = vp;
	brelse(bp, 0);
	uvm_vnp_setsize(vp, ap->fsize);
	return (0);
}
Esempio n. 22
0
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	caddr_t buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		nbytes = roundup(dm->nbytes, 8);
		/* Check the frame size before allocating so buf is not leaked. */
		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			ifp->if_ierrors++;
			goto skip;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		/* Stupid OBP doesn't align properly. */
		m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Pass it on. */
		ml_enqueue(&ml, m);
		if_input(ifp, &ml);

	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_int(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
		ifp->if_opackets++;

		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
Esempio n. 23
0
int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
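	/* Fetch and validate the caller-supplied sigevent, if one was given. */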
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
			(pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			 (pt->pt_ev.sigev_signo <= 0 ||
			  pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}
Esempio n. 24
0
clib_error_t *
policer_add_del (vlib_main_t *vm,
                 u8 * name,
                 sse2_qos_pol_cfg_params_st * cfg,
                 u32 * policer_index,
                 u8 is_add)
{
  vnet_policer_main_t *pm = &vnet_policer_main;
  policer_read_response_type_st test_policer;
  policer_read_response_type_st * policer;
  uword * p;
  u32 pi;
  int rv;

  p = hash_get_mem (pm->policer_config_by_name, name);

  if (is_add == 0)
    {
      if (p == 0)
        {
          vec_free(name);
          return clib_error_return (0, "No such policer configuration");
        }
      hash_unset_mem (pm->policer_config_by_name, name);
      hash_unset_mem (pm->policer_index_by_name, name);
      vec_free(name);
      return 0;
    }

  if (p != 0)
    {
      vec_free(name);
      return clib_error_return (0, "Policer already exists");
    }

  /* Vet the configuration before adding it to the table */
  rv = sse2_pol_logical_2_physical (cfg, &test_policer);

  if (rv == 0)
    {
      policer_read_response_type_st *pp;
      sse2_qos_pol_cfg_params_st *cp;

      pool_get (pm->configs, cp);
      pool_get (pm->policer_templates, pp);

      ASSERT (cp - pm->configs == pp - pm->policer_templates);

      clib_memcpy (cp, cfg, sizeof (*cp));
      clib_memcpy (pp, &test_policer, sizeof (*pp));

      hash_set_mem (pm->policer_config_by_name, name, cp - pm->configs);
      pool_get_aligned (pm->policers, policer, CLIB_CACHE_LINE_BYTES);
      policer[0] = pp[0];
      pi = policer - pm->policers;
      hash_set_mem (pm->policer_index_by_name, name, pi);
      *policer_index = pi;
    }
  else
    {
      vec_free (name);
      return clib_error_return (0, "Config failed sanity check");
    }

  return 0;
}
Esempio n. 25
0
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
        if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
            trunc_page(pr->ps_strings), PROT_NONE, TRUE))
                goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
        if (uvm_map_protect(&vm->vm_map,
            round_page(pr->ps_strings + sizeof(arginfo)),
            (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
                goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

 freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
 clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}
Esempio n. 26
0
void
cc_trie_clear(struct cc_trie* tp) {
    tp->pool.pos = 0;
    tp->root = pool_get(tp);
}
Esempio n. 27
0
int
exfs_readlink(void *v)
{
	struct vop_readlink_args *ap = v;
	ISONODE	*ip;
	ISODIR	*dirp;
	ISOMNT	*imp;
	struct	buf *bp;
	struct	uio *uio;
	u_short	symlen;
	int	error;
	char	*symname;

	ip  = VTOI(ap->a_vp);
	imp = ip->i_mnt;
	uio = ap->a_uio;

    printf("zawel v exfs_readlink\n");
	if (imp->iso_ftype != ISO_FTYPE_RRIP)
		return (EINVAL);

	/*
	 * Get the parent directory's record block that contains this inode.
	 */
	error = bread(imp->im_devvp,
		      (ip->i_number >> imp->im_bshift) <<
		      (imp->im_bshift - DEV_BSHIFT),
		      imp->logical_block_size, &bp);
	if (error) {
		brelse(bp);
		return (EINVAL);
	}

	/*
	 * Setup the directory pointer for this inode
	 */
	dirp = (ISODIR *)(bp->b_data + (ip->i_number & imp->im_bmask));

	/*
	 * Just make sure we have the right one:
	 *   1: check that the record does not cross a block boundary
	 */
	if ((ip->i_number & imp->im_bmask) + isonum_711(dirp->length)
	    > imp->logical_block_size) {
		brelse(bp);
		return (EINVAL);
	}

	/*
	 * Now get a buffer
	 * Abuse a namei buffer for now.
	 */
	if (uio->uio_segflg == UIO_SYSSPACE &&
	    uio->uio_iov->iov_len >= MAXPATHLEN)
		symname = uio->uio_iov->iov_base;
	else
		symname = pool_get(&namei_pool, PR_WAITOK);

	/*
	 * OK, now gather the symbolic name from the SL record.
	 */
	if (exfs_rrip_getsymname(dirp, symname, &symlen, imp) == 0) {
		if (uio->uio_segflg != UIO_SYSSPACE ||
		    uio->uio_iov->iov_len < MAXPATHLEN)
			pool_put(&namei_pool, symname);
		brelse(bp);
		return (EINVAL);
	}
	/*
	 * Don't forget before you leave from home ;-)
	 */
	brelse(bp);

	/*
	 * Return the symbolic name to the caller.
	 */
	if (uio->uio_segflg != UIO_SYSSPACE ||
	    uio->uio_iov->iov_len < MAXPATHLEN) {
		error = uiomove(symname, symlen, uio);
		pool_put(&namei_pool, symname);
		return (error);
	}
	uio->uio_resid -= symlen;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + symlen;
	uio->uio_iov->iov_len -= symlen;
	return (0);
}
Esempio n. 28
0
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	extern register_t switch_tramp_p;

	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user) + sizeof(*tf)) > PAGE_SIZE)
		panic("USPACE too small for user");
#endif
	fpu_proc_save(p1);

	pcbp = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
	/* space is cached for the copy{in,out}'s pleasure */
	pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
	pcbp->pcb_fpstate = pool_get(&hppa_fppl, PR_WAITOK);
	*pcbp->pcb_fpstate = *p1->p_addr->u_pcb.pcb_fpstate;
	/* reset any of the pending FPU exceptions from parent */
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[0] =
	    HPPA_FPU_FORK(pcbp->pcb_fpstate->hfp_regs.fpr_regs[0]);
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[1] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[2] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[3] = 0;

	sp = (register_t)p2->p_addr + PAGE_SIZE;
	p2->p_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(p1->p_md.md_regs, tf, sizeof(*tf));

	tf->tf_vtop = (paddr_t)p2->p_vmspace->vm_map.pmap->pm_pdir;
	tf->tf_cr30 = (paddr_t)pcbp->pcb_fpstate;

	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
	tf->tf_iisq[0] = tf->tf_iisq[1] =
		p2->p_vmspace->vm_map.pmap->pm_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited from the father,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	tf->tf_eiem = mfctl(CR_EIEM);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    PSL_O | PSL_W;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		setstack(tf, (u_long)stack, 0);	/* XXX ignore error? */

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp + HPPA_FRAME_SIZE;
	*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
	*(register_t*)(osp + HPPA_FRAME_RP) = switch_tramp_p;
	*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);

	sp = osp + HPPA_FRAME_SIZE + 20*8; /* frame + callee-saved registers */
	*(register_t*)(sp - HPPA_FRAME_SIZE + 0) = (register_t)arg;
	*(register_t*)(sp - HPPA_FRAME_SIZE + 8) = KERNMODE(func);
	*(register_t*)(sp - HPPA_FRAME_SIZE + 16) = 0;	/* cpl */
	pcbp->pcb_ksp = sp;
}
Esempio n. 29
0
/**
 * Clone a token
 */
token_t *
glw_view_token_copy(glw_root_t *gr, token_t *src)
{
  token_t *dst = pool_get(gr->gr_token_pool);

  dst->file = rstr_dup(src->file);
  dst->line = src->line;

  dst->type = src->type;

  switch(src->type) {
  case TOKEN_FLOAT:
    dst->t_float = src->t_float;
    break;

  case TOKEN_INT:
    dst->t_int = src->t_int;
    break;

  case TOKEN_PROPERTY_REF:
    dst->t_prop = prop_ref_inc(src->t_prop);
    break;

  case TOKEN_PROPERTY_OWNER:
    dst->t_prop = prop_xref_addref(src->t_prop);
    break;

  case TOKEN_PROPERTY_SUBSCRIPTION:
  case TOKEN_DIRECTORY:
    dst->propsubr = src->propsubr;
    break;

  case TOKEN_FUNCTION:
    dst->t_func = src->t_func;
    if(dst->t_func->ctor != NULL)
      dst->t_func->ctor(dst);
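    // FALLTHRU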
  case TOKEN_LEFT_BRACKET:
    dst->t_num_args = src->t_num_args;
    break;

  case TOKEN_OBJECT_ATTRIBUTE:
    dst->t_attrib = src->t_attrib;
    break;

  case TOKEN_CSTRING:
    dst->t_cstring = src->t_cstring;
    break;

  case TOKEN_RSTRING:
    dst->t_rstrtype = src->t_rstrtype;
    // FALLTHRU
  case TOKEN_IDENTIFIER:
  case TOKEN_PROPERTY_VALUE_NAME:
  case TOKEN_PROPERTY_CANONICAL_NAME:
    dst->t_rstring = rstr_dup(src->t_rstring);
    break;

  case TOKEN_START:
  case TOKEN_END:
  case TOKEN_HASH:
  case TOKEN_ASSIGNMENT:
  case TOKEN_COND_ASSIGNMENT:
  case TOKEN_END_OF_EXPR:
  case TOKEN_SEPARATOR:
  case TOKEN_BLOCK_OPEN:
  case TOKEN_BLOCK_CLOSE:
  case TOKEN_LEFT_PARENTHESIS:
  case TOKEN_RIGHT_PARENTHESIS:
  case TOKEN_RIGHT_BRACKET:
  case TOKEN_DOT:
  case TOKEN_ADD:
  case TOKEN_SUB:
  case TOKEN_MULTIPLY:
  case TOKEN_DIVIDE:
  case TOKEN_MODULO:
  case TOKEN_DOLLAR:
  case TOKEN_AMPERSAND:
  case TOKEN_BOOLEAN_AND:
  case TOKEN_BOOLEAN_OR:
  case TOKEN_BOOLEAN_XOR:
  case TOKEN_BOOLEAN_NOT:
  case TOKEN_EQ:
  case TOKEN_NEQ:
  case TOKEN_LT:
  case TOKEN_GT:
  case TOKEN_NULL_COALESCE:
  case TOKEN_EXPR:
  case TOKEN_RPN:
  case TOKEN_BLOCK:
  case TOKEN_NOP:
  case TOKEN_VOID:
  case TOKEN_COLON:
    break;

  case TOKEN_LINK:
    dst->t_link_rtitle = rstr_dup(src->t_link_rtitle);
    dst->t_link_rurl   = rstr_dup(src->t_link_rurl);
    break;

  case TOKEN_VECTOR_FLOAT:
  case TOKEN_EVENT:
  case TOKEN_VECTOR:
  case TOKEN_num:
    abort();
  }
  return dst;
}
Esempio n. 30
0
int
udf_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct umount *ump;
	struct proc *p;
	struct vnode *vp;
	struct unode *up;
	struct file_entry *fe;
	int error, sector, size;

	p = curproc;
	bp = NULL;
	*vpp = NULL;
	ump = VFSTOUDFFS(mp);

	/* See if we already have this in the cache */
	if ((error = udf_hashlookup(ump, ino, LK_EXCLUSIVE, vpp)) != 0)
		return (error);
	if (*vpp != NULL)
		return (0);

	/*
	 * Allocate memory and check the tag id's before grabbing a new
	 * vnode, since it's hard to roll back if there is a problem.
	 */
	up = pool_get(&unode_pool, PR_WAITOK);
	bzero(up, sizeof(struct unode));

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino;
	devvp = ump->um_devvp;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		pool_put(&unode_pool, up);
		if (bp != NULL)
			brelse(bp);
		return (error);
	}

	fe = (struct file_entry *)bp->b_data;
	if (udf_checktag(&fe->tag, TAGID_FENTRY)) {
		printf("Invalid file entry!\n");
		pool_put(&unode_pool, up);
		brelse(bp);
		return (ENOMEM);
	}

	size = UDF_FENTRY_SIZE + letoh32(fe->l_ea) + letoh32(fe->l_ad);

	up->u_fentry = malloc(size, M_UDFFENTRY, M_NOWAIT);
	if (up->u_fentry == NULL) {
		pool_put(&unode_pool, up);
		brelse(bp);
		return (ENOMEM); /* Cannot allocate file entry block */
	}

	bcopy(bp->b_data, up->u_fentry, size);
	
	brelse(bp);
	bp = NULL;

	if ((error = udf_allocv(mp, &vp, p))) {
		free(up->u_fentry, M_UDFFENTRY);
		pool_put(&unode_pool, up);
		return (error); /* Error from udf_allocv() */
	}

	up->u_vnode = vp;
	up->u_ino = ino;
	up->u_devvp = ump->um_devvp;
	up->u_dev = ump->um_dev;
	up->u_ump = ump;
	vp->v_data = up;
	VREF(ump->um_devvp);

	lockinit(&up->u_lock, PINOD, "unode", 0, 0);

	/*
	 * udf_hashins() will lock the vnode for us.
	 */
	udf_hashins(up);

	switch (up->u_fentry->icbtag.file_type) {
	default:
		vp->v_type = VBAD;
		break;
	case UDF_ICB_TYPE_DIR:
		vp->v_type = VDIR;
		break;
	case UDF_ICB_TYPE_FILE:
		vp->v_type = VREG;
		break;
	case UDF_ICB_TYPE_BLKDEV:
		vp->v_type = VBLK;
		break;
	case UDF_ICB_TYPE_CHRDEV:
		vp->v_type = VCHR;
		break;
	case UDF_ICB_TYPE_FIFO:
		vp->v_type = VFIFO;
		break;
	case UDF_ICB_TYPE_SOCKET:
		vp->v_type = VSOCK;
		break;
	case UDF_ICB_TYPE_SYMLINK:
		vp->v_type = VLNK;
		break;
	case UDF_ICB_TYPE_VAT_150:
		vp->v_type = VREG;
		break;
	}

	*vpp = vp;

	return (0);
}