Example No. 1
static void vm_map_setup(vm_map_t *map) {
  TAILQ_INIT(&map->list);
  SPLAY_INIT(&map->tree);
  rw_init(&map->rwlock, "vm map rwlock", 1);
}
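
A minimal usage sketch for the lock initialized above. This assumes an OpenBSD-style rw_enter_read()/rw_enter_write() API; vaddr_t, vm_map_entry_t and the lookup/insert helpers are hypothetical, named only for illustration:

/* Sketch only: vm_map_lookup_entry() and vm_map_insert_entry() are
 * hypothetical helpers, not part of the example above. */
vm_map_entry_t *
vm_map_find(vm_map_t *map, vaddr_t addr)
{
	vm_map_entry_t *ent;

	rw_enter_read(&map->rwlock);	/* shared: lookups may run concurrently */
	ent = vm_map_lookup_entry(map, addr);
	rw_exit_read(&map->rwlock);
	return ent;
}

void
vm_map_add(vm_map_t *map, vm_map_entry_t *ent)
{
	rw_enter_write(&map->rwlock);	/* exclusive: mutates both list and tree */
	vm_map_insert_entry(map, ent);
	rw_exit_write(&map->rwlock);
}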
Example No. 2
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool\n. Pool must be upgraded to mount "
		    "this file system.", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example No. 3
static void
ti_iic_attach(struct device *parent, struct device *self, void *args)
{
	struct ti_iic_softc *sc = (struct ti_iic_softc *)self;
	struct armv7_attach_args *aa = args;
	struct i2cbus_attach_args iba;
	uint16_t rev;
	const char *mode;
	u_int state;
	char buf[20];
	char *pin;
	/* BBB specific pin names */
	char *pins[6] = {"I2C0_SDA", "I2C0_SCL",
			"SPIO_D1", "SPI0_CS0",
			"UART1_CTSn", "UART1_RTSn"};

	sc->sc_iot = aa->aa_iot;
	rw_init(&sc->sc_buslock, "tiiilk");

	sc->sc_rxthres = sc->sc_txthres = 4;

	if (bus_space_map(sc->sc_iot, aa->aa_dev->mem[0].addr,
	    aa->aa_dev->mem[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!");

	sc->sc_ih = arm_intr_establish(aa->aa_dev->irq[0], IPL_NET,
	    ti_iic_intr, sc, DEVNAME(sc));

	prcm_enablemodule(PRCM_I2C0 + aa->aa_dev->unit);

	if (board_id == BOARD_ID_AM335X_BEAGLEBONE) {
		pin = pins[aa->aa_dev->unit * 2];
		snprintf(buf, sizeof buf, "I2C%d_SDA", aa->aa_dev->unit);
		if (sitara_cm_padconf_set(pin, buf,
		    (0x01 << 4) | (0x01 << 5) | (0x01 << 6)) != 0) {
			printf(": can't switch %s pad\n", buf);
			return;
		}
		if (sitara_cm_padconf_get(pin, &mode, &state) == 0) {
			printf(": %s state %d ", mode, state);
		}

		pin = pins[aa->aa_dev->unit * 2 + 1];
		snprintf(buf, sizeof buf, "I2C%d_SCL", aa->aa_dev->unit);
		if (sitara_cm_padconf_set(pin, buf,
		    (0x01 << 4) | (0x01 << 5) | (0x01 << 6)) != 0) {
			printf(": can't switch %s pad\n", buf);
			return;
		}
		if (sitara_cm_padconf_get(pin, &mode, &state) == 0) {
			printf(": %s state %d ", mode, state);
		}
	}

	rev = I2C_READ_REG(sc, AM335X_I2C_REVNB_LO);
	printf(" rev %d.%d\n",
	    (int)I2C_REVNB_LO_MAJOR(rev),
	    (int)I2C_REVNB_LO_MINOR(rev));

	ti_iic_reset(sc);
	ti_iic_flush(sc);

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_acquire_bus = ti_iic_acquire_bus;
	sc->sc_ic.ic_release_bus = ti_iic_release_bus;
	sc->sc_ic.ic_exec = ti_iic_exec;

	bzero(&iba, sizeof iba);
	iba.iba_name = "iic";
	iba.iba_tag = &sc->sc_ic;
	(void) config_found(&sc->sc_dev, &iba, iicbus_print);
}
Example No. 4
/*
 * srpt_ch_alloc()
 */
srpt_channel_t *
srpt_ch_alloc(srpt_target_port_t *tgt, uint8_t port)
{
	ibt_status_t			status;
	srpt_channel_t			*ch;
	ibt_cq_attr_t			cq_attr;
	ibt_rc_chan_alloc_args_t	ch_args;
	uint32_t			cq_real_size;
	srpt_ioc_t			*ioc;

	ASSERT(tgt != NULL);
	ioc = tgt->tp_ioc;
	ASSERT(ioc != NULL);

	ch = kmem_zalloc(sizeof (*ch), KM_SLEEP);
	rw_init(&ch->ch_rwlock, NULL, RW_DRIVER, NULL);
	mutex_init(&ch->ch_reflock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ch->ch_cv_complete, NULL, CV_DRIVER, NULL);
	ch->ch_refcnt	= 1;
	ch->ch_cv_waiters = 0;

	ch->ch_state  = SRPT_CHANNEL_CONNECTING;
	ch->ch_tgt    = tgt;
	ch->ch_req_lim_delta = 0;
	ch->ch_ti_iu_len = 0;

	cq_attr.cq_size	 = srpt_send_msg_depth * 2;
	cq_attr.cq_sched = 0;
	cq_attr.cq_flags = IBT_CQ_NO_FLAGS;

	status = ibt_alloc_cq(ioc->ioc_ibt_hdl, &cq_attr, &ch->ch_scq_hdl,
	    &cq_real_size);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ch_alloc, send CQ alloc error (%d)",
		    status);
		goto scq_alloc_err;
	}

	cq_attr.cq_size	 = srpt_send_msg_depth + 1;
	cq_attr.cq_sched = 0;
	cq_attr.cq_flags = IBT_CQ_NO_FLAGS;

	status = ibt_alloc_cq(ioc->ioc_ibt_hdl, &cq_attr, &ch->ch_rcq_hdl,
	    &cq_real_size);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_alloc, receive CQ alloc error (%d)",
		    status);
		goto rcq_alloc_err;
	}

	ibt_set_cq_handler(ch->ch_scq_hdl, srpt_ch_scq_hdlr, ch);
	ibt_set_cq_handler(ch->ch_rcq_hdl, srpt_ch_rcq_hdlr, ch);
	(void) ibt_enable_cq_notify(ch->ch_scq_hdl, IBT_NEXT_COMPLETION);
	(void) ibt_enable_cq_notify(ch->ch_rcq_hdl, IBT_NEXT_COMPLETION);

	ch_args.rc_flags   = IBT_WR_SIGNALED;

	/* Make certain the initiator cannot read/write our memory */
	ch_args.rc_control = 0;

	ch_args.rc_hca_port_num = port;

	/*
	 * Any SRP IU can result in a number of STMF data buffer transfers
	 * and those transfers themselves could span multiple initiator
	 * buffers.  Therefore, the number of send WQE's actually required
	 * can vary.  Here we assume that on average an I/O will require
	 * no more than SRPT_MAX_OUT_IO_PER_CMD send WQE's.  In practice
	 * this will prevent send work queue overrun, but we will also
	 * inform STMF to throttle I/O should the work queue become full.
	 *
	 * If the HCA tells us the max outstanding WRs for a channel is
	 * lower than our default, use the HCA value.
	 */
	ch_args.rc_sizes.cs_sq = min(ioc->ioc_attr.hca_max_chan_sz,
	    (srpt_send_msg_depth * SRPT_MAX_OUT_IO_PER_CMD));
	ch_args.rc_sizes.cs_rq =  0;
	ch_args.rc_sizes.cs_sq_sgl = 2;
	ch_args.rc_sizes.cs_rq_sgl = 0;

	ch_args.rc_scq = ch->ch_scq_hdl;
	ch_args.rc_rcq = ch->ch_rcq_hdl;
	ch_args.rc_pd  = ioc->ioc_pd_hdl;
	ch_args.rc_clone_chan = NULL;
	ch_args.rc_srq = ioc->ioc_srq_hdl;

	status = ibt_alloc_rc_channel(ioc->ioc_ibt_hdl, IBT_ACHAN_USES_SRQ,
	    &ch_args, &ch->ch_chan_hdl, &ch->ch_sizes);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_alloc, IBT channel alloc error (%d)",
		    status);
		goto qp_alloc_err;
	}

	/*
	 * Create pool of send WQE entries to map send wqe work IDs
	 * to various types (specifically in error cases where OP
	 * is not known).
	 */
	ch->ch_num_swqe = ch->ch_sizes.cs_sq;
	SRPT_DPRINTF_L2("ch_alloc, number of SWQEs = %u", ch->ch_num_swqe);
	ch->ch_swqe = kmem_zalloc(sizeof (srpt_swqe_t) * ch->ch_num_swqe,
	    KM_SLEEP);
	if (ch->ch_swqe == NULL) {
		SRPT_DPRINTF_L2("ch_alloc, SWQE alloc error");
		(void) ibt_free_channel(ch->ch_chan_hdl);
		goto qp_alloc_err;
	}
	mutex_init(&ch->ch_swqe_lock, NULL, MUTEX_DRIVER, NULL);
	ch->ch_head = 1;
	for (ch->ch_tail = 1; ch->ch_tail < ch->ch_num_swqe - 1; ch->ch_tail++) {
		ch->ch_swqe[ch->ch_tail].sw_next = ch->ch_tail + 1;
	}
	ch->ch_swqe[ch->ch_tail].sw_next = 0;

	ibt_set_chan_private(ch->ch_chan_hdl, ch);
	return (ch);

qp_alloc_err:
	(void) ibt_free_cq(ch->ch_rcq_hdl);

rcq_alloc_err:
	(void) ibt_free_cq(ch->ch_scq_hdl);

scq_alloc_err:
	cv_destroy(&ch->ch_cv_complete);
	mutex_destroy(&ch->ch_reflock);
	rw_destroy(&ch->ch_rwlock);
	kmem_free(ch, sizeof (*ch));

	return (NULL);
}
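
The sw_next indices above thread the send WQEs into a free list (slot 0 is kept out of circulation so index 0 can mean "none"); the driver tracks both ch_head and ch_tail so slots recycle FIFO. Below is a simplified LIFO sketch of allocation and release against that list; srpt_ch_alloc_swqe() and srpt_ch_free_swqe() are hypothetical names for illustration:

/* Sketch: pop a free send WQE slot, or 0 if none are left. */
static uint32_t
srpt_ch_alloc_swqe(srpt_channel_t *ch)
{
	uint32_t idx;

	mutex_enter(&ch->ch_swqe_lock);
	idx = ch->ch_head;
	if (idx != 0)
		ch->ch_head = ch->ch_swqe[idx].sw_next;
	mutex_exit(&ch->ch_swqe_lock);
	return (idx);
}

/* Sketch: push a slot back onto the free list (LIFO here; the driver
 * itself appends at ch_tail instead). */
static void
srpt_ch_free_swqe(srpt_channel_t *ch, uint32_t idx)
{
	mutex_enter(&ch->ch_swqe_lock);
	ch->ch_swqe[idx].sw_next = ch->ch_head;
	ch->ch_head = idx;
	mutex_exit(&ch->ch_swqe_lock);
}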
Example No. 5
/************************************************************************
 * iumfs_alloc_node()
 *
 *   Allocate a new vnode and iumnode.
 *
 * Arguments:
 *     vfsp   : vfs structure
 *     nvpp   : address of the vnode structure pointer passed by the caller
 *     flag   : flags for the new vnode (VROOT, VISSWAP, etc.)
 *     type   : type of the new vnode (VDIR, VREG, etc.)
 *     nodeid : node number of the new vnode (0 means auto-assign)
 *
 * Return value
 *    On success : SUCCESS (= 0)
 *    On error   : non-zero
 *
 ************************************************************************/
int
iumfs_alloc_node(vfs_t *vfsp, vnode_t **nvpp, uint_t flag, enum vtype type, ino_t nodeid)
{
    vnode_t *vp;
    iumnode_t *inp;
    iumfs_t *iumfsp; // filesystem-dependent private data structure

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node is called\n"));
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: type=%d\n",type));
    
    iumfsp = VFS2IUMFS(vfsp);

    // Allocate the vnode structure
#ifdef SOL10
    // On Solaris 10 the vnode structure must not be allocated directly.
    vp = vn_alloc(KM_NOSLEEP);
#else
    // On Solaris 9 the filesystem allocates the vnode structure itself.
    vp = (vnode_t *) kmem_zalloc(sizeof (vnode_t), KM_NOSLEEP);
#endif

    // Allocate the filesystem-dependent node data (iumnode structure)
    inp = (iumnode_t *) kmem_zalloc(sizeof (iumnode_t), KM_NOSLEEP);

    /*
     * If either allocation failed, return ENOMEM.
     */
    if (vp == NULL || inp == NULL) {
        cmn_err(CE_WARN, "iumfs_alloc_node: kmem_zalloc failed\n");
        if (vp != NULL)
#ifdef SOL10
            vn_free(vp);
#else        
            kmem_free(vp, sizeof (vnode_t));
#endif            
        if (inp != NULL)
            kmem_free(inp, sizeof (iumnode_t));
        DEBUG_PRINT((CE_CONT, "iumfs_alloc_node return(ENOMEM)\n"));
        return (ENOMEM);
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: allocated vnode = 0x%p\n", vp));

    /*
     * Initialize the allocated vnode.
     * The VN_INIT macro sets the initial value of v_count to 1, which
     * prevents iumfs_inactive() from being called at a time the
     * filesystem does not expect.
     */
    VN_INIT(vp, vfsp, type, 0);

    // Set the address of the filesystem-dependent vnode operations structure
#ifdef SOL10
    vn_setops(vp, iumfs_vnodeops);
#else        
    vp->v_op = &iumfs_vnodeops;
#endif

    // Set the requested flags in v_flag
    vp->v_flag |= flag;

    /*
     * Initialize the allocated iumnode (without using the IN_INIT macro)
     */
    mutex_init(&(inp)->i_dlock, NULL, MUTEX_DEFAULT, NULL);
    inp->vattr.va_mask = AT_ALL;
    inp->vattr.va_uid = 0;
    inp->vattr.va_gid = 0;
    inp->vattr.va_blksize = BLOCKSIZE;
    inp->vattr.va_nlink = 1;
    inp->vattr.va_rdev = 0;
    rw_init(&(inp)->i_listlock, NULL, RW_DRIVER, NULL);
#ifdef SOL10
#else    
    inp->vattr.va_vcode = 1;
#endif
    /*
     * va_fsid in vattr is a dev_t (= ulong_t), whereas vfs_fsid in the
     * vfs is a structure containing an int array (int[2]).
     * So store the device number obtained in iumfs_mount() here.
     */
    inp->vattr.va_fsid = vfsp->vfs_dev;
    inp->vattr.va_type = type;
    inp->vattr.va_atime =      \
    inp->vattr.va_ctime =      \
    inp->vattr.va_mtime = iumfs_get_current_time();

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: va_fsid = 0x%x\n", inp->vattr.va_fsid));

    /*
     * Point the vnode at the iumnode structure, and conversely
     * point the iumnode back at the vnode structure.
     */
    vp->v_data = (caddr_t) inp;
    inp->vnode = vp;

    /*
     * Set the node number (inode number).
     * If one was specified use it; otherwise simply assign numbers
     * that increment by one each time.
     */
    if( (inp->vattr.va_nodeid = nodeid) == 0) {
        mutex_enter(&(iumfsp->iumfs_lock));
        inp->vattr.va_nodeid = ++(iumfsp->iumfs_last_nodeid);
        mutex_exit(&(iumfsp->iumfs_lock));
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: new nodeid = %d \n", inp->vattr.va_nodeid));

    // Add the new iumnode to the linked list of nodes
    iumfs_add_node_to_list(vfsp, vp);

    // Store the address of the allocated vnode in the caller-supplied pointer
    *nvpp = vp;
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: return(%d)\n", SUCCESS));
    return (SUCCESS);
}
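
A node allocated this way needs a matching teardown path. The sketch below is a hypothetical counterpart, iumfs_free_node(), shown only to mirror the allocation steps above in reverse; it assumes the node has already been removed from the node list:

static void
iumfs_free_node(vnode_t *vp)
{
    iumnode_t *inp = (iumnode_t *) vp->v_data;

    /* Undo the initialization above in reverse order. */
    rw_destroy(&(inp)->i_listlock);
    mutex_destroy(&(inp)->i_dlock);
    kmem_free(inp, sizeof (iumnode_t));
#ifdef SOL10
    vn_free(vp);
#else
    kmem_free(vp, sizeof (vnode_t));
#endif
}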
Example No. 6
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc	*p1, *p2, *parent;
	struct plimit   *p1_lim;
	uid_t		uid;
	struct lwp	*l2;
	int		count;
	vaddr_t		uaddr;
	int		tnprocs;
	int		tracefork;
	int		error = 0;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		static struct timeval lasttfm;
		atomic_dec_uint(&nprocs);
		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc", "increase kern.maxproc or NPROC");
		if (forkfsleep)
			kpause("forkmx", false, forkfsleep, NULL);
		return EAGAIN;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
		    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) {
			(void)chgproccnt(uid, -1);
			atomic_dec_uint(&nprocs);
			if (forkfsleep)
				kpause("forkulim", false, forkfsleep, NULL);
			return EAGAIN;
		}
	}

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		(void)chgproccnt(uid, -1);
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	/*
	 * We are now committed to the fork.  From here on, we may
	 * block on resources, but resource allocation may NOT fail.
	 */

	/* Allocate new proc. */
	p2 = proc_alloc();

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

	TAILQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep.  The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork.  If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	if (flags & FORK_SYSTEM) {
		/*
		 * Mark it as a system process.  Set P_NOCLDWAIT so that
		 * children are reparented to init(8) when they exit.
		 * init(8) can easily wait them out for us.
		 */
		p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
	}

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	/*
	 * Share a lock between the processes if they are to share signal
	 * state: we must synchronize access to it.
	 */
	if (flags & FORK_SHARESIGS) {
		p2->p_lock = p1->p_lock;
		mutex_obj_hold(p1->p_lock);
	} else
		p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
	ras_fork(p1, p2);
#endif

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & FORK_SHAREFILES)
		fd_share(p2);
	else if (flags & FORK_CLEANFILES)
		p2->p_fd = fd_init(NULL);
	else
		p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	if (flags & FORK_SHARECWD)
		cwdshare(p2);
	else
		p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1_lim);
	}

	if (flags & FORK_PPWAIT) {
		/* Mark ourselves as waiting for a child. */
		l1->l_pflag |= LP_VFORKWAIT;
		p2->p_lflag = PL_PPWAIT;
		p2->p_vforklwp = l1;
	} else {
		p2->p_lflag = 0;
	}
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	parent = (flags & FORK_NOWAIT) ? initproc : p1;
	p2->p_pptr = parent;
	p2->p_ppid = parent->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/*
	 * Set up the new process address space.
	 */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

	/*
	 * Finish creating the child process.
	 * It will return through a different path later.
	 */
	lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
	    stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
	    l1->l_class);

	/*
	 * Inherit l_private from the parent.
	 * Note that we cannot use lwp_setprivate() here since that
	 * also sets the CPU TLS register, which is incorrect if the
	 * process has changed that without letting the kernel know.
	 */
	l2->l_private = l1->l_private;

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, l1, flags);

	/*
	 * ...and finally, any other random fork hooks that subsystems
	 * might have registered.
	 */
	doforkhooks(p2, p1);

	SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
	p2->p_exitsig = exitsig;		/* signal for parent on exit */

	/*
	 * We don't want to tracefork vfork()ed processes because they
	 * will not receive the SIGTRAP until it is too late.
	 */
	tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
	    (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
	if (tracefork) {
		p2->p_slflag |= PSL_TRACED;
		p2->p_opptr = p2->p_pptr;
		if (p2->p_pptr != p1->p_pptr) {
			struct proc *parent1 = p2->p_pptr;

			if (parent1->p_lock < p2->p_lock) {
				if (!mutex_tryenter(parent1->p_lock)) {
					mutex_exit(p2->p_lock);
					mutex_enter(parent1->p_lock);
				}
			} else if (parent1->p_lock > p2->p_lock) {
				mutex_enter(parent1->p_lock);
			}
			parent1->p_slflag |= PSL_CHTRACED;
			proc_reparent(p2, p1->p_pptr);
			if (parent1->p_lock != p2->p_lock)
				mutex_exit(parent1->p_lock);
		}

		/*
		 * Set ptrace status.
		 */
		p1->p_fpid = p2->p_pid;
		p2->p_fpid = p1->p_pid;
	}

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	if (ktrpoint(KTR_EMUL))
		p2->p_traceflag |= KTRFAC_TRC_EMUL;

	/*
	 * Notify any interested parties about the new process.
	 */
	if (!SLIST_EMPTY(&p1->p_klist)) {
		mutex_exit(proc_lock);
		KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
		mutex_enter(proc_lock);
	}

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	/*
	 * Start profiling.
	 */
	if ((p2->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p2->p_stmutex);
		startprofclock(p2);
		mutex_spin_exit(&p2->p_stmutex);
	}

	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	if (p2->p_sflag & PS_STOPFORK) {
		struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
		p2->p_nrlwps = 0;
		p2->p_stat = SSTOP;
		p2->p_waited = 0;
		p1->p_nstopchild++;
		l2->l_stat = LSSTOP;
		KASSERT(l2->l_wchan == NULL);
		lwp_unlock_to(l2, spc->spc_lwplock);
	} else {
		p2->p_nrlwps = 1;
		p2->p_stat = SACTIVE;
		l2->l_stat = LSRUN;
		sched_enqueue(l2, false);
		lwp_unlock(l2);
	}

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	mutex_exit(p2->p_lock);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, sleep until it clears LP_VFORKWAIT.
	 */
#if 0
	while (l1->l_pflag & LP_VFORKWAIT) {
		cv_wait(&l1->l_waitcv, proc_lock);
	}
#else
	while (p2->p_lflag & PL_PPWAIT)
		cv_wait(&p1->p_waitcv, proc_lock);
#endif

	/*
	 * Let the parent know that we are tracing its child.
	 */
	if (tracefork) {
		ksiginfo_t ksi;

		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l1->l_lid;
		kpsignal(p1, &ksi, NULL);
	}
	mutex_exit(proc_lock);

	return 0;
}
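
The tracefork block above takes two process locks using pointer comparison to impose a global acquisition order and avoid deadlock. Here is the same idiom in isolation, as a generic sketch (lock_pair_ordered() is a hypothetical helper, not NetBSD API):

/*
 * Sketch: acquire two mutexes in canonical (address) order so that two
 * threads locking the same pair can never deadlock against each other.
 */
static void
lock_pair_ordered(kmutex_t *a, kmutex_t *b)
{

	if (a == b) {
		mutex_enter(a);		/* same lock: take it once */
	} else if (a < b) {
		mutex_enter(a);
		mutex_enter(b);
	} else {
		mutex_enter(b);
		mutex_enter(a);
	}
}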
Example No. 7
/* recursive rwlocks; */
void
rrw_init(struct rrwlock *rrwl, char *name)
{
	memset(rrwl, 0, sizeof(struct rrwlock));
	rw_init(&rrwl->rrwl_lock, name);
}
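
A sketch of how the recursion might be layered on the plain lock, assuming the OpenBSD-style rw_status() that reports RW_WRITE only when the calling thread holds the lock exclusively; only write holds recurse here, and the rrwl_wcnt field name is illustrative:

void
rrw_enter(struct rrwlock *rrwl, int flags)
{
	if (rw_status(&rrwl->rrwl_lock) == RW_WRITE) {
		/* We already own it exclusively: just count the re-entry. */
		rrwl->rrwl_wcnt++;
	} else {
		rw_enter(&rrwl->rrwl_lock, flags);
		if (flags & RW_WRITE)
			rrwl->rrwl_wcnt = 1;
	}
}

void
rrw_exit(struct rrwlock *rrwl)
{
	if (rrwl->rrwl_wcnt > 1) {
		rrwl->rrwl_wcnt--;	/* unwind one level of recursion */
		return;
	}
	rrwl->rrwl_wcnt = 0;
	rw_exit(&rrwl->rrwl_lock);
}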
Example No. 8
void
ichiic_attach(struct device *parent, struct device *self, void *aux)
{
	struct ichiic_softc *sc = (struct ichiic_softc *)self;
	struct pci_attach_args *pa = aux;
	struct i2cbus_attach_args iba;
	pcireg_t conf;
	bus_size_t iosize;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;

	/* Read configuration */
	conf = pci_conf_read(pa->pa_pc, pa->pa_tag, ICH_SMB_HOSTC);
	DPRINTF((": conf 0x%08x", conf));

	if ((conf & ICH_SMB_HOSTC_HSTEN) == 0) {
		printf(": SMBus disabled\n");
		return;
	}

	/* Map I/O space */
	if (pci_mapreg_map(pa, ICH_SMB_BASE, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map i/o space\n");
		return;
	}

	sc->sc_poll = 1;
	if (conf & ICH_SMB_HOSTC_SMIEN) {
		/* No PCI IRQ */
		printf(": SMI");
	} else {
		/* Install interrupt handler */
		if (pci_intr_map(pa, &ih) == 0) {
			intrstr = pci_intr_string(pa->pa_pc, ih);
			sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
			    ichiic_intr, sc, sc->sc_dev.dv_xname);
			if (sc->sc_ih != NULL) {
				printf(": %s", intrstr);
				sc->sc_poll = 0;
			}
		}
		if (sc->sc_poll)
			printf(": polling");
	}

	printf("\n");

	/* Attach I2C bus */
	rw_init(&sc->sc_i2c_lock, "iiclk");
	sc->sc_i2c_tag.ic_cookie = sc;
	sc->sc_i2c_tag.ic_acquire_bus = ichiic_i2c_acquire_bus;
	sc->sc_i2c_tag.ic_release_bus = ichiic_i2c_release_bus;
	sc->sc_i2c_tag.ic_exec = ichiic_i2c_exec;

	bzero(&iba, sizeof(iba));
	iba.iba_name = "iic";
	iba.iba_tag = &sc->sc_i2c_tag;
	config_found(self, &iba, iicbus_print);

	return;
}
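
The "iiclk" rwlock initialized above backs the bus acquire/release hooks registered on the i2c tag. A sketch of what those could look like, after the OpenBSD driver (details may differ from the shipped source):

int
ichiic_i2c_acquire_bus(void *cookie, int flags)
{
	struct ichiic_softc *sc = cookie;

	if (cold || sc->sc_poll || (flags & I2C_F_POLL))
		return (0);	/* polling context: never sleep on the lock */

	return (rw_enter(&sc->sc_i2c_lock, RW_WRITE | RW_INTR));
}

void
ichiic_i2c_release_bus(void *cookie, int flags)
{
	struct ichiic_softc *sc = cookie;

	if (cold || sc->sc_poll || (flags & I2C_F_POLL))
		return;

	rw_exit(&sc->sc_i2c_lock);
}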
Example No. 9
struct tftphdr *w_init()
{
    return rw_init(0);
}                               /* write-behind */
Example No. 10
/* init for write-behind                                                  */
struct tftphdr *w_init(void)
{
    return rw_init(0);
}
Example No. 11
/* init for read-ahead   */
struct tftphdr *r_init(void)
{
    return rw_init(1);
}
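
All of these wrappers funnel into a shared rw_init(x), where the argument selects read-ahead (1) or write-behind (0). A condensed sketch of the dual-buffer setup after BSD tftpsubs.c follows; the buffer bookkeeping is simplified, PKTSIZE stands in for the real packet-size constant, and struct tftphdr comes from <arpa/tftp.h>:

#include <arpa/tftp.h>		/* struct tftphdr */

#define PKTSIZE		(512 + 4)	/* one data block plus TFTP header */
#define BF_ALLOC	-3		/* buffer handed out to the caller */
#define BF_FREE		-2		/* buffer free for the next fill/drain */

static struct bf {
    int counter;		/* byte count, or BF_ALLOC/BF_FREE flag */
    char buf[PKTSIZE];		/* room for a tftphdr plus data */
} bfs[2];

static int nextone;		/* index of the next buffer to use */
static int current;		/* index of the buffer handed out */
static int newline;		/* netascii CR-LF translation state */
static int prevchar;

static struct tftphdr *
rw_init(int x)
{
    newline = 0;		/* reset netascii translation state */
    prevchar = -1;
    bfs[0].counter = BF_ALLOC;	/* hand buffer 0 to the caller */
    bfs[1].counter = BF_FREE;
    current = 0;
    nextone = x;		/* 1 = read-ahead, 0 = write-behind */
    return (struct tftphdr *)bfs[0].buf;
}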
Example No. 12
/*
 * smbfs mount vfsop
 * Set up mount info record and attach it to vfs struct.
 */
static int
smbfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	char		*data = uap->dataptr;
	int		error;
	smbnode_t 	*rtnp = NULL;	/* root of this fs */
	smbmntinfo_t 	*smi = NULL;
	dev_t 		smbfs_dev;
	int 		version;
	int 		devfd;
	zone_t		*zone = curproc->p_zone;
	zone_t		*mntzone = NULL;
	smb_share_t 	*ssp = NULL;
	smb_cred_t 	scred;
	int		flags, sec;

	STRUCT_DECL(smbfs_args, args);		/* smbfs mount arguments */

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * get arguments
	 *
	 * uap->datalen might be different from sizeof (args)
	 * in a compatible situation.
	 */
	STRUCT_INIT(args, get_udatamodel());
	bzero(STRUCT_BUF(args), SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE));
	if (copyin(data, STRUCT_BUF(args), MIN(uap->datalen,
	    SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE))))
		return (EFAULT);

	/*
	 * Check mount program version
	 */
	version = STRUCT_FGET(args, version);
	if (version != SMBFS_VERSION) {
		cmn_err(CE_WARN, "mount version mismatch:"
		    " kernel=%d, mount=%d\n",
		    SMBFS_VERSION, version);
		return (EINVAL);
	}

	/*
	 * Deal with re-mount requests.
	 */
	if (uap->flags & MS_REMOUNT) {
		cmn_err(CE_WARN, "MS_REMOUNT not implemented");
		return (ENOTSUP);
	}

	/*
	 * Check for busy
	 */
	mutex_enter(&mvp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get the "share" from the netsmb driver (ssp).
	 * It is returned with a "ref" (hold) for us.
	 * Release this hold: at errout below, or in
	 * smbfs_freevfs().
	 */
	devfd = STRUCT_FGET(args, devfd);
	error = smb_dev2share(devfd, &ssp);
	if (error) {
		cmn_err(CE_WARN, "invalid device handle %d (%d)\n",
		    devfd, error);
		return (error);
	}

	/*
	 * Use "goto errout" from here on.
	 * See: ssp, smi, rtnp, mntzone
	 */

	/*
	 * Determine the zone we're being mounted into.
	 */
	zone_hold(mntzone = zone);		/* start with this assumption */
	if (getzoneid() == GLOBAL_ZONEID) {
		zone_rele(mntzone);
		mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
		ASSERT(mntzone != NULL);
		if (mntzone != zone) {
			error = EBUSY;
			goto errout;
		}
	}

	/*
	 * Stop the mount from going any further if the zone is going away.
	 */
	if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
		error = EBUSY;
		goto errout;
	}

	/*
	 * On a Trusted Extensions client, we may have to force read-only
	 * for read-down mounts.
	 */
	if (is_system_labeled()) {
		void *addr;
		int ipvers = 0;
		struct smb_vc *vcp;

		vcp = SSTOVC(ssp);
		addr = smb_vc_getipaddr(vcp, &ipvers);
		error = smbfs_mount_label_policy(vfsp, addr, ipvers, cr);

		if (error > 0)
			goto errout;

		if (error == -1) {
			/* change mount to read-only to prevent write-down */
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
		}
	}

	/* Prevent unload. */
	atomic_inc_32(&smbfs_mountcount);

	/*
	 * Create a mount record and link it to the vfs struct.
	 * No more possibilities for errors from here on.
	 * Tear-down of this stuff is in smbfs_free_smi()
	 *
	 * Compare with NFS: nfsrootvp()
	 */
	smi = kmem_zalloc(sizeof (*smi), KM_SLEEP);

	mutex_init(&smi->smi_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&smi->smi_statvfs_cv, NULL, CV_DEFAULT, NULL);

	rw_init(&smi->smi_hash_lk, NULL, RW_DEFAULT, NULL);
	smbfs_init_hash_avl(&smi->smi_hash_avl);

	smi->smi_share = ssp;
	ssp = NULL;

	/*
	 * Convert the anonymous zone hold acquired via zone_hold() above
	 * into a zone reference.
	 */
	zone_init_ref(&smi->smi_zone_ref);
	zone_hold_ref(mntzone, &smi->smi_zone_ref, ZONE_REF_SMBFS);
	zone_rele(mntzone);
	mntzone = NULL;

	/*
	 * Initialize option defaults
	 */
	smi->smi_flags	= SMI_LLOCK;
	smi->smi_acregmin = SEC2HR(SMBFS_ACREGMIN);
	smi->smi_acregmax = SEC2HR(SMBFS_ACREGMAX);
	smi->smi_acdirmin = SEC2HR(SMBFS_ACDIRMIN);
	smi->smi_acdirmax = SEC2HR(SMBFS_ACDIRMAX);

	/*
	 * All "generic" mount options have already been
	 * handled in vfs.c:domount() - see mntopts stuff.
	 * Query generic options using vfs_optionisset().
	 */
	if (vfs_optionisset(vfsp, MNTOPT_INTR, NULL))
		smi->smi_flags |= SMI_INT;
	if (vfs_optionisset(vfsp, MNTOPT_ACL, NULL))
		smi->smi_flags |= SMI_ACL;

	/*
	 * Get the mount options that come in as smbfs_args,
	 * starting with args.flags (SMBFS_MF_xxx)
	 */
	flags = STRUCT_FGET(args, flags);
	smi->smi_uid 	= STRUCT_FGET(args, uid);
	smi->smi_gid 	= STRUCT_FGET(args, gid);
	smi->smi_fmode	= STRUCT_FGET(args, file_mode) & 0777;
	smi->smi_dmode	= STRUCT_FGET(args, dir_mode) & 0777;

	/*
	 * Handle the SMBFS_MF_xxx flags.
	 */
	if (flags & SMBFS_MF_NOAC)
		smi->smi_flags |= SMI_NOAC;
	if (flags & SMBFS_MF_ACREGMIN) {
		sec = STRUCT_FGET(args, acregmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acregmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACREGMAX) {
		sec = STRUCT_FGET(args, acregmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acregmax = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMIN) {
		sec = STRUCT_FGET(args, acdirmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acdirmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMAX) {
		sec = STRUCT_FGET(args, acdirmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acdirmax = SEC2HR(sec);
	}

	/*
	 * Get attributes of the remote file system,
	 * i.e. ACL support, named streams, etc.
	 */
	smb_credinit(&scred, cr);
	error = smbfs_smb_qfsattr(smi->smi_share, &smi->smi_fsa, &scred);
	smb_credrele(&scred);
	if (error) {
		SMBVDEBUG("smbfs_smb_qfsattr error %d\n", error);
	}

	/*
	 * We enable XATTR by default (via smbfs_mntopts)
	 * but if the share does not support named streams,
	 * force the NOXATTR option (also clears XATTR).
	 * Caller will set or clear VFS_XATTR after this.
	 */
	if ((smi->smi_fsattr & FILE_NAMED_STREAMS) == 0)
		vfs_setmntopt(vfsp, MNTOPT_NOXATTR, NULL, 0);

	/*
	 * Ditto ACLs (disable if not supported on this share)
	 */
	if ((smi->smi_fsattr & FILE_PERSISTENT_ACLS) == 0) {
		vfs_setmntopt(vfsp, MNTOPT_NOACL, NULL, 0);
		smi->smi_flags &= ~SMI_ACL;
	}

	/*
	 * Assign a unique device id to the mount
	 */
	mutex_enter(&smbfs_minor_lock);
	do {
		smbfs_minor = (smbfs_minor + 1) & MAXMIN32;
		smbfs_dev = makedevice(smbfs_major, smbfs_minor);
	} while (vfs_devismounted(smbfs_dev));
	mutex_exit(&smbfs_minor_lock);

	vfsp->vfs_dev	= smbfs_dev;
	vfs_make_fsid(&vfsp->vfs_fsid, smbfs_dev, smbfsfstyp);
	vfsp->vfs_data	= (caddr_t)smi;
	vfsp->vfs_fstype = smbfsfstyp;
	vfsp->vfs_bsize = MAXBSIZE;
	vfsp->vfs_bcount = 0;

	smi->smi_vfsp	= vfsp;
	smbfs_zonelist_add(smi);	/* undo in smbfs_freevfs */

	/*
	 * Create the root vnode, which we need in unmount
	 * for the call to smbfs_check_table(), etc.
	 * Release this hold in smbfs_unmount.
	 */
	rtnp = smbfs_node_findcreate(smi, "\\", 1, NULL, 0, 0,
	    &smbfs_fattr0);
	ASSERT(rtnp != NULL);
	rtnp->r_vnode->v_type = VDIR;
	rtnp->r_vnode->v_flag |= VROOT;
	smi->smi_root = rtnp;

	/*
	 * NFS does other stuff here too:
	 *   async worker threads
	 *   init kstats
	 *
	 * End of code from NFS nfsrootvp()
	 */
	return (0);

errout:
	vfsp->vfs_data = NULL;
	if (smi != NULL)
		smbfs_free_smi(smi);

	if (mntzone != NULL)
		zone_rele(mntzone);

	if (ssp != NULL)
		smb_share_rele(ssp);

	return (error);
}
Example No. 13
int
zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, size, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Optional temporary mount options, free'd in zfs_sb_free().
	 */
	zsb->z_mntopts = (zmo ? zmo : zfs_mntopts_alloc());

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields.
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
	    8, 1, &zsb->z_userobjquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
	    8, 1, &zsb->z_groupobjquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
	zsb->z_hold_size = size;
	zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP);
	zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (i = 0; i != size; i++) {
		avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example No. 14
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* init vdev_raidz_impl_lock */
	rw_init(&vdev_raidz_impl_lock, NULL, RW_DEFAULT, NULL);

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *) raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported()) {
			/* init kstat */
			init_raidz_kstat(&raidz_impl_kstats[c],
			    curr_impl->name);
			raidz_supp_impl[c++] = (raidz_impl_ops_t *) curr_impl;
		}
	}
	raidz_supp_impl_cnt = c;	/* number of supported impl */
	raidz_supp_impl[c] = NULL;	/* sentinel */

	/* init kstat for original routines */
	init_raidz_kstat(&(raidz_impl_kstats[raidz_supp_impl_cnt]), "original");

#if !defined(_KERNEL)
	/*
	 * Skip benchmarking and use last implementation as fastest
	 */
	memcpy(&vdev_raidz_fastest_impl, raidz_supp_impl[raidz_supp_impl_cnt-1],
	    sizeof (vdev_raidz_fastest_impl));

	vdev_raidz_fastest_impl.name = "fastest";

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on it */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_data = zio_data_buf_alloc(BENCH_ZIO_SIZE);
	VERIFY(bench_zio->io_data);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, 9,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, 9, BENCH_COLS, PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	zio_data_buf_free(bench_zio->io_data, BENCH_ZIO_SIZE);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench",
		"misc", KSTAT_TYPE_NAMED,
		sizeof (raidz_impl_kstat_t) / sizeof (kstat_named_t) *
		(raidz_supp_impl_cnt + 1), KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = raidz_impl_kstats;
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	raidz_math_initialized = B_TRUE;
	if (!vdev_raidz_impl_user_set)
		VERIFY0(vdev_raidz_impl_set("fastest"));
}
Example No. 15
static struct tftphdr *w_init(void)
{
  return rw_init(0); /* write-behind */
}
Example No. 16
struct tftphdr *r_init()
{
    return rw_init(1);
}                               /* read-ahead */
Example No. 17
static struct tftphdr *r_init(void)
{
  return rw_init(1); /* read-ahead */
}
Example No. 18
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
	uint64_t recordsize, readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;

	ASSERT(vfsp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = vfsp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zfsvfs->z_online_recv_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zfsvfs->z_teardown_lock);
	rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
	    NULL))
		goto out;
	zfsvfs->z_vfs->vfs_bsize = recordsize;

	vfsp->vfs_data = zfsvfs;
	vfsp->mnt_flag |= MNT_LOCAL;
	vfsp->mnt_kern_flag |= MNTK_MPSAFE;
	vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
	vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	mode = DS_MODE_OWNER;
	if (readonly)
		mode |= DS_MODE_READONLY;

	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_OWNER | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp))
		goto out;

	/*
	 * Set features for file system.
	 */
	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	if (zfsvfs->z_use_fuids) {
		vfs_set_feature(vfsp, VFSFT_XVATTR);
		vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS);
		vfs_set_feature(vfsp, VFSFT_ACEMASKONACCESS);
		vfs_set_feature(vfsp, VFSFT_ACLONCREATE);
	}
	if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
		vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
		vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
	} else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
		vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
		vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
	}

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t pval;

		ASSERT(mode & DS_MODE_READONLY);
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &pval, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, pval);
		zfsvfs->z_issnap = B_TRUE;
	} else {
		error = zfsvfs_setup(zfsvfs, B_TRUE);
	}

	vfs_mountedfrom(vfsp, osname);

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		zfs_freezfsvfs(zfsvfs);
	} else {
		atomic_add_32(&zfs_active_fs_count, 1);
	}

	return (error);
}
Example No. 19
static void
piixpm_attach(device_t parent, device_t self, void *aux)
{
	struct piixpm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct i2cbus_attach_args iba;
	pcireg_t base, conf;
	pcireg_t pmmisc;
	pci_intr_handle_t ih;
	char devinfo[256];
	const char *intrstr = NULL;

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	aprint_naive("\n");
	aprint_normal("\n");

	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal_dev(self, "%s (rev. 0x%02x)\n", devinfo,
	    PCI_REVISION(pa->pa_class));

	if (!pmf_device_register(self, piixpm_suspend, piixpm_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Read configuration */
	conf = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_HOSTC);
	DPRINTF(("%s: conf 0x%x\n", device_xname(self), conf));

	if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) ||
	    (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82371AB_PMC))
		goto nopowermanagement;

	/* check whether I/O access to PM regs is enabled */
	pmmisc = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PMREGMISC);
	if (!(pmmisc & 1))
		goto nopowermanagement;

	sc->sc_pm_iot = pa->pa_iot;
	/* Map I/O space */
	base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PM_BASE);
	if (bus_space_map(sc->sc_pm_iot, PCI_MAPREG_IO_ADDR(base),
	    PIIX_PM_SIZE, 0, &sc->sc_pm_ioh)) {
		aprint_error_dev(self, "can't map power management I/O space\n");
		goto nopowermanagement;
	}

	/*
	 * Revision 0 and 1 are PIIX4, 2 is PIIX4E, 3 is PIIX4M.
	 * PIIX4 and PIIX4E have a bug in the timer latch, see Errata #20
	 * in the "Specification update" (document #297738).
	 */
	acpipmtimer_attach(self, sc->sc_pm_iot, sc->sc_pm_ioh,
			   PIIX_PM_PMTMR,
		(PCI_REVISION(pa->pa_class) < 3) ? ACPIPMT_BADLATCH : 0 );

nopowermanagement:
	if ((conf & PIIX_SMB_HOSTC_HSTEN) == 0) {
		aprint_normal_dev(self, "SMBus disabled\n");
		return;
	}

	/* Map I/O space */
	sc->sc_smb_iot = pa->pa_iot;
	base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_BASE) & 0xffff;
	if (bus_space_map(sc->sc_smb_iot, PCI_MAPREG_IO_ADDR(base),
	    PIIX_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
		aprint_error_dev(self, "can't map smbus I/O space\n");
		return;
	}

	sc->sc_poll = 1;
	if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_SMI) {
		/* No PCI IRQ */
		aprint_normal_dev(self, "interrupting at SMI");
	} else if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_IRQ) {
		/* Install interrupt handler */
		if (pci_intr_map(pa, &ih) == 0) {
			intrstr = pci_intr_string(pa->pa_pc, ih);
			sc->sc_smb_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
			    piixpm_intr, sc);
			if (sc->sc_smb_ih != NULL) {
				aprint_normal_dev(self, "interrupting at %s",
				    intrstr);
				sc->sc_poll = 0;
			}
		}
	}
	if (sc->sc_poll)
		aprint_normal_dev(self, "polling");

	aprint_normal("\n");

	/* Attach I2C bus */
	rw_init(&sc->sc_i2c_rwlock);
	sc->sc_i2c_tag.ic_cookie = sc;
	sc->sc_i2c_tag.ic_acquire_bus = piixpm_i2c_acquire_bus;
	sc->sc_i2c_tag.ic_release_bus = piixpm_i2c_release_bus;
	sc->sc_i2c_tag.ic_exec = piixpm_i2c_exec;

	bzero(&iba, sizeof(iba));
	iba.iba_tag = &sc->sc_i2c_tag;
	config_found_ia(self, "i2cbus", &iba, iicbus_print);

	return;
}
Example No. 20
void
glxpcib_attach(struct device *parent, struct device *self, void *aux)
{
	struct glxpcib_softc *sc = (struct glxpcib_softc *)self;
	struct timecounter *tc = &sc->sc_timecounter;
#ifndef SMALL_KERNEL
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	u_int64_t wa;
#if NGPIO > 0
	u_int64_t ga;
	struct gpiobus_attach_args gba;
	int i, gpio = 0;
#endif
	u_int64_t sa;
	struct i2cbus_attach_args iba;
	int i2c = 0;
#endif
	tc->tc_get_timecount = glxpcib_get_timecount;
	tc->tc_counter_mask = 0xffffffff;
	tc->tc_frequency = 3579545;
	tc->tc_name = "CS5536";
#ifdef __loongson__
	tc->tc_quality = 0;
#else
	tc->tc_quality = 1000;
#endif
	tc->tc_priv = sc;
	tc_init(tc);

	printf(": rev %d, 32-bit %lluHz timer",
	    (int)rdmsr(AMD5536_REV) & AMD5536_REV_MASK,
	    tc->tc_frequency);

#ifndef SMALL_KERNEL
	/* Attach the watchdog timer */
	sc->sc_iot = pa->pa_iot;
	wa = rdmsr(MSR_LBAR_MFGPT);
	if (wa & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_iot, wa & MSR_MFGPT_ADDR_MASK,
	    MSR_MFGPT_SIZE, 0, &sc->sc_ioh)) {
		/* count in seconds (as upper level desires) */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, AMD5536_MFGPT0_SETUP,
		    AMD5536_MFGPT_CNT_EN | AMD5536_MFGPT_CMP2EV |
		    AMD5536_MFGPT_CMP2 | AMD5536_MFGPT_DIV_MASK);
		wdog_register(sc, glxpcib_wdogctl_cb);
		sc->sc_wdog = 1;
		printf(", watchdog");
	}

#if NGPIO > 0
	/* map GPIO I/O space */
	sc->sc_gpio_iot = pa->pa_iot;
	ga = rdmsr(MSR_LBAR_GPIO);
	if (ga & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_gpio_iot, ga & MSR_GPIO_ADDR_MASK,
	    MSR_GPIO_SIZE, 0, &sc->sc_gpio_ioh)) {
		printf(", gpio");

		/* initialize pin array */
		for (i = 0; i < AMD5536_GPIO_NPINS; i++) {
			sc->sc_gpio_pins[i].pin_num = i;
			sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
			    GPIO_PIN_OUTPUT | GPIO_PIN_OPENDRAIN |
			    GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN |
			    GPIO_PIN_INVIN | GPIO_PIN_INVOUT;

			/* read initial state */
			sc->sc_gpio_pins[i].pin_state =
			    glxpcib_gpio_pin_read(sc, i);
		}

		/* create controller tag */
		sc->sc_gpio_gc.gp_cookie = sc;
		sc->sc_gpio_gc.gp_pin_read = glxpcib_gpio_pin_read;
		sc->sc_gpio_gc.gp_pin_write = glxpcib_gpio_pin_write;
		sc->sc_gpio_gc.gp_pin_ctl = glxpcib_gpio_pin_ctl;

		gba.gba_name = "gpio";
		gba.gba_gc = &sc->sc_gpio_gc;
		gba.gba_pins = sc->sc_gpio_pins;
		gba.gba_npins = AMD5536_GPIO_NPINS;
		gpio = 1;

	}
#endif /* NGPIO */

	/* Map SMB I/O space */
	sc->sc_smb_iot = pa->pa_iot;
	sa = rdmsr(MSR_LBAR_SMB);
	if (sa & MSR_LBAR_ENABLE &&
	    !bus_space_map(sc->sc_smb_iot, sa & MSR_SMB_ADDR_MASK,
	    MSR_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
		printf(", i2c");

		/* Enable controller */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL2, AMD5536_SMB_CTL2_EN |
		    AMD5536_SMB_CTL2_FREQ);

		/* Disable interrupts */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL1, 0);

		/* Disable slave address */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_ADDR, 0);

		/* Stall the bus after start */
		bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
		    AMD5536_SMB_CTL1, AMD5536_SMB_CTL1_STASTRE);

		/* Attach I2C framework */
		sc->sc_smb_ic.ic_cookie = sc;
		sc->sc_smb_ic.ic_acquire_bus = glxpcib_smb_acquire_bus;
		sc->sc_smb_ic.ic_release_bus = glxpcib_smb_release_bus;
		sc->sc_smb_ic.ic_send_start = glxpcib_smb_send_start;
		sc->sc_smb_ic.ic_send_stop = glxpcib_smb_send_stop;
		sc->sc_smb_ic.ic_initiate_xfer = glxpcib_smb_initiate_xfer;
		sc->sc_smb_ic.ic_read_byte = glxpcib_smb_read_byte;
		sc->sc_smb_ic.ic_write_byte = glxpcib_smb_write_byte;

		rw_init(&sc->sc_smb_lck, "iiclk");

		bzero(&iba, sizeof(iba));
		iba.iba_name = "iic";
		iba.iba_tag = &sc->sc_smb_ic;
		i2c = 1;
	}
#endif /* SMALL_KERNEL */
	pcibattach(parent, self, aux);

#ifndef SMALL_KERNEL
#if NGPIO > 0
	if (gpio)
		config_found(&sc->sc_dev, &gba, gpiobus_print);
#endif
	if (i2c)
		config_found(&sc->sc_dev, &iba, iicbus_print);
#endif
}
Example No. 21
void
smsc_attach(struct device *parent, struct device *self, void *aux)
{
	struct smsc_softc *sc = (struct smsc_softc *)self;
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *dev = uaa->device;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	struct mii_data *mii;
	struct ifnet *ifp;
	int err, s, i;
	uint32_t mac_h, mac_l;

	sc->sc_udev = dev;

	err = usbd_set_config_no(dev, SMSC_CONFIG_INDEX, 1);

	/* Setup the endpoints for the SMSC LAN95xx device(s) */
	usb_init_task(&sc->sc_tick_task, smsc_tick_task, sc,
	    USB_TASK_TYPE_GENERIC);
	rw_init(&sc->sc_mii_lock, "smscmii");
	usb_init_task(&sc->sc_stop_task, (void (*)(void *))smsc_stop, sc,
	    USB_TASK_TYPE_GENERIC);

	err = usbd_device2interface_handle(dev, SMSC_IFACE_IDX, &sc->sc_iface);
	if (err) {
		printf("%s: getting interface handle failed\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	id = usbd_get_interface_descriptor(sc->sc_iface);

	if (sc->sc_udev->speed >= USB_SPEED_HIGH)
		sc->sc_bufsz = SMSC_MAX_BUFSZ;
	else
		sc->sc_bufsz = SMSC_MIN_BUFSZ;

	/* Find endpoints. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (!ed) {
			printf("%s: couldn't get ep %d\n",
			    sc->sc_dev.dv_xname, i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->sc_ed[SMSC_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->sc_ed[SMSC_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_ed[SMSC_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	s = splnet();

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = smsc_ioctl;
	ifp->if_start = smsc_start;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Setup some of the basics */
	sc->sc_phyno = 1;

	/*
	 * Attempt to get the mac address, if an EEPROM is not attached this
	 * will just return FF:FF:FF:FF:FF:FF, so in such cases we invent a MAC
	 * address based on urandom.
	 */
	memset(sc->sc_ac.ac_enaddr, 0xff, ETHER_ADDR_LEN);
	
	/* Check if there is already a MAC address in the register */
	if ((smsc_read_reg(sc, SMSC_MAC_ADDRL, &mac_l) == 0) &&
	    (smsc_read_reg(sc, SMSC_MAC_ADDRH, &mac_h) == 0)) {
		sc->sc_ac.ac_enaddr[5] = (uint8_t)((mac_h >> 8) & 0xff);
		sc->sc_ac.ac_enaddr[4] = (uint8_t)((mac_h) & 0xff);
		sc->sc_ac.ac_enaddr[3] = (uint8_t)((mac_l >> 24) & 0xff);
		sc->sc_ac.ac_enaddr[2] = (uint8_t)((mac_l >> 16) & 0xff);
		sc->sc_ac.ac_enaddr[1] = (uint8_t)((mac_l >> 8) & 0xff);
		sc->sc_ac.ac_enaddr[0] = (uint8_t)((mac_l) & 0xff);
	}
Example No. 22
/* chfs_mountfs - init CHFS */
int
chfs_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct lwp *l = curlwp;
	kauth_cred_t cred;
	devmajor_t flash_major;
	dev_t dev;
	struct ufsmount* ump = NULL;
	struct chfs_mount* chmp;
	struct vnode *vp;
	int err = 0;

	dbg("mountfs()\n");

	dev = devvp->v_rdev;
	cred = l ? l->l_cred : NOCRED;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	err = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (err)
		return (err);

	/* Setup device. */
	flash_major = cdevsw_lookup_major(&flash_cdevsw);

	if (devvp->v_type != VBLK)
		err = ENOTBLK;
	else if (bdevsw_lookup(dev) == NULL)
		err = ENXIO;
	else if (major(dev) != flash_major) {
		dbg("major(dev): %d, flash_major: %d\n",
		    major(dev), flash_major);
		err = ENODEV;
	}
	if (err) {
		vrele(devvp);
		return (err);
	}

	/* Connect CHFS to UFS. */
	ump = kmem_zalloc(sizeof(struct ufsmount), KM_SLEEP);

	ump->um_fstype = UFS1;
	ump->um_chfs = kmem_zalloc(sizeof(struct chfs_mount), KM_SLEEP);
	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);

	chmp = ump->um_chfs;

	/* Initialize erase block handler. */
	chmp->chm_ebh = kmem_alloc(sizeof(struct chfs_ebh), KM_SLEEP);

	dbg("[]opening flash: %u\n", (unsigned int)devvp->v_rdev);
	err = ebh_open(chmp->chm_ebh, devvp->v_rdev);
	if (err) {
		dbg("error while opening flash\n");
		goto fail;
	}

	//TODO check flash sizes

	/* Initialize vnode cache's hashtable and eraseblock array. */
	chmp->chm_gbl_version = 0;
	chmp->chm_vnocache_hash = chfs_vnocache_hash_init();

	chmp->chm_blocks = kmem_zalloc(chmp->chm_ebh->peb_nr *
	    sizeof(struct chfs_eraseblock), KM_SLEEP);

	/* Initialize mutexes. */
	mutex_init(&chmp->chm_lock_mountfields, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_sizes, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&chmp->chm_lock_vnocache, MUTEX_DEFAULT, IPL_NONE);

	/* Initialize read/write constants. (from UFS) */
	chmp->chm_fs_bmask = -4096;
	chmp->chm_fs_bsize = 4096;
	chmp->chm_fs_qbmask = 4095;
	chmp->chm_fs_bshift = 12;
	chmp->chm_fs_fmask = -2048;
	chmp->chm_fs_qfmask = 2047;

	/* Initialize writebuffer. */
	chmp->chm_wbuf_pagesize = chmp->chm_ebh->flash_if->page_size;
	dbg("wbuf size: %zu\n", chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf = kmem_alloc(chmp->chm_wbuf_pagesize, KM_SLEEP);
	rw_init(&chmp->chm_lock_wbuf);

	/* Initialize queues. */
	TAILQ_INIT(&chmp->chm_free_queue);
	TAILQ_INIT(&chmp->chm_clean_queue);
	TAILQ_INIT(&chmp->chm_dirty_queue);
	TAILQ_INIT(&chmp->chm_very_dirty_queue);
	TAILQ_INIT(&chmp->chm_erasable_pending_wbuf_queue);
	TAILQ_INIT(&chmp->chm_erase_pending_queue);

	/* Initialize flash-specific constants. */
	chfs_calc_trigger_levels(chmp);

	/* Initialize sizes. */
	chmp->chm_nr_free_blocks = 0;
	chmp->chm_nr_erasable_blocks = 0;
	chmp->chm_max_vno = 2;
	chmp->chm_checked_vno = 2;
	chmp->chm_unchecked_size = 0;
	chmp->chm_used_size = 0;
	chmp->chm_dirty_size = 0;
	chmp->chm_wasted_size = 0;
	chmp->chm_free_size = chmp->chm_ebh->eb_size * chmp->chm_ebh->peb_nr;

	/* Build filesystem. */
	err = chfs_build_filesystem(chmp);

	if (err) {
		/* Armageddon and return. */
		chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);
		ebh_close(chmp->chm_ebh);
		err = EIO;
		goto fail;
	}

	/* Initialize UFS. */
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_CHFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE;
	ump->um_flags = 0;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_maxfilesize = 1048512 * 1024;

	/* Allocate the root vnode. */
	err = VFS_VGET(mp, CHFS_ROOTINO, &vp);
	if (err) {
		dbg("error: %d while allocating root node\n", err);
		return err;
	}
	vput(vp);

	/* Start GC. */
	chfs_gc_thread_start(chmp);
	mutex_enter(&chmp->chm_lock_mountfields);
	chfs_gc_trigger(chmp);
	mutex_exit(&chmp->chm_lock_mountfields);

	spec_node_setmountedfs(devvp, mp);
	return 0;

fail:
	kmem_free(chmp->chm_ebh, sizeof(struct chfs_ebh));
	kmem_free(chmp, sizeof(struct chfs_mount));
	kmem_free(ump, sizeof(struct ufsmount));
	return err;
}
Example No. 23
void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
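
In the ZFS sources this initializer is paired with a teardown hook that simply destroys the lock; a sketch mirroring it:

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}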
Example No. 24
void list_init(struct linked_list_head *list) {
  list->head = NULL;
  list->sync = rw_init();
}
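
Unlike the kernel examples above, this rw_init() returns a handle, and the snippet does not define it. One hypothetical userspace shape for it, backed by a POSIX rwlock (the rw_sync type and every name below are assumptions, not taken from the snippet):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical handle type behind list->sync. */
struct rw_sync {
  pthread_rwlock_t lock;
};

struct rw_sync *rw_init(void) {
  struct rw_sync *s = malloc(sizeof *s);
  if (s == NULL)
    return NULL;
  if (pthread_rwlock_init(&s->lock, NULL) != 0) {
    free(s);      /* initialization failed: release the handle */
    return NULL;
  }
  return s;
}

Callers would then bracket traversals with pthread_rwlock_rdlock(&list->sync->lock) and mutations with pthread_rwlock_wrlock(); note that list_init() as written ignores a NULL return from rw_init(), so a production version should propagate that failure.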