Example #1
/*
 * Starts the creation of a new printer file, which will be deleted
 * automatically once it has been closed and printed.
 *
 * SetupLength is the number of bytes in the first part of the resulting
 * print spool file which contains printer-specific control strings.
 *
 * Mode can have the following values:
 *      0     Text mode.  The server may optionally
 *            expand tabs to a series of spaces.
 *      1     Graphics mode.  No conversion of data
 *            should be done by the server.
 *
 * IdentifierString can be used by the server to provide some sort of
 * per-client identifying component to the print file.
 *
 * When the file is closed, it will be sent to the spooler and printed.
 */
smb_sdrc_t
smb_pre_open_print_file(smb_request_t *sr)
{
	struct open_param	*op = &sr->arg.open;
	char			*path;
	char			*identifier;
	uint32_t		new_id;
	uint16_t		setup;
	uint16_t		mode;
	int			rc;
	static uint32_t		tmp_id = 10000;

	bzero(op, sizeof (sr->arg.open));
	rc = smbsr_decode_vwv(sr, "ww", &setup, &mode);
	if (rc == 0)
		rc = smbsr_decode_data(sr, "%S", sr, &identifier);

	if (rc == 0) {
		path = smb_srm_zalloc(sr, MAXPATHLEN);
		op->fqi.fq_path.pn_path = path;
		new_id = atomic_inc_32_nv(&tmp_id);
		(void) snprintf(path, MAXPATHLEN, "%s%05u", identifier, new_id);
	}

	op->create_disposition = FILE_OVERWRITE_IF;
	op->create_options = FILE_NON_DIRECTORY_FILE;
	DTRACE_SMB_2(op__OpenPrintFile__start, smb_request_t *, sr,
	    struct open_param *, op);

	return ((rc == 0) ? SDRC_SUCCESS : SDRC_ERROR);
}
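
The detail that makes this handler relevant here is the spool-file name generation: a static counter bumped with atomic_inc_32_nv() gives every concurrent request a distinct numeric suffix without taking a lock. A minimal sketch of just that idiom, with illustrative names that are not from the original source:

#include <stdio.h>
#include <stdint.h>
#include <atomic.h>	/* illumos/Solaris atomic_inc_32_nv() */

static uint32_t print_file_id = 10000;

/* Append a process-wide unique, zero-padded suffix to the identifier. */
static void
make_spool_name(char *buf, size_t len, const char *identifier)
{
	uint32_t id = atomic_inc_32_nv(&print_file_id);	/* returns the new value */

	(void) snprintf(buf, len, "%s%05u", identifier, id);
}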
Example #2
int
smb_smb_echo(struct smb_vc *vcp, struct smb_cred *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	int error;

	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_ECHO, scred, &rqp);
	if (error)
		return (error);
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, 1); /* echo count */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint32le(mbp, atomic_inc_32_nv(&smbechoes));
	smb_rq_bend(rqp);
	/*
	 * Note: the IOD calls this, so
	 * this request must not wait for
	 * connection state changes, etc.
	 */
	rqp->sr_flags |= SMBR_NORECONNECT;
	error = smb_rq_simple_timed(rqp, timo);
	SMBSDEBUG("%d\n", error);
	smb_rq_done(rqp);
	return (error);
}
Example #3
__NON_INSTRUMENT_FUNCTION__
FILE *
gnu_ptrace_thread_init(void)
{
	static pthread_once_t key_once = PTHREAD_ONCE_INIT;
	static volatile uint32_t thread_n = 0;

	struct stat sta;

	/* See if a trace file exists */
	if(stat(PTRACE_FLAG_FILENAME, &sta) != 0) {
		/* No trace file: do not trace at all */
		return NULL;
	}

	char fname[100];
	snprintf(fname, sizeof(fname), PTRACE_OUTPUT, getpid(), atomic_inc_32_nv(&thread_n));

	unlink(fname);

	FILE *ret = fopen(fname, "a");
	if(ret == NULL)
		return NULL;

	/* Call initialization function, if not called before */
	pthread_once(&key_once, gnu_ptrace_process_init);

	if(pthread_getspecific(key) == NULL)
		pthread_setspecific(key, ret);

	fprintf(ret, START_TRACE "\n");
	fflush(ret);

	return ret;
}
Example #4
/*ARGSUSED*/
static ACPI_STATUS
acpidev_scope_init(acpidev_walk_info_t *infop)
{
	char unitaddr[32];
	char *compatible[] = {
		ACPIDEV_HID_SCOPE,
		ACPIDEV_TYPE_SCOPE,
		ACPIDEV_HID_VIRTNEX,
		ACPIDEV_TYPE_VIRTNEX,
	};

	ASSERT(infop != NULL);
	ASSERT(infop->awi_hdl != NULL);
	ASSERT(infop->awi_dip != NULL);
	if (ACPI_FAILURE(acpidev_set_compatible(infop,
	    ACPIDEV_ARRAY_PARAM(compatible)))) {
		return (AE_ERROR);
	}
	(void) snprintf(unitaddr, sizeof (unitaddr), "%u",
	    atomic_inc_32_nv(&acpidev_scope_unitaddr) - 1);
	if (ACPI_FAILURE(acpidev_set_unitaddr(infop, NULL, 0, unitaddr))) {
		return (AE_ERROR);
	}

	return (AE_OK);
}
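
Note the "- 1" after atomic_inc_32_nv() above: the function returns the incremented (new) value, so subtracting one recovers the pre-increment value and produces 0-based unit addresses. An illustrative sketch of that fetch-and-add idiom (the counter name here is hypothetical):

#include <stdint.h>
#include <atomic.h>

static uint32_t unitaddr_counter;	/* hypothetical 0-based allocator */

static uint32_t
next_unitaddr(void)
{
	/* atomic_inc_32_nv() returns the new value; "- 1" yields 0, 1, 2, ... */
	return (atomic_inc_32_nv(&unitaddr_counter) - 1);
}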
Example #5
inline uint32_t inc_and_fetch(volatile uint32_t *ptr)
{
#if ELEVELDB_IS_SOLARIS
    return atomic_inc_32_nv(ptr);
#else
    return __sync_add_and_fetch(ptr, 1);
#endif
}
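
Both branches have the same contract: atomically increment and return the new value (atomic_inc_32_nv() on Solaris/illumos, GCC's __sync_add_and_fetch() elsewhere). A hypothetical caller of the wrapper above, just to show how the returned value is typically used:

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t requests_served;	/* hypothetical statistics counter */

void
record_request(void)
{
    uint32_t n = inc_and_fetch(&requests_served);	/* new value: 1, 2, 3, ... */

    if ((n % 1000) == 0)
        printf("served %u requests\n", n);
}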
Example #6
/*
 * prop_object_retain --
 *	Increment the reference count on an object.
 */
void
prop_object_retain(prop_object_t obj)
{
	struct _prop_object *po = obj;
	uint32_t ncnt;

	ncnt = atomic_inc_32_nv(&po->po_refcnt);
	_PROP_ASSERT(ncnt != 0);
}
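
The retain side is just an atomic increment plus an overflow assertion. For symmetry, a hedged sketch of what a matching release generally looks like; this is not proplib's actual prop_object_release(), which also has to walk container objects, and the destructor call shown is hypothetical:

static void
object_release_sketch(struct _prop_object *po)
{
	/* Drop one reference; destroy the object when the count reaches zero. */
	if (atomic_dec_32_nv(&po->po_refcnt) == 0)
		prop_object_free_internal(po);	/* hypothetical destructor hook */
}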
Example #7
/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		(void) atomic_inc_32_nv(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}
Example #8
/*
 * VFS entry points
 */
static int
objfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	objfs_vfs_t *data;
	dev_t dev;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	data = kmem_alloc(sizeof (objfs_vfs_t), KM_SLEEP);

	/*
	 * Initialize vfs fields
	 */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = objfs_fstype;
	do {
		dev = makedevice(objfs_major,
		    atomic_inc_32_nv(&objfs_minor) & L_MAXMIN32);
	} while (vfs_devismounted(dev));
	vfs_make_fsid(&vfsp->vfs_fsid, dev, objfs_fstype);
	vfsp->vfs_data = data;
	vfsp->vfs_dev = dev;

	/*
	 * Create root
	 */
	data->objfs_vfs_root = objfs_create_root(vfsp);

	return (0);
}
Example #9
/**
 * Get the most up to date value of the given cf var.
 *
 * @param [in] var Pointer to a cf var
 * @param [out] res Pointer to location where the variable's value will be output
 * @param [out] version Pointer (may be NULL) to 64-bit integer where the current version will be output
 *
 * @return Zero on success, a system error code otherwise
 */
int
pthread_var_get_np(pthread_var_np_t vp, void **res, uint64_t *version)
{
    int err = 0;
    uint32_t slot_idx;
    uint32_t slots_in_use;
    uint64_t vers;
    struct slot *slot;
    struct value *newest;

    if (version == NULL)
        version = &vers;
    *version = 0;
    *res = NULL;

    if ((slot = pthread_getspecific(vp->tkey)) == NULL) {
        /* First time for this thread -> O(N) slow path (subscribe thread) */
        slot_idx = atomic_inc_32_nv(&vp->next_slot_idx) - 1;
        if ((slot = get_free_slot(vp)) == NULL) {
            /* Slower path still: grow slots array list */
            err = grow_slots(vp, slot_idx, 2);  /* O(log N) */
            assert(err == 0);
            slot = get_slot(vp, slot_idx);      /* O(N) */
            assert(slot != NULL);
            atomic_write_32(&slot->in_use, 1);
        }
        assert(slot->vp == vp);
        slots_in_use = atomic_inc_32_nv(&vp->slots_in_use);
        assert(slots_in_use > 1);
        if ((err = pthread_setspecific(vp->tkey, slot)) != 0)
            return err;
    }

    /*
     * Else/then fast path: one acquire read, one release write, no
     * free()s.  O(1).
     *
     * We have to loop because we could read one value in the
     * conditional and that value could get freed if a writer runs
     * between the read in the conditional and the assignment to
     * slot->value with no other readers also succeeding in capturing
     * that value before that writer completes.
     *
     * This loop will run just once if there are no writers, and will
     * run as many times as writers can run between the conditional and
     * the body.  This loop can only be an infinite loop if there's an
     * infinite number of writers who run with higher priority than this
     * thread.  This is why writers yield() before dropping their write
     * lock.
     *
     * Note that in the body of this loop we can write a soon-to-become-
     * invalid value to our slot because many writers can write between
     * the loop condition and the body.  The writer has to jump through
     * some hoops to deal with this.
     */
    while (atomic_read_ptr((volatile void **)&slot->value) !=
           (newest = atomic_read_ptr((volatile void **)&vp->values)))
        atomic_write_ptr((volatile void **)&slot->value, newest);

    if (newest != NULL) {
        *res = newest->value;
        *version = newest->version;
    }

    return 0;
}
Example #10
/**
 * Set new data on a thread-safe global variable
 *
 * @param [in] var Pointer to thread-safe global variable
 * @param [in] cfdata New value for the thread-safe global variable
 * @param [out] new_version New version number
 *
 * @return 0 on success, or a system error such as ENOMEM.
 */
int
pthread_var_set_np(pthread_var_np_t vp, void *cfdata,
                     uint64_t *new_version)
{
    int err;
    size_t i;
    struct var *v;
    struct vwrapper *old_wrapper = NULL;
    struct vwrapper *wrapper;
    struct vwrapper *tmp;
    uint64_t vers;
    uint64_t tmp_version;
    uint64_t nref;

    if (cfdata == NULL)
        return EINVAL;

    if (new_version == NULL)
        new_version = &vers;

    *new_version = 0;

    /* Build a wrapper for the new value */
    if ((wrapper = calloc(1, sizeof(*wrapper))) == NULL)
        return errno;

    /*
     * The var itself holds a reference to the current value, thus its
     * nref starts at 1, but that is made so further below.
     */
    wrapper->dtor = vp->dtor;
    wrapper->nref = 0;
    wrapper->ptr = cfdata;

    if ((err = pthread_mutex_lock(&vp->write_lock)) != 0) {
        free(wrapper);
        return err;
    }

    /* vp->next_version is stable because we hold the write_lock */
    *new_version = wrapper->version = atomic_read_64(&vp->next_version);

    /* Grab the next slot */
    v = vp->vars[(*new_version + 1) & 0x1].other;
    old_wrapper = atomic_read_ptr((volatile void **)&v->wrapper);

    if (*new_version == 0) {
        /* This is the first write; set wrapper on both slots */

        for (i = 0; i < sizeof(vp->vars)/sizeof(vp->vars[0]); i++) {
            v = &vp->vars[i];
            nref = atomic_inc_32_nv(&wrapper->nref);
            v->version = 0;
            tmp = atomic_cas_ptr((volatile void **)&v->wrapper,
                                 old_wrapper, wrapper);
            assert(tmp == old_wrapper && tmp == NULL);
        }

        assert(nref > 1);

        tmp_version = atomic_inc_64_nv(&vp->next_version);
        assert(tmp_version == 1);

        /* Signal waiters */
        (void) pthread_mutex_lock(&vp->waiter_lock);
        (void) pthread_cond_signal(&vp->waiter_cv); /* no thundering herd */
        (void) pthread_mutex_unlock(&vp->waiter_lock);
        return pthread_mutex_unlock(&vp->write_lock);
    }

    nref = atomic_inc_32_nv(&wrapper->nref);
    assert(nref == 1);

    assert(old_wrapper != NULL && old_wrapper->nref > 0);

    /* Wait until that slot is quiescent before mutating it */
    if ((err = pthread_mutex_lock(&vp->cv_lock)) != 0) {
        (void) pthread_mutex_unlock(&vp->write_lock);
        free(wrapper);
        return err;
    }
    while (atomic_read_32(&v->nreaders) > 0) {
        /*
         * We have a separate lock for writing vs. waiting so that no
         * other writer can steal a march on us.  All writers will enter,
         * all writers will finish.  We got here by winning the race for
         * the writer lock, so we'll hold onto it, and thus avoid having
         * to restart here.
         */
        if ((err = pthread_cond_wait(&vp->cv, &vp->cv_lock)) != 0) {
            (void) pthread_mutex_unlock(&vp->cv_lock);
            (void) pthread_mutex_unlock(&vp->write_lock);
            free(wrapper);
            return err;
        }
    }
    if ((err = pthread_mutex_unlock(&vp->cv_lock)) != 0) {
        (void) pthread_mutex_unlock(&vp->write_lock);
        free(wrapper);
        return err;
    }

    /* Update that now quiescent slot; these are the release operations */
    tmp = atomic_cas_ptr((volatile void **)&v->wrapper, old_wrapper, wrapper);
    assert(tmp == old_wrapper);
    v->version = *new_version;
    tmp_version = atomic_inc_64_nv(&vp->next_version);
    assert(tmp_version == *new_version + 1);
    assert(v->version > v->other->version);

    /* Release the old cf */
    assert(old_wrapper != NULL && old_wrapper->nref > 0);
    wrapper_free(old_wrapper);

    /* Done */
    return pthread_mutex_unlock(&vp->write_lock);
}
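
Examples #9 and #10 (together with the reader variant in #11 below) form a read-mostly, thread-safe global variable. A hedged usage sketch of the API as it appears in these excerpts; the header name and the pthread_var_init_np() initializer are assumptions, since only the _get_np/_set_np/_release_np calls appear above:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include "pthread_var_np.h"	/* hypothetical header declaring the API above */

static pthread_var_np_t config_var;

/* One-time setup; pthread_var_init_np() is an assumed initializer name. */
void
config_setup(void)
{
    int err = pthread_var_init_np(&config_var, free);	/* free() as value destructor */
    assert(err == 0);
}

/* Writer: publish a new value (writers serialize on the internal write_lock). */
void
publish_config(void *new_cfg)
{
    uint64_t version;
    int err = pthread_var_set_np(config_var, new_cfg, &version);
    assert(err == 0);
}

/* Reader: lock-free fast path; the value read is cached per thread via tkey. */
void
use_config(void)
{
    void *cfg;
    uint64_t version;

    if (pthread_var_get_np(config_var, &cfg, &version) == 0 && cfg != NULL)
        printf("using config version %llu\n", (unsigned long long)version);
}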
Example #11
/**
 * Get the most up to date value of the given cf var.
 *
 * @param [in] var Pointer to a cf var
 * @param [out] res Pointer to location where the variable's value will be output
 * @param [out] version Pointer (may be NULL) to 64-bit integer where the current version will be output
 *
 * @return Zero on success, a system error code otherwise
 */
int
pthread_var_get_np(pthread_var_np_t vp, void **res, uint64_t *version)
{
    int err = 0;
    int err2 = 0;
    uint32_t nref;
    struct var *v;
    uint64_t vers, vers2;
    struct vwrapper *wrapper;
    int got_both_slots = 0; /* Whether we incremented both slots' nreaders */
    int do_signal_writer = 0;

    if (version == NULL)
        version = &vers;
    *version = 0;

    *res = NULL;

    if ((wrapper = pthread_getspecific(vp->tkey)) != NULL &&
        wrapper->version == atomic_read_64(&vp->next_version) - 1) {

        /* Fast path */
        *version = wrapper->version;
        *res = wrapper->ptr;
        return 0;
    }

    /* Get the current next version */
    *version = atomic_read_64(&vp->next_version);
    if (*version == 0) {
        /* Not set yet */
        assert(*version == 0 || *res != NULL);
        return 0;
    }
    (*version)--; /* make it the current version */

    /* Get what we hope is still the current slot */
    v = &vp->vars[(*version) & 0x1];

    /*
     * We picked a slot, but we could just have lost against one or more
     * writers.  So far nothing we've done would block any number of
     * them.
     *
     * We increment nreaders for the slot we picked to keep out
     * subsequent writers; we can then lose one more race at most.
     *
     * But we still need to learn whether we lost the race.
     */
    (void) atomic_inc_32_nv(&v->nreaders);

    /* See if we won any race */
    if ((vers2 = atomic_read_64(&vp->next_version)) == *version) {
        /*
         * We won, or didn't race at all.  We can now safely
         * increment nref for the wrapped value in the current slot.
         *
         * We can still have lost one race, but this slot is now ours.
         *
         * The key here is that we updated nreaders for one slot,
         * which might not keep the one writer we might have been
         * racing with from making the then current slot the now
         * previous slot, but because writers are serialized it will
         * keep the next writer from touching the slot we thought
         * was the current slot.  Thus here we either have the
         * current slot or the previous slot, and either way it's OK
         * for us to grab a reference to the wrapped value in the
         * slot we took.
         */
        goto got_a_slot;
    }

    /*
     * We may have incremented nreaders for the wrong slot.  Any number
     * of writers could have written between our getting
     * vp->next_version the first time, and our incrementing nreaders
     * for the corresponding slot.  We can't incref the nref of the
     * value wrapper found at the slot we picked.  We first have to find
     * the correct current slot, or ensure that no writer will release
     * the other slot.
     *
     * We increment the reader count on the other slot, but we do it
     * *before* decrementing the reader count on this one.  This should
     * guarantee that we find the other one present by keeping
     * subsequent writers (subsequent to the second writer we might be
     * racing with) out of both slots for the time between the update of
     * one slot's nreaders and the other's.
     *
     * We then have to repeat the race loss detection.  We need only do
     * this at most once.
     */
    atomic_inc_32_nv(&v->other->nreaders);

    /* We hold both slots */
    got_both_slots = 1;

    /*
     * vp->next_version can now increment by at most one, and we're
     * guaranteed to have one usable slot (whichever one we _now_ see as
     * the current slot, and which can still become the previous slot).
     */
    vers2 = atomic_read_64(&vp->next_version);
    assert(vers2 > *version);
    *version = vers2 - 1;

    /* Select a slot that looks current in this thread */
    v = &vp->vars[(*version) & 0x1];

got_a_slot:
    if (v->wrapper == NULL) {
        /* Whoa, nothing there; shouldn't happen; assert? */
        assert(*version == 0);
        assert(*version == 0 || *res != NULL);
        if (got_both_slots && atomic_dec_32_nv(&v->other->nreaders) == 0) {
            /* Last reader of a slot -> signal writer. */
            do_signal_writer = 1;
        }
        /*
         * Optimization TODO:
         *
         *    If vp->next_version hasn't changed since earlier then we
         *    should be able to avoid having to signal a writer when we
         *    decrement what we know is the current slot's nreaders to
         *    zero.  This should read:
         *
         *    if ((atomic_dec_32_nv(&v->nreaders) == 0 &&
         *         atomic_read_64(&vp->next_version) == vers2) ||
         *        do_signal_writer)
         *        err2 = signal_writer(vp);
         */
        if (atomic_dec_32_nv(&v->nreaders) == 0 || do_signal_writer)
            err2 = signal_writer(vp);
        return (err2 == 0) ? err : err2;
    }

    assert(vers2 == atomic_read_64(&vp->next_version) ||
           (vers2 + 1) == atomic_read_64(&vp->next_version));

    /* Take the wrapped value for the slot we chose */
    nref = atomic_inc_32_nv(&v->wrapper->nref);
    assert(nref > 1);
    *version = v->wrapper->version;
    *res = atomic_read_ptr((volatile void **)&v->wrapper->ptr);
    assert(*res != NULL);

    /*
     * We'll release the previous wrapper and save the new one in
     * vp->tkey below, after releasing the slot it came from.
     */
    wrapper = v->wrapper;

    /*
     * Release the slot(s) and signal any possible waiting writer if
     * either slot's nreaders drops to zero (that's what the writer will
     * be waiting for).
     *
     * The one blocking operation done by readers happens in
     * signal_writer(), but that one blocking operation is for a lock
     * that the writer will have or will soon have released, so it's
     * a practically uncontended blocking operation.
     */
    if (got_both_slots && atomic_dec_32_nv(&v->other->nreaders) == 0)
        do_signal_writer = 1;
    if (atomic_dec_32_nv(&v->nreaders) == 0 || do_signal_writer)
        err2 = signal_writer(vp);

    /*
     * Release the value previously read in this thread, if any.
     *
     * Note that we call free() here, which means that we might take a
     * lock in free().  The application's value destructor also can do
     * the same.
     *
     * TODO We could use a lock-less queue/stack to queue up wrappers
     *      for destruction by writers, then readers could be even more
     *      light-weight.
     */
    if (*res != pthread_getspecific(vp->tkey))
        pthread_var_release_np(vp);

    /* Recall this value we just read */
    err = pthread_setspecific(vp->tkey, wrapper);
    return (err2 == 0) ? err : err2;
}
Example #12
 /* Pre-increment operator: atomically bump value_ and return the new value. */
 long operator++()
 {
     return atomic_inc_32_nv( &value_ );
 }
Example #13
/* Generic helper; it forwards to atomic_inc_32_nv(), so it is only valid when T is a 32-bit unsigned type. */
template<typename T> static T increase_nv(T *ptr) { return atomic_inc_32_nv(ptr); }
Example #14
/*
 * Add the specified MAC client to the group corresponding to the specified
 * broadcast or multicast address.
 * Return 0 on success, or an errno value on failure.
 */
int
mac_bcast_add(mac_client_impl_t *mcip, const uint8_t *addr, uint16_t vid,
    mac_addrtype_t addrtype)
{
	mac_impl_t 		*mip = mcip->mci_mip;
	mac_bcast_grp_t		*grp = NULL, **last_grp;
	size_t			addr_len = mip->mi_type->mt_addr_length;
	int			rc = 0;
	int			i, index = -1;
	mac_mcast_addrs_t	**prev_mi_addr = NULL;
	mac_mcast_addrs_t	**prev_mci_addr = NULL;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	ASSERT(addrtype == MAC_ADDRTYPE_MULTICAST ||
	    addrtype == MAC_ADDRTYPE_BROADCAST);

	/*
	 * Add the MAC client to the list of MAC clients associated
	 * with the group.
	 */
	if (addrtype == MAC_ADDRTYPE_MULTICAST) {
		mac_mcast_addrs_t	*maddr;

		/*
		 * In case of a driver (say aggr), we need this information
		 * on a per MAC instance basis.
		 */
		prev_mi_addr = &mip->mi_mcast_addrs;
		for (maddr = *prev_mi_addr; maddr != NULL;
		    prev_mi_addr = &maddr->mma_next, maddr = maddr->mma_next) {
			if (bcmp(maddr->mma_addr, addr, addr_len) == 0)
				break;
		}
		if (maddr == NULL) {
			/*
			 * For multicast addresses, have the underlying MAC
			 * join the corresponding multicast group.
			 */
			rc = mip->mi_multicst(mip->mi_driver, B_TRUE, addr);
			if (rc != 0)
				return (rc);
			maddr = kmem_zalloc(sizeof (mac_mcast_addrs_t),
			    KM_SLEEP);
			bcopy(addr, maddr->mma_addr, addr_len);
			*prev_mi_addr = maddr;
		} else {
			prev_mi_addr = NULL;
		}
		maddr->mma_ref++;

		/*
		 * We maintain a separate list for each MAC client. Get
		 * the entry or add, if it is not present.
		 */
		prev_mci_addr = &mcip->mci_mcast_addrs;
		for (maddr = *prev_mci_addr; maddr != NULL;
		    prev_mci_addr = &maddr->mma_next, maddr = maddr->mma_next) {
			if (bcmp(maddr->mma_addr, addr, addr_len) == 0)
				break;
		}
		if (maddr == NULL) {
			maddr = kmem_zalloc(sizeof (mac_mcast_addrs_t),
			    KM_SLEEP);
			bcopy(addr, maddr->mma_addr, addr_len);
			*prev_mci_addr = maddr;
		} else {
			prev_mci_addr = NULL;
		}
		maddr->mma_ref++;
	}

	/* The list is protected by the perimeter */
	last_grp = &mip->mi_bcast_grp;
	for (grp = *last_grp; grp != NULL;
	    last_grp = &grp->mbg_next, grp = grp->mbg_next) {
		if (bcmp(grp->mbg_addr, addr, addr_len) == 0 &&
		    grp->mbg_vid == vid)
			break;
	}

	if (grp == NULL) {
		/*
		 * The group does not yet exist, create it.
		 */
		flow_desc_t flow_desc;
		char flow_name[MAXFLOWNAMELEN];

		grp = kmem_cache_alloc(mac_bcast_grp_cache, KM_SLEEP);
		bzero(grp, sizeof (mac_bcast_grp_t));
		grp->mbg_next = NULL;
		grp->mbg_mac_impl = mip;

		DTRACE_PROBE1(mac__bcast__add__new__group, mac_bcast_grp_t *,
		    grp);

		grp->mbg_addr = kmem_zalloc(addr_len, KM_SLEEP);
		bcopy(addr, grp->mbg_addr, addr_len);
		grp->mbg_addrtype = addrtype;
		grp->mbg_vid = vid;

		/*
		 * Add a new flow to the underlying MAC.
		 */
		bzero(&flow_desc, sizeof (flow_desc));
		bcopy(addr, &flow_desc.fd_dst_mac, addr_len);
		flow_desc.fd_mac_len = (uint32_t)addr_len;

		flow_desc.fd_mask = FLOW_LINK_DST;
		if (vid != 0) {
			flow_desc.fd_vid = vid;
			flow_desc.fd_mask |= FLOW_LINK_VID;
		}

		grp->mbg_id = atomic_inc_32_nv(&mac_bcast_id);
		(void) sprintf(flow_name,
		    "mac/%s/mcast%d", mip->mi_name, grp->mbg_id);

		rc = mac_flow_create(&flow_desc, NULL, flow_name,
		    grp, FLOW_MCAST, &grp->mbg_flow_ent);
		if (rc != 0) {
			kmem_free(grp->mbg_addr, addr_len);
			kmem_cache_free(mac_bcast_grp_cache, grp);
			goto fail;
		}
		grp->mbg_flow_ent->fe_mbg = grp;
		mip->mi_bcast_ngrps++;

		/*
		 * Initial creation reference on the flow. This is released
		 * in the corresponding delete action i_mac_bcast_delete()
		 */
		FLOW_REFHOLD(grp->mbg_flow_ent);

		/*
		 * When the multicast and broadcast packet is received
		 * by the underlying NIC, mac_rx_classify() will invoke
		 * mac_bcast_send() with arg2=NULL, which will cause
		 * mac_bcast_send() to send a copy of the packet(s)
		 * to every MAC client opened on top of the underlying MAC.
		 *
		 * When the mac_bcast_send() function is invoked from
		 * the transmit path of a MAC client, it will specify the
		 * transmitting MAC client as the arg2 value, which will
		 * allow mac_bcast_send() to skip that MAC client and not
		 * send it a copy of the packet.
		 *
		 * We program the classifier to dispatch matching broadcast
		 * packets to mac_bcast_send().
		 */

		grp->mbg_flow_ent->fe_cb_fn = mac_bcast_send;
		grp->mbg_flow_ent->fe_cb_arg1 = grp;
		grp->mbg_flow_ent->fe_cb_arg2 = NULL;

		rc = mac_flow_add(mip->mi_flow_tab, grp->mbg_flow_ent);
		if (rc != 0) {
			FLOW_FINAL_REFRELE(grp->mbg_flow_ent);
			goto fail;
		}

		*last_grp = grp;
	}

	ASSERT(grp->mbg_addrtype == addrtype);

	/*
	 * Add the MAC client to the list of MAC clients associated
	 * with the group.
	 */
	rw_enter(&mip->mi_rw_lock, RW_WRITER);
	for (i = 0; i < grp->mbg_nclients_alloc; i++) {
		/*
		 * The MAC client was already added, say when we have
		 * different unicast addresses with the same vid.
		 * Just increment the ref and we are done.
		 */
		if (grp->mbg_clients[i].mgb_client == mcip) {
			grp->mbg_clients[i].mgb_client_ref++;
			rw_exit(&mip->mi_rw_lock);
			return (0);
		} else if (grp->mbg_clients[i].mgb_client == NULL &&
		    index == -1) {
			index = i;
		}
	}
	if (grp->mbg_nclients_alloc == grp->mbg_nclients) {
		mac_bcast_grp_mcip_t	*new_clients;
		uint_t			new_size = grp->mbg_nclients+1;

		new_clients = kmem_zalloc(new_size *
		    sizeof (mac_bcast_grp_mcip_t), KM_SLEEP);

		if (grp->mbg_nclients > 0) {
			ASSERT(grp->mbg_clients != NULL);
			bcopy(grp->mbg_clients, new_clients, grp->mbg_nclients *
			    sizeof (mac_bcast_grp_mcip_t));
			kmem_free(grp->mbg_clients, grp->mbg_nclients *
			    sizeof (mac_bcast_grp_mcip_t));
		}

		grp->mbg_clients = new_clients;
		grp->mbg_nclients_alloc = new_size;
		index = new_size - 1;
	}

	ASSERT(index != -1);
	grp->mbg_clients[index].mgb_client = mcip;
	grp->mbg_clients[index].mgb_client_ref = 1;
	grp->mbg_nclients++;
	/*
	 * Since we're adding to the list of MAC clients using that group,
	 * kick the generation count, which will allow mac_bcast_send()
	 * to detect that condition after re-acquiring the lock.
	 */
	grp->mbg_clients_gen++;
	rw_exit(&mip->mi_rw_lock);
	return (0);

fail:
	if (prev_mi_addr != NULL) {
		kmem_free(*prev_mi_addr, sizeof (mac_mcast_addrs_t));
		*prev_mi_addr = NULL;
		(void) mip->mi_multicst(mip->mi_driver, B_FALSE, addr);
	}
	if (prev_mci_addr != NULL) {
		kmem_free(*prev_mci_addr, sizeof (mac_mcast_addrs_t));
		*prev_mci_addr = NULL;
	}
	return (rc);
}
Example #15
/*
 * Create an in-kernel entry for the device. Device attributes such as the
 * name and uuid are taken from the proplib dictionary.
 */
int
dm_dev_create_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	int r, flags;
	device_t devt;

	r = 0;
	flags = 0;
	name = NULL;
	uuid = NULL;

	/* Get needed values from dictionary. */
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);

	dm_dbg_print_flags(flags);

	/* Look up the name and uuid; if the device already exists, quit. */
	if ((dmv = dm_dev_lookup(name, uuid, -1)) != NULL) {
		DM_ADD_FLAG(flags, DM_EXISTS_FLAG);	/* Device already exists */
		dm_dev_unbusy(dmv);
		return EEXIST;
	}
	if ((devt = config_attach_pseudo(&dm_cfdata)) == NULL) {
		aprint_error("Unable to attach pseudo device dm/%s\n", name);
		return (ENOMEM);
	}
	if ((dmv = dm_dev_alloc()) == NULL)
		return ENOMEM;

	if (uuid)
		strncpy(dmv->uuid, uuid, DM_UUID_LEN);
	else
		dmv->uuid[0] = '\0';

	if (name)
		strlcpy(dmv->name, name, DM_NAME_LEN);

	dmv->minor = (uint64_t)atomic_inc_32_nv(&sc_minor_num);
	dmv->flags = 0;		/* device flags are set when needed */
	dmv->ref_cnt = 0;
	dmv->event_nr = 0;
	dmv->dev_type = 0;
	dmv->devt = devt;

	dm_table_head_init(&dmv->table_head);

	mutex_init(&dmv->dev_mtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&dmv->diskp_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&dmv->dev_cv, "dm_dev");

	if (flags & DM_READONLY_FLAG)
		dmv->flags |= DM_READONLY_FLAG;

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	disk_init(dmv->diskp, dmv->name, &dmdkdriver);
	disk_attach(dmv->diskp);

	dmv->diskp->dk_info = NULL;

	if ((r = dm_dev_insert(dmv)) != 0)
		dm_dev_free(dmv);

	DM_ADD_FLAG(flags, DM_EXISTS_FLAG);
	DM_REMOVE_FLAG(flags, DM_INACTIVE_PRESENT_FLAG);

	/* Increment the device counter after creating the device. */
	atomic_inc_32(&dm_dev_counter);

	return r;
}