//  Atomic subtraction. Returns false if the counter drops to zero.
        inline bool sub (integer_t decrement)
        {
#if defined ZMQ_ATOMIC_COUNTER_WINDOWS
            LONG delta = - ((LONG) decrement);
            integer_t old = InterlockedExchangeAdd ((LONG*) &value, delta);
            return old - decrement != 0;
#elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
            int32_t delta = - ((int32_t) decrement);
            integer_t nv = atomic_add_32_nv (&value, delta);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_X86
            integer_t oldval = -decrement;
            volatile integer_t *val = &value;
            __asm__ volatile ("lock; xaddl %0,%1"
                : "=r" (oldval), "=m" (*val)
                : "0" (oldval), "m" (*val)
                : "cc", "memory");
            return oldval != decrement;
#elif defined ZMQ_ATOMIC_COUNTER_MUTEX
            sync.lock ();
            value -= decrement;
            bool result = value ? true : false;
            sync.unlock ();
            return result;
#else
#error atomic_counter is not implemented for this platform
#endif
        }
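Every example on this page leans on the same contract: atomic_add_32_nv() (Solaris/illumos <atomic.h>) atomically adds a signed delta to a 32-bit counter and returns the *new* value, so the thread that drives a reference count to zero can detect that and free the object. A minimal, self-contained sketch of the idiom (obj_t and obj_rele() are illustrative names, not taken from any example below):

#include <atomic.h>	/* Solaris/illumos: atomic_add_32_nv() */
#include <stdint.h>
#include <stdlib.h>

typedef struct obj {
	volatile uint32_t obj_ref;	/* starts at 1 for the creator */
} obj_t;

static void
obj_rele(obj_t *op)
{
	/* _nv returns the new value, so exactly one thread sees 0 */
	if (atomic_add_32_nv(&op->obj_ref, -1) == 0)
		free(op);
}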
Example #2
/*
 * decrement audit path reference count
 */
void
au_pathrele(struct audit_path *app)
{
	if (atomic_add_32_nv(&app->audp_ref, -1) > 0)
		return;
	kmem_free(app, app->audp_size);
}
Example #3
void
dpfree(devplcy_t *dp)
{
	ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
	if (atomic_add_32_nv(&dp->dp_ref, -1) == 0)
		kmem_free(dp, sizeof (*dp));
}
void
corectl_path_rele(corectl_path_t *ccp)
{
	if (atomic_add_32_nv(&ccp->ccp_refcnt, -1) == 0) {
		refstr_rele(ccp->ccp_path);
		kmem_free(ccp, sizeof (corectl_path_t));
	}
}
Example #5
/*
 * ctfs_mount - the VFS_MOUNT entry point
 */
static int
ctfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	ctfs_vfs_t *data;
	dev_t dev;
	gfs_dirent_t *dirent;
	int i;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	data = kmem_alloc(sizeof (ctfs_vfs_t), KM_SLEEP);

	/*
	 * Initialize vfs fields not initialized by VFS_INIT/domount
	 */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = ctfs_fstype;
	do
		dev = makedevice(ctfs_major,
		    atomic_add_32_nv(&ctfs_minor, 1) & L_MAXMIN32);
	while (vfs_devismounted(dev));
	vfs_make_fsid(&vfsp->vfs_fsid, dev, ctfs_fstype);
	vfsp->vfs_data = data;
	vfsp->vfs_dev = dev;

	/*
	 * Dynamically create gfs_dirent_t array for the root directory.
	 */
	dirent = kmem_zalloc((ct_ntypes + 2) * sizeof (gfs_dirent_t), KM_SLEEP);
	for (i = 0; i < ct_ntypes; i++) {
		dirent[i].gfse_name = (char *)ct_types[i]->ct_type_name;
		dirent[i].gfse_ctor = ctfs_create_tdirnode;
		dirent[i].gfse_flags = GFS_CACHE_VNODE;
	}
	dirent[i].gfse_name = "all";
	dirent[i].gfse_ctor = ctfs_create_adirnode;
	dirent[i].gfse_flags = GFS_CACHE_VNODE;
	dirent[i+1].gfse_name = NULL;

	/*
	 * Create root vnode
	 */
	data->ctvfs_root = gfs_root_create(sizeof (ctfs_rootnode_t),
	    vfsp, ctfs_ops_root, CTFS_INO_ROOT, dirent, ctfs_root_do_inode,
	    CTFS_NAME_MAX, NULL, NULL);

	kmem_free(dirent, (ct_ntypes + 2) * sizeof (gfs_dirent_t));

	return (0);
}
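Besides reference counting, ctfs_mount() shows a second common use: atomic_add_32_nv() as a lock-free ID generator. Every caller draws a distinct minor number, and the do/while loop retries if the masked value is already mounted. A stripped-down sketch of that allocator (in_use() is a hypothetical stand-in for vfs_devismounted()):

extern int in_use(uint32_t m);		/* hypothetical predicate */

static volatile uint32_t next_minor = 0;

static uint32_t
alloc_minor(void)
{
	uint32_t m;

	do {
		/* each caller gets a fresh value; mask keeps it in range */
		m = atomic_add_32_nv(&next_minor, 1) & L_MAXMIN32;
	} while (in_use(m));

	return (m);
}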
/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
	vfs_t *vfsp = &lfs->lfs_vfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(vfsp->vfs_count > 1);
	if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
		freelfsnode(lfs, li);
}
Example #7
/* increment nonce and return new value */
uint32_t
nonce32(void)
{
#ifdef HAVE_ATOMIC_H
  return atomic_add_32_nv(&seq, 2);
#else
  xpthread_mutex_lock(&seq_mutex);
  seq += 2;
  xpthread_mutex_unlock(&seq_mutex);
  return seq;
#endif /* else !HAVE_ATOMIC_H */
}
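Stepping by 2 preserves the parity of seq forever. A plausible reading (an assumption; the snippet does not show how seq is seeded): the two endpoints seed seq with opposite parities, so their nonce streams can never collide:

static volatile uint32_t seq = 1;	/* hypothetical: the peer seeds with 0 */
/* nonce32() then yields 3, 5, 7, ... here, while the peer's
 * stream is 2, 4, 6, ... -- disjoint by parity. */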
Example #8
void occ_rwlock::acquire_read()
{
    int count = atomic_add_32_nv(&_active_count, READER);
    while(count & WRITER) {
        // block
        count = atomic_add_32_nv(&_active_count, -READER);
        {
            CRITICAL_SECTION(cs, _read_write_mutex);
            
            // nasty race: we could have fooled a writer into sleeping...
            if(count == WRITER)
                DO_PTHREAD(pthread_cond_signal(&_write_cond));
            
            while(*&_active_count & WRITER) {
                DO_PTHREAD(pthread_cond_wait(&_read_cond, &_read_write_mutex));
            }
        }
        count = atomic_add_32_nv(&_active_count, READER);
    }
    membar_enter();
}
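occ_rwlock packs its whole state into _active_count: a writer flag in the low bit and a reader count above it. Constants consistent with the arithmetic above would be (an assumption; the real definitions are not in this snippet):

enum {
	WRITER = 1,	/* low bit: a writer holds or wants the lock */
	READER = 2	/* each reader adds 2 and so never disturbs  */
};			/* the WRITER bit; count == WRITER therefore */
			/* means one writer and zero readers         */

Under that encoding, acquire_read() optimistically adds READER; if the WRITER bit is set it backs the increment out, signals a writer that may now see count == WRITER, and sleeps until the bit clears before retrying.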
Example #9
/*
 * To implement a reasonable panic() equivalent for fmd, we atomically bump a
 * global counter of calls to fmd_vpanic() and attempt to print a panic message
 * to stderr and dump core as a result of raising SIGABRT.  This function must
 * not attempt to grab any locks so that it can be called from any fmd code.
 */
void
fmd_vpanic(const char *format, va_list ap)
{
    int oserr = errno;
    pthread_t tid = pthread_self();

    fmd_thread_t *tp;
    char msg[BUFSIZ];
    size_t len;

    /*
     * If this is not the first call to fmd_vpanic(), then check d_panictid
     * to see if we are the panic thread.  If so, then proceed directly to
     * abort() because we have recursively panicked.  If not, then pause()
     * indefinitely waiting for the panic thread to terminate the daemon.
     */
    if (atomic_add_32_nv(&fmd.d_panicrefs, 1) != 1) {
        while (fmd.d_panictid != tid)
            (void) pause();
        goto abort;
    }

    /*
     * Use fmd.d_pid != 0 as a cheap test to see if fmd.d_key is valid
     * (i.e. we're after fmd_create() and before fmd_destroy()).
     */
    if (fmd.d_pid != 0 && (tp = pthread_getspecific(fmd.d_key)) != NULL)
        (void) tp->thr_trfunc(tp->thr_trdata, FMD_DBG_ERR, format, ap);

    fmd.d_panicstr = msg;
    fmd.d_panictid = tid;

    (void) snprintf(msg, sizeof (msg), "%s: ABORT: ",
                    fmd.d_pname ? fmd.d_pname : "fmd");

    len = strlen(msg);
    (void) vsnprintf(msg + len, sizeof (msg) - len, format, ap);

    if (strchr(format, '\n') == NULL) {
        len = strlen(msg);
        (void) snprintf(msg + len, sizeof (msg) - len, ": %s\n",
                        fmd_strerror(oserr));
    }

    (void) write(STDERR_FILENO, msg, strlen(msg));

abort:
    abort();
    _exit(FMD_EXIT_ERROR);
}
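The first atomic_add_32_nv() call doubles as a "panic exactly once" gate: only the caller that moves d_panicrefs from 0 to 1 formats the message; every later caller either waits or recurses straight to abort(). The gate in isolation:

static volatile uint32_t panic_refs = 0;

/* the first caller sees the new value 1 and wins; all others lose */
if (atomic_add_32_nv(&panic_refs, 1) == 1) {
	/* one-time panic work */
}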
Example #10
        //  Atomic subtraction. Returns false if the counter drops to zero.
        inline bool sub (integer_t decrement)
        {
#if defined ZMQ_ATOMIC_COUNTER_WINDOWS
            LONG delta = - ((LONG) decrement);
            integer_t old = InterlockedExchangeAdd ((LONG*) &value, delta);
            return old - decrement != 0;
#elif defined ZMQ_ATOMIC_INTRINSIC
            integer_t nv = __atomic_sub_fetch(&value, decrement, __ATOMIC_ACQ_REL);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H
            int32_t delta = - ((int32_t) decrement);
            integer_t nv = atomic_add_32_nv (&value, delta);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_TILE
            int32_t delta = - ((int32_t) decrement);
            integer_t nv = arch_atomic_add (&value, delta);
            return nv != 0;
#elif defined ZMQ_ATOMIC_COUNTER_X86
            integer_t oldval = -decrement;
            volatile integer_t *val = &value;
            __asm__ volatile ("lock; xaddl %0,%1"
                : "=r" (oldval), "=m" (*val)
                : "0" (oldval), "m" (*val)
                : "cc", "memory");
            return oldval != decrement;
#elif defined ZMQ_ATOMIC_COUNTER_ARM
            integer_t old_value, flag, tmp;
            __asm__ volatile (
                "       dmb     sy\n\t"
                "1:     ldrex   %0, [%5]\n\t"
                "       sub     %2, %0, %4\n\t"
                "       strex   %1, %2, [%5]\n\t"
                "       teq     %1, #0\n\t"
                "       bne     1b\n\t"
                "       dmb     sy\n\t"
                : "=&r"(old_value), "=&r"(flag), "=&r"(tmp), "+Qo"(value)
                : "Ir"(decrement), "r"(&value)
                : "cc");
            return old_value - decrement != 0;
#elif defined ZMQ_ATOMIC_COUNTER_MUTEX
            sync.lock ();
            value -= decrement;
            bool result = value ? true : false;
            sync.unlock ();
            return result;
#else
#error atomic_counter is not implemented for this platform
#endif
        }
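The ZMQ_ATOMIC_INTRINSIC branch is the portable modern equivalent: __atomic_sub_fetch(), like atomic_add_32_nv(), returns the value *after* the operation. For comparison, atomic_add_32_nv() itself can be expressed with the GCC/Clang builtins (the my_ prefix marks this as a sketch, not a real libc symbol):

#include <stdint.h>

static inline uint32_t
my_atomic_add_32_nv(volatile uint32_t *addr, int32_t delta)
{
	/* __atomic_add_fetch returns the new value, matching _nv */
	return (__atomic_add_fetch(addr, delta, __ATOMIC_SEQ_CST));
}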
Example #11
void occ_rwlock::acquire_write()
{
    // only one writer allowed in at a time...
    CRITICAL_SECTION(cs, _read_write_mutex);    
    while(*&_active_count & WRITER) {
        DO_PTHREAD(pthread_cond_wait(&_read_cond, &_read_write_mutex));
    }
    
    // any lurking writers are waiting on the cond var
    int count = atomic_add_32_nv(&_active_count, WRITER);
    w_assert1(count & WRITER);

    // drain readers
    while(count != WRITER) {
        DO_PTHREAD(pthread_cond_wait(&_write_cond, &_read_write_mutex));
        count = *&_active_count;
    }
}
Example #12
/*
 * Release previous hold on a cred structure.  Free it if refcnt == 0.
 * If cred uses label different from zone label, free it.
 */
void
crfree(cred_t *cr)
{
	if (atomic_add_32_nv(&cr->cr_ref, -1) == 0) {
		ASSERT(cr != kcred);
		if (cr->cr_label)
			label_rele(cr->cr_label);
		if (cr->cr_klpd)
			crklpd_rele(cr->cr_klpd);
		if (cr->cr_zone)
			zone_cred_rele(cr->cr_zone);
		if (cr->cr_ksid)
			kcrsid_rele(cr->cr_ksid);
		if (cr->cr_grps)
			crgrprele(cr->cr_grps);

		kmem_cache_free(cred_cache, cr);
	}
}
Example #13
uint32_t nn_atomic_dec (struct nn_atomic *self, uint32_t n)
{
#if defined NN_ATOMIC_WINAPI
    return (uint32_t) InterlockedExchangeAdd ((LONG*) &self->n, -((LONG) n));
#elif defined NN_ATOMIC_SOLARIS
    return atomic_add_32_nv (&self->n, -((int32_t) n)) + n;
#elif defined NN_ATOMIC_GCC_BUILTINS
    return (uint32_t) __sync_fetch_and_sub (&self->n, n);
#elif defined NN_ATOMIC_MUTEX
    uint32_t res;
    nn_mutex_lock (&self->sync);
    res = self->n;
    self->n -= n;
    nn_mutex_unlock (&self->sync);
    return res;
#else
#error
#endif
}
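Note the "+ n" in the NN_ATOMIC_SOLARIS branch: the other branches return the value *before* the decrement, but atomic_add_32_nv() returns the value *after* it, so adding n back recovers the old value:

uint32_t nv = atomic_add_32_nv (&self->n, -((int32_t) n));  /* new value */
uint32_t ov = nv + n;                                       /* old value */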
Example #14
/*
 * void task_rele(task_t *)
 *
 * Overview
 *   task_rele() relinquishes a reference on the given task, which was acquired
 *   via task_hold() or task_hold_by_id().  If this is the last member or
 *   observer of the task, dispatch it for commitment via the accounting
 *   subsystem.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Caller must not be holding the task_hash_lock.
 */
void
task_rele(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	if (atomic_add_32_nv(&tk->tk_hold_count, -1) > 0) {
		mutex_exit(&task_hash_lock);
		return;
	}

	ASSERT(tk->tk_nprocs == 0);

	mutex_enter(&tk->tk_zone->zone_nlwps_lock);
	tk->tk_proj->kpj_ntasks--;
	mutex_exit(&tk->tk_zone->zone_nlwps_lock);

	task_kstat_delete(tk);

	if (mod_hash_destroy(task_hash,
	    (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
		panic("unable to delete task %d", tk->tk_tkid);
	mutex_exit(&task_hash_lock);

	/*
	 * At this point, there are no members or observers of the task, so we
	 * can safely send it on for commitment to the accounting subsystem.
	 * The task will be destroyed in task_end() subsequent to commitment.
	 * Since we may be called with pidlock held, taskq_dispatch() cannot
	 * sleep. Commitment is handled by a backup thread in case dispatching
	 * the task fails.
	 */
	if (taskq_dispatch(exacct_queue, exacct_commit_task, tk,
	    TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&task_commit_lock);
		if (task_commit_head == NULL) {
			task_commit_head = task_commit_tail = tk;
		} else {
			task_commit_tail->tk_commit_next = tk;
			task_commit_tail = tk;
		}
		cv_signal(&task_commit_cv);
		mutex_exit(&task_commit_lock);
	}
}
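Unlike the simpler _rele() routines above, task_rele() decrements while holding task_hash_lock: the task is still reachable through task_hash, so the lock is what keeps a concurrent task_hold_by_id() from resurrecting the task between the final decrement and its removal from the hash.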
/*
 * VFS entry points
 */
static int
objfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	objfs_vfs_t *data;
	dev_t dev;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	data = kmem_alloc(sizeof (objfs_vfs_t), KM_SLEEP);

	/*
	 * Initialize vfs fields
	 */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = objfs_fstype;
	do {
		dev = makedevice(objfs_major,
		    atomic_add_32_nv(&objfs_minor, 1) & L_MAXMIN32);
	} while (vfs_devismounted(dev));
	vfs_make_fsid(&vfsp->vfs_fsid, dev, objfs_fstype);
	vfsp->vfs_data = data;
	vfsp->vfs_dev = dev;

	/*
	 * Create root
	 */
	data->objfs_vfs_root = objfs_create_root(vfsp);

	return (0);
}
Example #16
uint32_t
atomic_dec_32_nv(volatile uint32_t *addr)
{

	return (atomic_add_32_nv(addr, -1));
}
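The increment counterpart can be built the same way (a sketch mirroring the wrapper above; not necessarily how any particular libc spells it):

uint32_t
atomic_inc_32_nv(volatile uint32_t *addr)
{

	return (atomic_add_32_nv(addr, 1));
}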
Example #17
/**
 * the handler of the repeating 1-second timer
 *
 * @param fd, the descriptor of the socket
 * @param which, event flags
 * @param arg, argument
 */
static void ms_clock_handler(const int fd, const short which, void *arg)
{
  ms_thread_t *ms_thread= pthread_getspecific(ms_thread_key);
  struct timeval t=
  {
    .tv_sec= 1, .tv_usec= 0
  };

  UNUSED_ARGUMENT(fd);
  UNUSED_ARGUMENT(which);
  UNUSED_ARGUMENT(arg);

  ms_set_current_time();

  if (ms_thread->initialized)
  {
    /* only delete the event if it's actually there. */
    evtimer_del(&ms_thread->clock_event);
    ms_check_sock_timeout();
  }
  else
  {
    ms_thread->initialized= true;
  }

  ms_reconn_thread_socks();

  evtimer_set(&ms_thread->clock_event, ms_clock_handler, 0);
  event_base_set(ms_thread->base, &ms_thread->clock_event);
  evtimer_add(&ms_thread->clock_event, &t);
} /* ms_clock_handler */


/**
 * used to bind thread to CPU if the system supports
 *
 * @param cpu, cpu index
 *
 * @return if success, return EXIT_SUCCESS, else return -1
 */
/** +EDIT */
/*
static uint32_t ms_set_thread_cpu_affinity(uint32_t cpu)
{
  uint32_t ret= 0;

#ifdef HAVE_CPU_SET_T
  cpu_set_t cpu_set;
  CPU_ZERO(&cpu_set);
  CPU_SET(cpu, &cpu_set);

  if (sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set) == -1)
  {
    fprintf(stderr, "WARNING: Could not set CPU Affinity, continuing...\n");
    ret= 1;
  }
#else
  UNUSED_ARGUMENT(cpu);
#endif

  return ret;
}
*/
/** -EDIT */
/* ms_set_thread_cpu_affinity */


/**
 * Set up a thread's information.
 *
 * @param thread_ctx, pointer of the thread context structure
 *
 * @return if success, return EXIT_SUCCESS, else return -1
 */
static int ms_setup_thread(ms_thread_ctx_t *thread_ctx)
{

  ms_thread_t *ms_thread= (ms_thread_t *)calloc(sizeof(*ms_thread), 1);
  pthread_setspecific(ms_thread_key, (void *)ms_thread);

  ms_thread->thread_ctx= thread_ctx;
  ms_thread->nactive_conn= thread_ctx->nconns;
  ms_thread->initialized= false;
  static volatile uint32_t cnt= 0;

  gettimeofday(&ms_thread->startup_time, NULL);

  ms_thread->base= event_init();
  if (ms_thread->base == NULL)
  {
    /* _nv returns the new value: the first failing caller sees 1, not 0 */
    if (atomic_add_32_nv(&cnt, 1) == 1)
    {
      fprintf(stderr, "Can't allocate event base.\n");
    }

    return -1;
  }

  ms_thread->conn=
    (ms_conn_t *)malloc((size_t)thread_ctx->nconns * sizeof(ms_conn_t));
  if (ms_thread->conn == NULL)
  {
    if (atomic_add_32_nv(&cnt, 1) == 1)
    {
      fprintf(
        stderr,
        "Can't allocate concurrency structure for thread descriptors.");
    }

    return -1;
  }
  memset(ms_thread->conn, 0, (size_t)thread_ctx->nconns * sizeof(ms_conn_t));

  for (uint32_t i= 0; i < thread_ctx->nconns; i++)
  {
    ms_thread->conn[i].conn_idx= i;
    if (ms_setup_conn(&ms_thread->conn[i]) != 0)
    {
      /* only output this error once */
      if (atomic_add_32_nv(&cnt, 1) == 1)
      {
        fprintf(stderr, "Initializing connection failed.\n");
      }

      return -1;
    }
  }

  return EXIT_SUCCESS;
} /* ms_setup_thread */
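(The "== 1" tests above reflect atomic_add_32_nv() semantics: the first caller sees the new value 1. With a fetch-and-add primitive, which returns the old value, the equivalent first-caller test is "== 0":)

/* the same "report only the first failure" gate with a GCC builtin */
if (__sync_fetch_and_add(&cnt, 1) == 0)
	fprintf(stderr, "Can't allocate event base.\n");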
Example #18
void
refstr_rele(refstr_t *rsp)
{
	if (atomic_add_32_nv(&rsp->rs_refcnt, -1) == 0)
		kmem_free(rsp, (size_t)rsp->rs_size);
}
Example #19
void
crgrprele(credgrp_t *grps)
{
	if (atomic_add_32_nv(&grps->crg_ref, -1) == 0)
		kmem_free(grps, CREDGRPSZ(grps->crg_ngroups));
}
Example #20
/*
 * Release previous hold on a label structure.  Free it if refcnt == 0.
 */
void
label_rele(ts_label_t *lab)
{
	if (atomic_add_32_nv(&lab->tsl_ref, -1) == 0)
		kmem_cache_free(tslabel_cache, lab);
}
/* note: despite the template, this is only meaningful for T == uint32_t,
 * since atomic_add_32_nv() operates on a 32-bit counter */
template<typename T> static T add_nv(T *ptr, T val) { return atomic_add_32_nv(ptr, val); }
void
corectl_content_rele(corectl_content_t *ccp)
{
	if (atomic_add_32_nv(&ccp->ccc_refcnt, -1) == 0)
		kmem_free(ccp, sizeof (corectl_content_t));
}