Example #1
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			clean;
	bool			aborted;
	int			flags;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;

	/*
	 * Before possibly freeing the buf item, copy the per-transaction state
	 * so we can reference it safely later after clearing it from the
	 * buffer log item.
	 */
	flags = bip->bli_flags;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it. If we are aborting the transaction, this may
	 * be the only reference to the buf item, so we free it anyway
	 * regardless of whether it is dirty or not. A dirty abort implies a
	 * shutdown, anyway.
	 *
	 * Ordered buffers are dirty but may have no recorded changes, so ensure
	 * we only release clean items here.
	 */
	clean = (flags & XFS_BLI_DIRTY) ? false : true;
	if (clean) {
		int i;
		for (i = 0; i < bip->bli_format_count; i++) {
			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				     bip->bli_formats[i].blf_map_size)) {
				clean = false;
				break;
			}
		}
	}

	/*
	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
	 * buffers may be dirty and hence in the AIL. Therefore if we are
	 * aborting a buffer and we've just taken the last reference away, we
	 * have to check if it is in the AIL before freeing it. We need to free
	 * it in this case, because an aborted transaction has already shut the
	 * filesystem down and this is the last chance we will have to do so.
	 */
	if (atomic_dec_and_test(&bip->bli_refcount)) {
		if (clean)
			xfs_buf_item_relse(bp);
		else if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			if (lip->li_flags & XFS_LI_IN_AIL) {
				spin_lock(&lip->li_ailp->xa_lock);
				xfs_trans_ail_delete(lip->li_ailp, lip,
						     SHUTDOWN_LOG_IO_ERROR);
			}
			xfs_buf_item_relse(bp);
		}
	}

	if (!(flags & XFS_BLI_HOLD))
		xfs_buf_relse(bp);
}
Example #2
static int lustre_stop_mgc(struct super_block *sb)
{
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct obd_device *obd;
	char *niduuid = NULL, *ptr = NULL;
	int i, rc = 0, len = 0;

	if (!lsi)
		return -ENOENT;
	obd = lsi->lsi_mgc;
	if (!obd)
		return -ENOENT;
	lsi->lsi_mgc = NULL;

	mutex_lock(&mgc_start_lock);
	LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
	if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
		/* This is not fatal, every client that stops
		   will call in here. */
		CDEBUG(D_MOUNT, "mgc still has %d references.\n",
		       atomic_read(&obd->u.cli.cl_mgc_refcount));
		GOTO(out, rc = -EBUSY);
	}

	/* The MGC has no recoverable data in any case;
	 * forced shutdown is set in umount_begin */
	obd->obd_no_recov = 1;

	if (obd->u.cli.cl_mgc_mgsexp) {
		/* An error is not fatal; if we are unable to send the
		   disconnect, the MGS ping evictor cleans up the export */
		rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
		if (rc)
			CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
	}

	/* Save the obdname for cleaning the nid uuids, which are
	   obdname_XX */
	len = strlen(obd->obd_name) + 6;
	OBD_ALLOC(niduuid, len);
	if (niduuid) {
		strcpy(niduuid, obd->obd_name);
		ptr = niduuid + strlen(niduuid);
	}

	rc = class_manual_cleanup(obd);
	if (rc)
		GOTO(out, rc);

	/* Clean the nid uuids */
	if (!niduuid)
		GOTO(out, rc = -ENOMEM);

	for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) {
		sprintf(ptr, "_%x", i);
		rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID,
			     niduuid, NULL, NULL, NULL);
		if (rc)
			CERROR("del MDC UUID %s failed: rc = %d\n",
			       niduuid, rc);
	}
out:
	if (niduuid)
		OBD_FREE(niduuid, len);

	/* class_import_put will get rid of the additional connections */
	mutex_unlock(&mgc_start_lock);
	return rc;
}
Example #3
/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
Example #4
static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}
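All of the small put helpers in these examples share one shape: atomically drop a reference, and let exactly the one caller that observed the count reach zero run the teardown. Below is a minimal sketch of that pattern against a hypothetical struct widget (illustrative only, not any of the kernel objects above):

/* Hypothetical type for illustration only. */
struct widget {
	atomic_t refcount;			/* holders of a widget reference */
	void (*release)(struct widget *w);	/* teardown hook */
};

static void widget_put(struct widget *w)
{
	/*
	 * atomic_dec_and_test() returns true only for the caller whose
	 * decrement took the count to zero, so release runs exactly once
	 * even with concurrent puts.
	 */
	if (atomic_dec_and_test(&w->refcount))
		w->release(w);
}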
Example #5
static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}
Example #6
void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}
Example #7
void fd_device_del_locked(struct fd_device *dev)
{
	if (!atomic_dec_and_test(&dev->refcnt))
		return;
	fd_device_del_impl(dev);
}
Example #8
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
Example #9
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}
Example #10
static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}
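Here the zero crossing does not free the object; it wakes sleepers on cq->wait, so a teardown path elsewhere can block until every reference is gone. A sketch of what such a waiter could look like (the helper name is illustrative, not taken from the c2 driver):

/* Illustrative waiter, assuming cq->wait is the wait_queue_head_t woken above. */
static void c2_cq_wait_for_release(struct c2_cq *cq)
{
	/* Sleep until the last c2_cq_put() drops the refcount to zero. */
	wait_event(cq->wait, atomic_read(&cq->refcount) == 0);
}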
Example #11
static void
instance_put(struct nfulnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use))
		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
Example #12
/*
 * Drop a reference to a group.  Free it if it's through.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (atomic_dec_and_test(&group->refcnt))
		fsnotify_destroy_group(group);
}
Example #13
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			XFS_BUF_SET_FSPRIVATE2(bp, NULL);
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}
Example #14
void iser_conn_put(struct iser_conn *ib_conn)
{
	if (atomic_dec_and_test(&ib_conn->refcount))
		iser_conn_release(ib_conn);
}
Example #15
void fuse_conn_put(struct fuse_conn *fc)
{
	if (atomic_dec_and_test(&fc->count))
		kfree(fc);
}
Example #16
void skiplist_put_leaf(struct sl_leaf *leaf)
{
	BUG_ON(atomic_read(&leaf->refs) == 0);
	if (atomic_dec_and_test(&leaf->refs))
		call_rcu(&leaf->rcu_head, sl_free_rcu);
}
Example #17
static __inline__ void rt6_release(struct rt6_info *rt)
{
    if (atomic_dec_and_test(&rt->rt6i_ref))
        dst_free(&rt->dst);
}
Example #18
/*
 * The open method is always the first operation performed on the device file.
 * It is provided for a driver to do any initialization in preparation for
 * later operations. In most drivers, open should perform the following tasks:
 * 1) Check for device-specific errors (such as device-not-ready or similar
 *    hardware problems).
 * 2) Initialize the device if it is being opened for the first time.
 * 3) Update the f_op pointer, if necessary.
 * 4) Allocate and fill any data structure to be put in filp->private_data.
 *    The driver can store device context, such as a device handle or
 *    interface handle, in private_data; treat private_data as the bridge
 *    between the file structure and the lower-level device object. More
 *    than one application process may have its own file descriptor open
 *    on the device, and once the driver stores the device context here in
 *    open, the other file operation methods can access it easily.
 */
static int osrfx2_fp_open(struct inode *inode, struct file *file)
{
	struct osrfx2           *fx2dev;
	struct usb_interface    *interface;
	int                     subminor;
	int                     retval = 0;
#if FILE_NOSHARE_READWRITE
	int 			oflags;
#endif /* FILE_NOSHARE_READWRITE */

	subminor = iminor(inode);
	interface = usb_find_interface(&osrfx2_driver, subminor);
	if (!interface) {
		ENTRY(NULL, "can't find device for minor %d\n", subminor);
		EXIT(NULL, -ENODEV, "");
		return -ENODEV;
	}
	
	ENTRY(&interface->dev, "minor = %d\n", subminor);

	fx2dev = usb_get_intfdata(interface);
	if (!fx2dev) {
	        retval = -ENODEV;
	        goto exit;
	}

#if FILE_NOSHARE_READWRITE
	/*
	 *  Serialize access to each of the bulk pipes.
	 */
	oflags = (file->f_flags & O_ACCMODE);
	if ((oflags == O_WRONLY) || (oflags == O_RDWR)) {
		if (atomic_dec_and_test( &fx2dev->bulk_write_available ) == 0) {
			atomic_inc( &fx2dev->bulk_write_available );
			retval = -EBUSY;
			goto exit;
		}
	}

	if ((oflags == O_RDONLY) || (oflags == O_RDWR)) {
		if (atomic_dec_and_test( &fx2dev->bulk_read_available ) == 0) {
			atomic_inc( &fx2dev->bulk_read_available );
			if (oflags == O_RDWR) 
				atomic_inc( &fx2dev->bulk_write_available );
			retval = -EBUSY;
			goto exit;
		}
	}
#endif /* FILE_NOSHARE_READWRITE */

	/**
	 * Set this device as non-seekable because seeking does not make sense
	 * for osrfx2. Refer to http://lwn.net/Articles/97154/ "Safe seeks" and
	 * LDD3 section 6.5.
	 */
	retval = nonseekable_open(inode, file);
	if (0 != retval) {
		goto exit;
	}

	/* increment our usage count for the device */
	kref_get(&fx2dev->kref);

	/* save our object in the file's private structure */
	file->private_data = fx2dev;

exit:
	EXIT(&interface->dev, retval, "");
	return retval;
}
Example #19
static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}
Example #20
static __inline__ void fq_put(struct frag_queue *fq)
{
	if (atomic_dec_and_test(&fq->refcnt))
		ip6_frag_destroy(fq);
}
Example #21
static void xen_blkif_free(struct xen_blkif *blkif)
{
	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();
	kmem_cache_free(xen_blkif_cachep, blkif);
}
Example #22
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
Example #23
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* not a hex value; try parsing as a mask-name list */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}
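Taken together, blk_trace_setup_queue() and blk_trace_remove_queue() use blk_probes_ref as a first-user-registers, last-user-unregisters counter: atomic_inc_return() == 1 identifies the first user, and atomic_dec_and_test() the last. A minimal sketch of that pairing, with hypothetical hooks standing in for blk_register/unregister_tracepoints():

/* Hypothetical hooks for illustration only. */
static void register_tracepoints_hook(void);
static void unregister_tracepoints_hook(void);

static atomic_t users = ATOMIC_INIT(0);

static void get_shared(void)
{
	/* First user (count 0 -> 1) performs the global registration. */
	if (atomic_inc_return(&users) == 1)
		register_tracepoints_hook();
}

static void put_shared(void)
{
	/* Last user (count 1 -> 0) tears the global state down. */
	if (atomic_dec_and_test(&users))
		unregister_tracepoints_hook();
}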
Example #24
/** Decrement reference count, freeing if zero.
 *
 * @param info vnet (OK if null)
 */
void Vnet_decref(Vnet *info) {
    if(!info) return;
    if(atomic_dec_and_test(&info->refcount)) {
        deallocate(info);
    }
}
Example #25
/*!
 *  @fn  ssize_t  output_Read(struct file  *filp, 
 *                            char         *buf, 
 *                            size_t        count, 
 *                            loff_t       *f_pos,
 *                            BUFFER_DESC   kernel_buf)
 *
 *  @brief  Return a sample buffer to user mode; if no buffer is full and no flush is pending, wait
 *
 *  @param *filp          a file pointer
 *  @param *buf           a sampling buffer
 *  @param  count         size of the user's buffer
 *  @param  f_pos         file pointer (current offset in bytes)
 *  @param  kernel_buf    the kernel output buffer structure
 *
 *  @return  number of bytes read; zero indicates end of file; negative indicates an error
 *
 *  Place no more than count bytes into the user's buffer.
 *  Block if unavailable on "BUFFER_DESC_queue(buf)"
 *
 * <I>Special Notes:</I>
 *
 */
static ssize_t
output_Read (
    struct file  *filp, 
    char         *buf, 
    size_t        count, 
    loff_t       *f_pos,
    BUFFER_DESC   kernel_buf
)
{
    ssize_t  to_copy;
    ssize_t  uncopied;
    OUTPUT   outbuf = &BUFFER_DESC_outbuf(kernel_buf);
    U32      cur_buf, i;

/* Buffer is filled by output_fill_modules. */

    cur_buf = OUTPUT_current_buffer(outbuf);
    for (i=0; i<OUTPUT_NUM_BUFFERS; i++) { //iterate through all buffers
        cur_buf++;
        if (cur_buf >= OUTPUT_NUM_BUFFERS) { cur_buf = 0; } //circularly
        if ((to_copy = OUTPUT_buffer_full(outbuf, cur_buf))) {
            break;
        }
    }
    SEP_PRINT_DEBUG("buffer %d has %d bytes ready\n", (S32)cur_buf, (S32)to_copy);
    if (!flush && to_copy == 0) {
#if defined(CONFIG_PREEMPT_RT)
        do {
            unsigned long delay;
            delay = msecs_to_jiffies(1000);
            wait_event_interruptible_timeout(BUFFER_DESC_queue(kernel_buf),
                                 flush||OUTPUT_buffer_full(outbuf, cur_buf), delay);
        } while (!(flush||OUTPUT_buffer_full(outbuf, cur_buf)));
#else
        if (wait_event_interruptible(BUFFER_DESC_queue(kernel_buf),
                                 flush||OUTPUT_buffer_full(outbuf, cur_buf))) {
            return OS_RESTART_SYSCALL;
        }
#endif
        SEP_PRINT_DEBUG("Get to copy\n", (S32)cur_buf);
        to_copy = OUTPUT_buffer_full(outbuf, cur_buf);
        SEP_PRINT_DEBUG("output_Read awakened, buffer %d has %d bytes\n",cur_buf, (int)to_copy );
    }

    /* Ensure that the user's buffer is large enough */
    if (to_copy > count) {
        SEP_PRINT_DEBUG("user buffer is too small\n");
        return OS_NO_MEM;
    }

    /* Copy data to user space. Note that we use cur_buf as the source */ 
    if (abnormal_terminate == 0) {
        uncopied = copy_to_user(buf,
                                OUTPUT_buffer(outbuf, cur_buf),
                                to_copy);
        /* Mark the buffer empty */
        OUTPUT_buffer_full(outbuf, cur_buf) = 0;
        *f_pos += to_copy-uncopied;
        if (uncopied) {
            SEP_PRINT_DEBUG("only copied %d of %lld bytes of module records\n", 
                    (S32)to_copy, (long long)uncopied);
            return (to_copy - uncopied);
        }
    }
    else {
        to_copy = 0;
        SEP_PRINT_DEBUG("to copy set to 0\n");
    }

    // At end-of-file, decrement the count of active buffer writers

    if (to_copy == 0) {
        DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers);
        SEP_PRINT_DEBUG("output_Read decremented flush_writers\n");
        if (flush_val == TRUE) {
            wake_up_interruptible_sync(&flush_queue);
        }
    }

    return to_copy;
}
Example #26
void llog_handle_put(struct llog_handle *loghandle)
{
	LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
	if (atomic_dec_and_test(&loghandle->lgh_refcount))
		llog_free_handle(loghandle);
}
Example #27
/* free a backbone gw */
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}
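kfree_rcu() is shorthand for queueing an RCU callback that just kfree()s the enclosing object once a grace period has elapsed; Example #29 below spells the same pattern out with call_rcu() and an explicit callback. Roughly, the kfree_rcu() line above expands to a callback like this (a sketch, not the actual batman-adv source):

/* Sketch of the explicit form of kfree_rcu(backbone_gw, rcu). */
static void batadv_backbone_gw_free_rcu(struct rcu_head *rcu)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(rcu, struct batadv_bla_backbone_gw, rcu);
	kfree(backbone_gw);
}

The put would then read call_rcu(&backbone_gw->rcu, batadv_backbone_gw_free_rcu) instead of kfree_rcu(backbone_gw, rcu).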
Example #28
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}
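wake_up_var() pairs with wait_var_event() on the same address; presumably an afs teardown path blocks until the counter drains, along these lines (an illustrative sketch, not the actual afs wait site):

/* Illustrative counterpart to the wake_up_var() above. */
static void afs_wait_for_cells(struct afs_net *net)
{
	wait_var_event(&net->cells_outstanding,
		       atomic_read(&net->cells_outstanding) == 0);
}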
Example #29
/* free a claim, call claim_free_rcu if its the last reference */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, batadv_claim_free_rcu);
}
Example #30
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}