Code example #1
File: checkpoint.c Project: hugh712/Jollen
/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with journal_datalist_lock held.
 */
void __journal_insert_checkpoint(struct journal_head *jh, 
			       transaction_t *transaction)
{
	JBUFFER_TRACE(jh, "entry");
	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jdirty(jh2bh(jh)));
	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

	assert_spin_locked(&journal_datalist_lock);
	jh->b_cp_transaction = transaction;

	if (!transaction->t_checkpoint_list) {
		jh->b_cpnext = jh->b_cpprev = jh;
	} else {
		jh->b_cpnext = transaction->t_checkpoint_list;
		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
		jh->b_cpprev->b_cpnext = jh;
		jh->b_cpnext->b_cpprev = jh;
	}
	transaction->t_checkpoint_list = jh;
}
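The body of __journal_insert_checkpoint above is the standard insertion into a circular doubly-linked list. A minimal standalone sketch of the same pattern (hypothetical node type, not from the kernel source):

struct node { struct node *next, *prev; };

/* Insert n at the head of a circular doubly-linked list; *headp may be NULL. */
static void circ_insert_head(struct node **headp, struct node *n)
{
	if (!*headp) {
		n->next = n->prev = n;	/* first element points at itself */
	} else {
		n->next = *headp;
		n->prev = (*headp)->prev;
		n->prev->next = n;
		n->next->prev = n;
	}
	*headp = n;
}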
Code example #2
void saa7146_buffer_next(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q, int vbi)
{
	struct saa7146_buf *buf,*next = NULL;

	BUG_ON(!q);

	DEB_INT(("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi));

	assert_spin_locked(&dev->slock);
	if (!list_empty(&q->queue)) {
		/* activate next one from queue */
		buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue);
		list_del(&buf->vb.queue);
		if (!list_empty(&q->queue))
			next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
		q->curr = buf;
		DEB_INT(("next buffer: buf:%p, prev:%p, next:%p\n", buf, q->queue.prev,q->queue.next));
		buf->activate(dev,buf,next);
	} else {
		/* (remainder of the function is not shown in this excerpt) */
	}
}
Code example #3
int saa7146_buffer_queue(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q,
			 struct saa7146_buf *buf)
{
	assert_spin_locked(&dev->slock);
	DEB_EE(("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf));

	BUG_ON(!q);

	if (NULL == q->curr) {
		q->curr = buf;
		DEB_D(("immediately activating buffer %p\n", buf));
		buf->activate(dev,buf,NULL);
	} else {
		list_add_tail(&buf->vb.queue,&q->queue);
		buf->vb.state = VIDEOBUF_QUEUED;
		DEB_D(("adding buffer %p to queue. (active buffer present)\n", buf));
	}
	return 0;
}
Code example #4
File: checkpoint.c Project: Soaa-/-lightspeed
/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
{
	int nblocks;
	assert_spin_locked(&journal->j_state_lock);

	nblocks = jbd_space_needed(journal);
	while (__jbd2_log_space_left(journal) < nblocks) {
		if (journal->j_flags & JBD2_ABORT)
			return;
		spin_unlock(&journal->j_state_lock);
		mutex_lock(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * outstanding transactions there is nothing to checkpoint and
		 * we can't make progress. Abort the journal in this case.
		 */
		spin_lock(&journal->j_state_lock);
		spin_lock(&journal->j_list_lock);
		nblocks = jbd_space_needed(journal);
		if (__jbd2_log_space_left(journal) < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;

			spin_unlock(&journal->j_list_lock);
			spin_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else {
				printk(KERN_ERR "%s: no transactions\n",
				       __func__);
				jbd2_journal_abort(journal, 0);
			}

			spin_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
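The comment in __jbd2_log_wait_for_space spells out the general rule behind this shape: a thread cannot sleep under a spinlock, so it drops the lock, sleeps on a mutex, and retests the condition after relocking, since another thread may already have done the work. A hedged, generic sketch of the idiom (hypothetical ctx type and helpers, not kernel API):

/* Sketch: wait until cond(c) holds; caller enters and leaves with c->lock held. */
static void wait_for_cond(struct ctx *c)
{
	assert_spin_locked(&c->lock);
	while (!cond(c)) {
		spin_unlock(&c->lock);		/* cannot sleep under a spinlock */
		mutex_lock(&c->slow_mutex);	/* serialize the slow path; may sleep */
		spin_lock(&c->lock);
		if (!cond(c)) {			/* retest: someone may have beaten us */
			spin_unlock(&c->lock);
			do_slow_work(c);	/* sleeps; runs with the lock dropped */
			spin_lock(&c->lock);
		}
		mutex_unlock(&c->slow_mutex);
	}
}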
Code example #5
File: tlb.c Project: cybernet/rhel7-kernel
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
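pgtable_trans_huge_withdraw pairs with a deposit helper that pushes preallocated page tables onto the same per-PMD list, chaining them through the page-table memory itself. A hedged reconstruction of the deposit side in the same cast-to-list_head style (details vary by architecture and kernel version):

/* Sketch of the deposit counterpart; caller holds mm->page_table_lock. */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO: the first deposited table heads the list */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD((struct list_head *)pgtable);
	else
		list_add((struct list_head *)pgtable,
			 (struct list_head *)pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}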
Code example #6
File: saa7146_fops.c Project: johnny/CobraDroidBeta
void saa7146_buffer_finish(struct saa7146_dev *dev,
			   struct saa7146_dmaqueue *q,
			   int state)
{
	assert_spin_locked(&dev->slock);
	DEB_EE(("dev:%p, dmaq:%p, state:%d\n", dev, q, state));
	DEB_EE(("q->curr:%p\n",q->curr));

	BUG_ON(!q->curr);

	/* finish current buffer */
	if (NULL == q->curr) {
		DEB_D(("aiii. no current buffer\n"));
		return;
	}

	q->curr->vb.state = state;
	do_gettimeofday(&q->curr->vb.ts);
	wake_up(&q->curr->vb.done);

	q->curr = NULL;
}
Code example #7
File: pgtable.c Project: CCNITSilchar/linux
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non book3s64 platforms. But 8xx
		 * can possibly use tsize derived from hstate.
		 */
		psize = 0;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}
Code example #8
File: ima_iint.c Project: ANFS/ANFS-kernel
/*
 * __ima_iint_find - return the iint associated with an inode
 */
static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
{
	struct ima_iint_cache *iint;
	struct rb_node *n = ima_iint_tree.rb_node;

	assert_spin_locked(&ima_iint_lock);

	while (n) {
		iint = rb_entry(n, struct ima_iint_cache, rb_node);

		if (inode < iint->inode)
			n = n->rb_left;
		else if (inode > iint->inode)
			n = n->rb_right;
		else
			break;
	}
	if (!n)
		return NULL;

	return iint;
}
Code example #9
int saa7134_buffer_queue(struct saa7134_dev *dev,
			 struct saa7134_dmaqueue *q,
			 struct saa7134_buf *buf)
{
	struct saa7134_buf *next = NULL;

	assert_spin_locked(&dev->slock);
	dprintk("buffer_queue %p\n",buf);
	if (NULL == q->curr) {
		if (!q->need_two) {
			q->curr = buf;
			buf->activate(dev,buf,NULL);
		} else if (list_empty(&q->queue)) {
			list_add_tail(&buf->vb.queue,&q->queue);
			buf->vb.state = STATE_QUEUED;
		} else {
			next = list_entry(q->queue.next,struct saa7134_buf,
					  vb.queue);
			q->curr = buf;
			buf->activate(dev,buf,next);
		}
	} else {
		/* (remainder of the function is not shown in this excerpt) */
	}
}
Code example #10
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
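btrfs_delayed_ref_lock pins the head with a reference count before dropping the spinlock, then revalidates (head->node.in_tree) after reacquiring it, returning -EAGAIN if the object left the tree in the meantime. A generic sketch of this pin, drop, relock, revalidate idiom (hypothetical parent/obj types and helpers, not kernel API):

/* Sketch: sleep on obj->mutex while obj lives under parent->lock. */
static int lock_obj_slow(struct parent *parent, struct obj *obj)
{
	assert_spin_locked(&parent->lock);
	if (mutex_trylock(&obj->mutex))
		return 0;			/* fast path: no need to sleep */

	atomic_inc(&obj->refs);			/* pin: obj must outlive the unlocked gap */
	spin_unlock(&parent->lock);
	mutex_lock(&obj->mutex);		/* may sleep */
	spin_lock(&parent->lock);
	if (!obj->attached) {			/* revalidate after the gap */
		mutex_unlock(&obj->mutex);
		obj_put(obj);			/* drop our pin */
		return -EAGAIN;			/* caller retries the lookup */
	}
	obj_put(obj);
	return 0;
}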
Code example #11
File: lov_merge.c Project: 020gzh/linux
/* Must be called under the lov_stripe_lock() */
int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
		   u64 size, int shrink)
{
	struct lov_oinfo *loi;
	int stripe = 0;
	__u64 kms;

	assert_spin_locked(&lsm->lsm_lock);
	LASSERT(lsm->lsm_lock_owner == current_pid());

	if (shrink) {
		for (; stripe < lsm->lsm_stripe_count; stripe++) {
			struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];

			kms = lov_size_to_stripe(lsm, size, stripe);
			CDEBUG(D_INODE,
			       "stripe %d KMS %sing %llu->%llu\n",
			       stripe, kms > loi->loi_kms ? "increase":"shrink",
			       loi->loi_kms, kms);
			loi->loi_lvb.lvb_size = kms;
			loi_kms_set(loi, loi->loi_lvb.lvb_size);
		}
		return 0;
	}

	if (size > 0)
		stripe = lov_stripe_number(lsm, size - 1);
	kms = lov_size_to_stripe(lsm, size, stripe);
	loi = lsm->lsm_oinfo[stripe];

	CDEBUG(D_INODE, "stripe %d KMS %sincreasing %llu->%llu\n",
	       stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
	if (kms > loi->loi_kms)
		loi_kms_set(loi, kms);

	return 0;
}
Code example #12
File: v4l2.c Project: alexxxwork/solo6x10
static void solo_thread_try(struct solo_filehandle *fh)
{
	struct videobuf_buffer *vb;

	/* Only "break" from this loop if slock is held, otherwise
	 * just return. */
	for (;;) {
		unsigned int cur_write;

		cur_write = SOLO_VI_STATUS0_PAGE(
			solo_reg_read(fh->solo_dev, SOLO_VI_STATUS0));
		if (cur_write == fh->old_write)
			return;

		spin_lock(&fh->slock);

		if (list_empty(&fh->vidq_active))
			break;

		vb = list_first_entry(&fh->vidq_active, struct videobuf_buffer,
				      queue);

		if (!waitqueue_active(&vb->done))
			break;

		fh->old_write = cur_write;
		list_del(&vb->queue);
		vb->state = VIDEOBUF_ACTIVE;

		spin_unlock(&fh->slock);

		solo_fillbuf(fh, vb);
	}

	assert_spin_locked(&fh->slock);
	spin_unlock(&fh->slock);
}
Code example #13
void flush_reserved2grabbed(txn_atom * atom, __u64 count)
{
	reiser4_context *ctx;
	reiser4_super_info_data *sbinfo;

	assert("nikita-2788", atom != NULL);
	assert_spin_locked(&(atom->alock));

	ctx = get_current_context();
	sbinfo = get_super_private(ctx->super);

	add_to_ctx_grabbed(ctx, count);

	sub_from_atom_flush_reserved_nolock(atom, (__u32) count);

	spin_lock_reiser4_super(sbinfo);

	sbinfo->blocks_grabbed += count;
	sub_from_sb_flush_reserved(sbinfo, count);

	assert("vpf-292", reiser4_check_block_counters(ctx->super));

	spin_unlock_reiser4_super(sbinfo);
}
Code example #14
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);
	/* (per-type name/resource setup is not shown in this excerpt) */
}
Code example #15
void __journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__journal_temp_unlink_buffer(jh);
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
Code example #16
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		free_extent_map(merge);
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	/* (the symmetric forward-merge step is not shown in this excerpt) */
out:
	return ret;
}
Code example #17
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * For khugepaged to collapse normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_sem.
	 * After the pmd is set to none, a pte_clear runs this assertion,
	 * so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
Code example #18
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
	assert_spin_locked(&c->erase_completion_lock);
	if (c->gc_task && jffs2_thread_should_wake(c))
		send_sig(SIGHUP, c->gc_task, 1);
}
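This short example shows the contract in its purest form: a helper that must be called with a lock held takes the assertion as its first statement, documenting the requirement and checking it at runtime in configurations where lock state is tracked. A minimal hypothetical sketch of the convention:

/* Hypothetical driver helper: callers must already hold dev->lock. */
static void my_stats_inc(struct my_dev *dev)
{
	assert_spin_locked(&dev->lock);	/* locking contract, checked at runtime */
	dev->stat_count++;
}

/* A typical caller takes the lock itself: */
static void my_irq_tick(struct my_dev *dev)
{
	spin_lock(&dev->lock);
	my_stats_inc(dev);
	spin_unlock(&dev->lock);
}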
Code example #19
void __journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track dirty bit in buffer_jbddirty
		 * instead of buffer_dirty. We should not see a dirty bit set
		 * here because we clear it in do_get_write_access but e.g.
		 * tune2fs can modify the sb and set the dirty bit at any time
		 * so we try to gracefully handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__journal_temp_unlink_buffer(jh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list =  &transaction->t_locked_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
Code example #20
File: notification.c Project: AlexShiLucky/linux
/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list) ? true : false;
}
Code example #21
/*
 * Clean up a transaction's checkpoint list.
 *
 * We wait for any pending IO to complete and make sure any clean
 * buffers are removed from the transaction.
 *
 * Return 1 if we performed any actions which might have destroyed the
 * checkpoint.  (journal_remove_checkpoint() deletes the transaction when
 * the last checkpoint buffer is cleansed)
 *
 * Called with j_list_lock held.
 */
static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
{
	struct journal_head *jh, *next_jh, *last_jh;
	struct buffer_head *bh;
	int ret = 0;

	assert_spin_locked(&journal->j_list_lock);
	jh = transaction->t_checkpoint_list;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	next_jh = jh;
	do {
		jh = next_jh;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			atomic_inc(&bh->b_count);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto out_return_1;
		}

		/*
		 * This is foul
		 */
		if (!jbd_trylock_bh_state(bh)) {
			jbd_sync_bh(journal, bh);
			goto out_return_1;
		}

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			log_start_commit(journal, tid);
			log_wait_commit(journal, tid);
			goto out_return_1;
		}

		/*
		 * AKPM: I think the buffer_jbddirty test is redundant - it
		 * shouldn't have NULL b_transaction?
		 */
		next_jh = jh->b_cpnext;
		if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
			BUFFER_TRACE(bh, "remove from checkpoint");
			__journal_remove_checkpoint(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			__brelse(bh);
			ret = 1;
		} else {
			jbd_unlock_bh_state(bh);
		}
	} while (jh != last_jh);

	return ret;
out_return_1:
	spin_lock(&journal->j_list_lock);
	return 1;
}
Code example #22
File: lov_merge.c Project: 020gzh/linux
/** Merge the lock value block(&lvb) attributes and KMS from each of the
 * stripes in a file into a single lvb. It is expected that the caller
 * initializes the current atime, mtime, ctime to avoid regressing a more
 * uptodate time on the local client.
 */
int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
		      struct ost_lvb *lvb, __u64 *kms_place)
{
	__u64 size = 0;
	__u64 kms = 0;
	__u64 blocks = 0;
	s64 current_mtime = lvb->lvb_mtime;
	s64 current_atime = lvb->lvb_atime;
	s64 current_ctime = lvb->lvb_ctime;
	int i;
	int rc = 0;

	assert_spin_locked(&lsm->lsm_lock);
	LASSERT(lsm->lsm_lock_owner == current_pid());

	CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
	       POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime,
	       lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
	for (i = 0; i < lsm->lsm_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		u64 lov_size, tmpsize;

		if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) {
			rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
			continue;
		}

		tmpsize = loi->loi_kms;
		lov_size = lov_stripe_size(lsm, tmpsize, i);
		if (lov_size > kms)
			kms = lov_size;

		if (loi->loi_lvb.lvb_size > tmpsize)
			tmpsize = loi->loi_lvb.lvb_size;

		lov_size = lov_stripe_size(lsm, tmpsize, i);
		if (lov_size > size)
			size = lov_size;
		/* merge blocks, mtime, atime */
		blocks += loi->loi_lvb.lvb_blocks;
		if (loi->loi_lvb.lvb_mtime > current_mtime)
			current_mtime = loi->loi_lvb.lvb_mtime;
		if (loi->loi_lvb.lvb_atime > current_atime)
			current_atime = loi->loi_lvb.lvb_atime;
		if (loi->loi_lvb.lvb_ctime > current_ctime)
			current_ctime = loi->loi_lvb.lvb_ctime;

		CDEBUG(D_INODE, "MDT ID "DOSTID" on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
		       POSTID(&lsm->lsm_oi), loi->loi_ost_idx,
		       loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime,
		       loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime,
		       loi->loi_lvb.lvb_blocks);
	}

	*kms_place = kms;
	lvb->lvb_size = size;
	lvb->lvb_blocks = blocks;
	lvb->lvb_mtime = current_mtime;
	lvb->lvb_atime = current_atime;
	lvb->lvb_ctime = current_ctime;
	return rc;
}
Code example #23
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);


	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
	     res->lockname.name, master);

	if (!master) {
		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n", res->lockname.len, res->lockname.name,
		     res, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}
Code example #24
File: checkpoint.c Project: hugh712/Jollen
/*
 * Clean up a transaction's checkpoint list.  
 *
 * We wait for any pending IO to complete and make sure any clean
 * buffers are removed from the transaction. 
 *
 * Return 1 if we performed any actions which might have destroyed the
 * checkpoint.  (journal_remove_checkpoint() deletes the transaction when
 * the last checkpoint buffer is cleansed)
 *
 * Called with the journal locked.
 * Called with journal_datalist_lock held.
 */
static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
{
	struct journal_head *jh, *next_jh, *last_jh;
	struct buffer_head *bh;
	int ret = 0;

	assert_spin_locked(&journal_datalist_lock);
	jh = transaction->t_checkpoint_list;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	next_jh = jh;
	do {
		jh = next_jh;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			atomic_inc(&bh->b_count);
			spin_unlock(&journal_datalist_lock);
			unlock_journal(journal);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto out_return_1;
		}
		
		if (jh->b_transaction != NULL) {
			transaction_t *transaction = jh->b_transaction;
			tid_t tid = transaction->t_tid;

			spin_unlock(&journal_datalist_lock);
			log_start_commit(journal, transaction);
			unlock_journal(journal);
			log_wait_commit(journal, tid);
			goto out_return_1;
		}

		/*
		 * We used to test for (jh->b_list != BUF_CLEAN) here.
		 * But unmap_underlying_metadata() can place buffer onto
		 * BUF_CLEAN. Since refile_buffer() no longer takes buffers
		 * off checkpoint lists, we cope with it here
		 */
		/*
		 * AKPM: I think the buffer_jdirty test is redundant - it
		 * shouldn't have NULL b_transaction?
		 */
		next_jh = jh->b_cpnext;
		if (!buffer_dirty(bh) && !buffer_jdirty(bh)) {
			BUFFER_TRACE(bh, "remove from checkpoint");
			__journal_remove_checkpoint(jh);
			__journal_remove_journal_head(bh);
			refile_buffer(bh);
			__brelse(bh);
			ret = 1;
		}
		
		jh = next_jh;
	} while (jh != last_jh);

	return ret;
out_return_1:
	lock_journal(journal);
	spin_lock(&journal_datalist_lock);
	return 1;
}
Code example #25
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		assert_spin_locked(&eb->lock);
}
Code example #26
/* Increase the counter of blocks reserved for flush in the atom. */
static void add_to_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
{
	assert("zam-772", atom != NULL);
	assert_spin_locked(&(atom->alock));
	atom->flush_reserved += count;
}
Code example #27
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
Code example #28
File: drm_debugfs_crc.c Project: Lyude/linux
static int crtc_crc_data_count(struct drm_crtc_crc *crc)
{
	assert_spin_locked(&crc->lock);
	return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}
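CIRC_CNT comes from include/linux/circ_buf.h and counts the filled slots of a power-of-two ring buffer; the assertion guarantees that head and tail are read consistently under crc->lock. For reference, the macro and an illustrative evaluation:

/* From include/linux/circ_buf.h: */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

/* e.g. head = 5, tail = 2, size = 8  ->  (5 - 2) & 7 = 3 entries pending */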
Code example #29
File: sock.c Project: chixsh/tempesta
/**
 * This is the main body of the socket close function in Sync Sockets.
 *
 * inet_release() can sleep (as well as tcp_close()), so we make our own
 * non-sleepable socket closing.
 *
 * This function must be used only for data sockets.
 * Use standard sock_release() for listening sockets.
 *
 * In most cases it is called in softirq context and from ksoftirqd which
 * processes data from the socket (RSS and RPS distribute packets that way).
 *
 * Note: it used to be called in process context as well, at the time when
 * Tempesta starts or stops. That's not the case right now, but it may change.
 *
 * TODO In some cases we need to close the socket aggressively w/o FIN_WAIT_2 state,
 * e.g. by sending RST. So we need to add second parameter to the function
 * which says how to close the socket.
 * One of the examples is rcl_req_limit() (it should reset connections).
 * See tcp_sk(sk)->linger2 processing in standard tcp_close().
 *
 * Called with locked socket.
 */
static void
ss_do_close(struct sock *sk)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	if (unlikely(!sk))
		return;
	SS_DBG("Close socket %p (%s): cpu=%d account=%d refcnt=%d\n",
	       sk, ss_statename[sk->sk_state], smp_processor_id(),
	       sk_has_account(sk), atomic_read(&sk->sk_refcnt));
	assert_spin_locked(&sk->sk_lock.slock);
	ss_sock_cpu_check(sk);
	BUG_ON(sk->sk_state == TCP_LISTEN);
	/* We must return immediately, so LINGER option is meaningless. */
	WARN_ON(sock_flag(sk, SOCK_LINGER));
	/* We don't support virtual containers, so TCP_REPAIR is prohibited. */
	WARN_ON(tcp_sk(sk)->repair);
	/* The socket must have atomic allocation mask. */
	WARN_ON(!(sk->sk_allocation & GFP_ATOMIC));

	/* The below is mostly copy-paste from tcp_close(). */
	sk->sk_shutdown = SHUTDOWN_MASK;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		SS_DBG("free rcv skb %p\n", skb);
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	if (data_was_unread) {
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	}
	else if (tcp_close_state(sk)) {
		/* The code below is taken from tcp_send_fin(). */
		struct tcp_sock *tp = tcp_sk(sk);
		int mss_now = tcp_current_mss(sk);

		skb = tcp_write_queue_tail(sk);

		if (tcp_send_head(sk) != NULL) {
			/* Send FIN with data if we have any. */
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
			TCP_SKB_CB(skb)->end_seq++;
			tp->write_seq++;
		}
		else {
			/* No data to send in the socket, allocate new skb. */
			skb = alloc_skb_fclone(MAX_TCP_HEADER,
					       sk->sk_allocation);
			if (!skb) {
				SS_WARN("can't send FIN due to bad alloc");
			} else {
				skb_reserve(skb, MAX_TCP_HEADER);
				tcp_init_nondata_skb(skb, tp->write_seq,
						     TCPHDR_ACK | TCPHDR_FIN);
				tcp_queue_skb(sk, skb);
			}
		}
		__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
	}

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * SS sockets are processed in softirq only,
	 * so backlog queue should be empty.
	 */
	WARN_ON(sk->sk_backlog.tail);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		return;

	if (sk->sk_state == TCP_FIN_WAIT2) {
		const int tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			return;
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
}
Code example #30
File: intel_pipe_crc.c Project: AshishNamdev/linux
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}