Example #1
int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	readlock_t *readlockp;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}
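
The comment at the top of rw_rdlock_impl() describes per-thread read-lock recursion: if the calling thread already holds a read lock, only its rd_count is bumped. A minimal userland sketch of that behaviour, assuming the public Solaris/illumos <synch.h> rwlock interface, whose rw_rdlock() entry point is expected to reach an implementation like the one above:

/*
 * Sketch only: assumes the Solaris/illumos <synch.h> rwlock interface.
 */
#include <stdio.h>
#include <synch.h>

int
main(void)
{
	rwlock_t rwl;

	(void) rwlock_init(&rwl, USYNC_THREAD, NULL);

	/* First acquisition takes the lock itself. */
	(void) rw_rdlock(&rwl);
	/* A second acquisition by the same thread only increments rd_count. */
	(void) rw_rdlock(&rwl);

	/* Each unlock drops one reference; the last one releases the lock. */
	(void) rw_unlock(&rwl);
	(void) rw_unlock(&rwl);

	(void) rwlock_destroy(&rwl);
	(void) printf("recursive read lock ok\n");
	return (0);
}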
void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}
int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}
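
For the rump-kernel wrappers above, a hedged sketch of typical kernel-side usage: rw_enter()/rw_exit() bracket the read-side critical section, and rw_lock_held() is only used for diagnostics. This is kernel code rather than a standalone program, and the softc structure and helper name are made up for illustration:

#include <sys/param.h>
#include <sys/rwlock.h>
#include <sys/systm.h>

struct example_softc {
	krwlock_t	sc_lock;
	int		sc_count;
};

/* Hypothetical helper: read a field under the reader lock. */
static int
example_get_count(struct example_softc *sc)
{
	int count;

	rw_enter(&sc->sc_lock, RW_READER);
	KASSERT(rw_lock_held(&sc->sc_lock));
	count = sc->sc_count;
	rw_exit(&sc->sc_lock);

	return count;
}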
Example #4
/**
 * chfs_remap_leb - unmap and then map a leb
 * @chmp: chfs mount structure
 *
 * This function gets an eraseblock from the erasable queue, unmaps it through
 * EBH and maps another eraseblock to the same LNR.
 * EBH will find a free eraseblock if one is available, or erase a dirty one
 * if there are no free blocks.
 *
 * Returns zero on success, an error code otherwise.
 *
 * Needs more brainstorming here.
 */
int
chfs_remap_leb(struct chfs_mount *chmp)
{
	int err;
	struct chfs_eraseblock *cheb;
	uint32_t dirty, unchecked, used, free, wasted;

	dbg("chfs_remap_leb\n");

	//dbg("chmp->chm_nr_erasable_blocks: %d\n", chmp->chm_nr_erasable_blocks);
	//dbg("ltree: %p ecl: %p\n", &chmp->chm_ebh->ltree_lock, &chmp->chm_lock_sizes);
	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));

	if (!chmp->chm_nr_erasable_blocks) {
		//TODO
		/* We don't have any erasable blocks; check whether there are
		 * blocks on erasable_pending_wbuf_queue, flush the data and
		 * then remap.
		 * If that list is empty as well, we need to GC?
		 */
		if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
			cheb = TAILQ_FIRST(&chmp->chm_erasable_pending_wbuf_queue);
			TAILQ_REMOVE(&chmp->chm_erasable_pending_wbuf_queue, cheb, queue);
			if (chmp->chm_wbuf_len) {
				mutex_exit(&chmp->chm_lock_sizes);
				chfs_flush_pending_wbuf(chmp);
				mutex_enter(&chmp->chm_lock_sizes);
			}
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue, cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		} else {
			/* We can't delete any block. */
			//FIXME should we return ENOSPC?
			return ENOSPC;
		}
	}
	cheb = TAILQ_FIRST(&chmp->chm_erase_pending_queue);
	TAILQ_REMOVE(&chmp->chm_erase_pending_queue, cheb, queue);
	chmp->chm_nr_erasable_blocks--;
	
	dirty = cheb->dirty_size;
	unchecked = cheb->unchecked_size;
	used = cheb->used_size;
	free = cheb->free_size;
	wasted = cheb->wasted_size;

	// Free allocated node references for this eraseblock
	chfs_free_node_refs(cheb);

	err = chfs_unmap_leb(chmp, cheb->lnr);
	if (err)
		return err;

	err = chfs_map_leb(chmp, cheb->lnr);
	if (err)
		return err;
	// Reset state to default and change chmp sizes too 
	chfs_change_size_dirty(chmp, cheb, -dirty);
	chfs_change_size_unchecked(chmp, cheb, -unchecked);
	chfs_change_size_used(chmp, cheb, -used);
	chfs_change_size_free(chmp, cheb, chmp->chm_ebh->eb_size - free);
	chfs_change_size_wasted(chmp, cheb, -wasted);

	KASSERT(cheb->dirty_size == 0);
	KASSERT(cheb->unchecked_size == 0);
	KASSERT(cheb->used_size == 0);
	KASSERT(cheb->free_size == chmp->chm_ebh->eb_size);
	KASSERT(cheb->wasted_size == 0);

	cheb->first_node = NULL;
	cheb->last_node  = NULL;
	//put it on the free_queue
	TAILQ_INSERT_TAIL(&chmp->chm_free_queue, cheb, queue);
	chmp->chm_nr_free_blocks++;
	dbg("remapped (free: %d, erasable: %d)\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks);
	KASSERT(!TAILQ_EMPTY(&chmp->chm_free_queue));

	return 0;
}
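
Given the KASSERTs at the top of chfs_remap_leb(), a caller is expected to hold chm_lock_mountfields and chm_lock_sizes, and must not hold the write side of chm_lock_wbuf. A hypothetical caller sketch under those assumptions (the wrapper name and the lock-acquisition order shown here are illustrative, not taken from the chfs sources):

/* Hypothetical wrapper illustrating the locking protocol implied above. */
static int
example_chfs_remap(struct chfs_mount *chmp)
{
	int err;

	mutex_enter(&chmp->chm_lock_mountfields);
	mutex_enter(&chmp->chm_lock_sizes);
	/* chm_lock_wbuf must not be write-held here. */
	err = chfs_remap_leb(chmp);
	mutex_exit(&chmp->chm_lock_sizes);
	mutex_exit(&chmp->chm_lock_mountfields);

	return err;
}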
Example #5
int
rw_lock_held(krwlock_t *rwlp)
{
	return (rw_read_held(rwlp) || rw_write_held(rwlp));
}