Example #1
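/*
 * Merge the freed range [start, end) into the set of segments queued for
 * TRIM: recurse around anything already queued and coalesce with adjacent
 * segments that were created in the same txg.
 */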
static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	tsearch.ts_start = start;
	tsearch.ts_end = end;

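	/*
	 * If part of [start, end) is already queued, add only the pieces
	 * that extend before and after the existing segment.
	 */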
	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

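	/*
	 * No overlap: check whether the range can be coalesced with its
	 * neighbors, which is only done for segments freed in the same txg.
	 */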
	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);

	merge_before = (ts_before != NULL && ts_before->ts_end == start &&
	    ts_before->ts_txg == txg);
	merge_after = (ts_after != NULL && ts_after->ts_start == end &&
	    ts_after->ts_txg == txg);

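	/*
	 * Either extend an adjacent segment (dropping ts_before when both
	 * neighbors collapse into one) or insert a brand new segment.
	 */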
	if (merge_before && merge_after) {
		avl_remove(&tm->tm_queued_frees, ts_before);
		list_remove(&tm->tm_head, ts_before);
		ts_after->ts_start = ts_before->ts_start;
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		ts_before->ts_end = end;
	} else if (merge_after) {
		ts_after->ts_start = start;
	} else {
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		avl_insert(&tm->tm_queued_frees, ts, where);
		list_insert_tail(&tm->tm_head, ts);
	}
}
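
For context, the example manipulates trim_seg_t and the AVL tree of queued frees without showing their definitions. The sketch below is inferred from the field accesses and from the avl_insert()/list_insert_tail() calls above rather than taken from the original file; the embedded node member names and the comparator are assumptions.

typedef struct trim_seg {
	avl_node_t	ts_node;	/* Linkage in tm_queued_frees (assumed name). */
	list_node_t	ts_next;	/* Linkage in tm_head (assumed name). */
	uint64_t	ts_start;	/* First offset of the freed range. */
	uint64_t	ts_end;		/* Offset just past the freed range. */
	uint64_t	ts_txg;		/* Txg in which the range was freed. */
} trim_seg_t;

/*
 * A comparator along these lines would make the avl_find() call in
 * trim_map_segment_add() return any segment that overlaps the search
 * range: overlapping segments compare as equal, everything else sorts
 * by starting offset.
 */
static int
trim_map_seg_compare(const void *x1, const void *x2)
{
	const trim_seg_t *s1 = x1;
	const trim_seg_t *s2 = x2;

	if (s1->ts_start < s2->ts_start)
		return (s1->ts_end > s2->ts_start ? 0 : -1);
	if (s1->ts_start > s2->ts_start)
		return (s1->ts_start < s2->ts_end ? 0 : 1);
	return (0);
}

With overlap treated as equality, the first avl_find() in the example detects partially queued ranges, and the recursion then adds only the uncovered pieces.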
Example #2
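/*
 * Record a freed range so it can be queued for TRIM, skipping any portion
 * that is still covered by an in-flight write.
 */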
static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	zio_t zsearch, *zs;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	zsearch.io_offset = start;
	zsearch.io_size = end - start;

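	/*
	 * If no in-flight write overlaps the freed range, queue it for
	 * TRIM as-is.
	 */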
	zs = avl_find(&tm->tm_inflight_writes, &zsearch, NULL);
	if (zs == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}
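	/*
	 * Otherwise queue only the pieces before and after the overlapping
	 * write; the written portion must not be trimmed.
	 */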
	if (start < zs->io_offset)
		trim_map_free_locked(tm, start, zs->io_offset, txg);
	if (zs->io_offset + zs->io_size < end)
		trim_map_free_locked(tm, zs->io_offset + zs->io_size, end, txg);
}