/*
 * Remove the range [start, end) from the queued segment *ts, which is
 * assumed to fully contain it.  Depending on how the range lines up with
 * the segment boundaries this shrinks the segment on one side, splits it
 * in two, or removes it entirely.  Caller must hold tm->tm_lock.
 */
static void
trim_map_segment_remove(trim_map_t *tm, trim_seg_t *ts, uint64_t start,
    uint64_t end)
{
	trim_seg_t *tail;
	boolean_t keep_head, keep_tail;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	keep_head = (ts->ts_start < start);
	keep_tail = (ts->ts_end > end);

	/* Account for the bytes leaving the queued-free map. */
	TRIM_MAP_SDEC(tm, end - start);

	if (keep_head && keep_tail) {
		/*
		 * Interior removal: split ts into [ts_start, start) and a
		 * new tail segment [end, old ts_end) carrying the same
		 * txg/time stamps.
		 */
		tail = kmem_alloc(sizeof (*tail), KM_SLEEP);
		tail->ts_start = end;
		tail->ts_end = ts->ts_end;
		tail->ts_txg = ts->ts_txg;
		tail->ts_time = ts->ts_time;
		ts->ts_end = start;
		avl_insert_here(&tm->tm_queued_frees, tail, ts, AVL_AFTER);
		list_insert_after(&tm->tm_head, ts, tail);
		TRIM_MAP_QINC(tm);
	} else if (keep_head) {
		/* Range reaches the segment's end: trim the tail off. */
		ts->ts_end = start;
	} else if (keep_tail) {
		/* Range reaches the segment's start: trim the head off. */
		ts->ts_start = end;
	} else {
		/* Range covers the whole segment: drop and free it. */
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
}
/*
 * Issue TRIM I/Os for all eligible queued segments of the given leaf vdev.
 * A segment is eligible when trim_map_first() deems it old enough (past
 * timelimit) and safe with respect to the txg bounds: txgtarget is the
 * newest txg we are willing to trim, txgsafe the newest txg whose frees
 * are guaranteed stable on disk.  L2ARC devices have no txg constraints.
 * Segments are moved to the inflight tree before the zio is issued.
 *
 * Fix: the original loop body ended with a redundant
 * "ts = trim_map_first(...)" whose result was immediately overwritten by
 * the while-condition's own call — a dead store costing an extra lookup
 * per iteration.  It has been removed.
 */
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, offset, txgtarget, txgsafe;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		/* L2ARC content is not txg-bound; age is the only gate. */
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
	/* Loop until we have sent all outstanding free's */
	while ((ts = trim_map_first(tm, txgtarget, txgsafe, timelimit)) !=
	    NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		offset = ts->ts_start;
		TRIM_MAP_SDEC(tm, size);
		TRIM_MAP_QDEC(tm);
		/*
		 * We drop the lock while we call zio_nowait as the IO
		 * scheduler can result in a different IO being run e.g.
		 * a write which would result in a recursive lock.
		 */
		mutex_exit(&tm->tm_lock);
		zio_nowait(zio_trim(zio, spa, vd, offset, size));
		mutex_enter(&tm->tm_lock);
	}
	mutex_exit(&tm->tm_lock);
}
/*
 * Tear down the TRIM map of a leaf vdev: drain and free every queued
 * segment, destroy the trees, lists and lock, and free the map itself.
 * Any frees still inflight are first pruned via
 * trim_map_vdev_commit_done().
 *
 * Fix: the drain loop previously called kmem_free(ts, ...) BEFORE
 * TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start), dereferencing the
 * segment after it was freed (use-after-free).  The counter updates now
 * precede the free.
 */
void
trim_map_destroy(vdev_t *vd)
{
	trim_map_t *tm;
	trim_seg_t *ts;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = vd->vdev_trimmap;
	if (tm == NULL)
		return;

	/*
	 * We may have been called before trim_map_vdev_commit_done()
	 * had a chance to run, so do it now to prune the remaining
	 * inflight frees.
	 */
	trim_map_vdev_commit_done(vd->vdev_spa, vd);

	mutex_enter(&tm->tm_lock);
	while ((ts = list_head(&tm->tm_head)) != NULL) {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		/* Read the segment's bounds before freeing it. */
		TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
	mutex_exit(&tm->tm_lock);

	avl_destroy(&tm->tm_queued_frees);
	avl_destroy(&tm->tm_inflight_frees);
	avl_destroy(&tm->tm_inflight_writes);
	list_destroy(&tm->tm_pending_writes);
	list_destroy(&tm->tm_head);
	mutex_destroy(&tm->tm_lock);
	kmem_free(tm, sizeof (*tm));
	vd->vdev_trimmap = NULL;
}
/*
 * Add the free range [start, end) to the queued-free map, coalescing
 * with exactly-adjacent segments where possible.  If an existing segment
 * overlaps the range, only the non-overlapping head/tail portions are
 * added (by recursion).  Caller must hold tm->tm_lock.
 */
static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;
	hrtime_t time;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	time = gethrtime();
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	/* Look for a segment overlapping the range; 'where' records the
	 * insertion point when there is none. */
	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		/* Overlap found: recurse on the portions extending past
		 * either side of the existing segment, then stop. */
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

	/* No overlap: see whether the neighbors exactly abut the range. */
	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);
	merge_before = (ts_before != NULL && ts_before->ts_end == start);
	merge_after = (ts_after != NULL && ts_after->ts_start == end);

	if (merge_before && merge_after) {
		/* Range bridges the gap: fold ts_before and the new range
		 * into ts_after, then drop ts_before.  The size counter
		 * grows by the whole gap (which equals end - start here). */
		TRIM_MAP_SINC(tm, ts_after->ts_start - ts_before->ts_end);
		TRIM_MAP_QDEC(tm);
		avl_remove(&tm->tm_queued_frees, ts_before);
		list_remove(&tm->tm_head, ts_before);
		ts_after->ts_start = ts_before->ts_start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		/* Extend the preceding segment forward to 'end'. */
		TRIM_MAP_SINC(tm, end - ts_before->ts_end);
		ts_before->ts_end = end;
		ts_before->ts_txg = txg;
		ts_before->ts_time = time;
	} else if (merge_after) {
		/* Extend the following segment backward to 'start'. */
		TRIM_MAP_SINC(tm, ts_after->ts_start - start);
		ts_after->ts_start = start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
	} else {
		/* Isolated range: create a new segment and insert it at
		 * the position returned by avl_find(). */
		TRIM_MAP_SINC(tm, end - start);
		TRIM_MAP_QINC(tm);
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		ts->ts_time = time;
		avl_insert(&tm->tm_queued_frees, ts, where);
		list_insert_tail(&tm->tm_head, ts);
	}
}