/*
 * All B-Tree records within the specified key range which also conform
 * to the transaction id range are returned.  Mirroring code keeps track
 * of the last transaction id fully scanned and can efficiently pick up
 * where it left off if interrupted.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.
 */
int
hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	union hammer_ioc_mrecord_any mrec;
	hammer_btree_leaf_elm_t elm;
	const int crc_start = HAMMER_MREC_CRCOFF;
	char *uptr;
	int error;
	int data_len;
	int bytes;
	int eatdisk;
	int mrec_flags;
	u_int32_t localization;
	u_int32_t rec_crc;

	localization = (u_int32_t)mirror->pfs_id << 16;

	if ((mirror->key_beg.localization | mirror->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
		return(EINVAL);

	mirror->key_cur = mirror->key_beg;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;
	bzero(&mrec, sizeof(mrec));
	bzero(&cmirror, sizeof(cmirror));

	/*
	 * Make CRC errors non-fatal (at least on data), causing an EDOM
	 * error instead of EIO.
	 */
	trans->flags |= HAMMER_TRANSF_CRCDOM;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += localization;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag filters the search to only return elements whose create
	 * or delete TID is >= mirror_tid.  The B-Tree uses the mirror_tid
	 * field stored with internal and leaf nodes to shortcut the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = mirror->tid_beg;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Yield to more important tasks
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * An internal node can be returned in mirror-filtered
		 * mode and indicates that the scan is returning a skip
		 * range in the cursor->cmirror structure.
		 */
		uptr = (char *)mirror->ubuf + mirror->count;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Check space
			 */
			mirror->key_cur = cmirror.skip_beg;
			bytes = sizeof(mrec.skip);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_SKIP;
			mrec.head.rec_size = bytes;
			mrec.skip.skip_beg = cmirror.skip_beg;
			mrec.skip.skip_end = cmirror.skip_end;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						  bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 0;
			goto didwrite;
		}

		/*
		 * Leaf node.  In full-history mode we could filter out
		 * elements modified outside the user-requested TID range.
		 *
		 * However, such elements must be returned so the writer
		 * can compare them against the target to determine what
		 * needs to be deleted on the target, particularly for
		 * no-history mirrors.
		 */
		KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		mirror->key_cur = elm->base;

		/*
		 * If the record was created after our end point we just
		 * ignore it.
		 */
		if (elm->base.create_tid > mirror->tid_end) {
			error = 0;
			bytes = 0;
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * Determine if we should generate a PASS or a REC.  PASS
		 * records are records without any data payload.  Such
		 * records will be generated if the target is already expected
		 * to have the record, allowing it to delete the gaps.
		 *
		 * A PASS record is also used to perform deletions on the
		 * target.
		 *
		 * Such deletions are needed if the master or files on the
		 * master are no-history, or if the slave is so far behind
		 * that the master has already been pruned.
		 */
		if (elm->base.create_tid < mirror->tid_beg) {
			bytes = sizeof(mrec.rec);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec.
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_PASS;
			mrec.head.rec_size = bytes;
			mrec.rec.leaf = *elm;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						  bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * The core code exports the data to userland.
		 *
		 * CRC errors on data are reported but the data is still
		 * passed through; the user program must deal with the
		 * damaged data.
		 *
		 * If userland just wants the B-Tree records it can
		 * request that bulk data not be returned.  This is
		 * used during mirror-stream histogram generation.
		 */
		mrec_flags = 0;
		data_len = (elm->data_offset) ? elm->data_len : 0;
		if (data_len &&
		    (mirror->head.flags & HAMMER_IOC_MIRROR_NODATA)) {
			data_len = 0;
			mrec_flags |= HAMMER_MRECF_NODATA;
		}
		if (data_len) {
			error = hammer_btree_extract(&cursor,
						     HAMMER_CURSOR_GET_DATA);
			if (error) {
				if (error != EDOM)
					break;
				mrec_flags |= HAMMER_MRECF_CRC_ERROR |
					      HAMMER_MRECF_DATA_CRC_BAD;
			}
		}

		bytes = sizeof(mrec.rec) + data_len;
		if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) > mirror->size)
			break;

		/*
		 * Construct the record for userland and copyout.
		 *
		 * The user is asking for a snapshot.  If the record was
		 * deleted beyond the user-requested ending tid, the record
		 * is not considered deleted from the point of view of
		 * userland and delete_tid is cleared.
		 */
		mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
		mrec.head.type = HAMMER_MREC_TYPE_REC | mrec_flags;
		mrec.head.rec_size = bytes;
		mrec.rec.leaf = *elm;

		if (elm->base.delete_tid > mirror->tid_end)
			mrec.rec.leaf.base.delete_tid = 0;
		rec_crc = crc32(&mrec.head.rec_size,
				sizeof(mrec.rec) - crc_start);
		if (data_len)
			rec_crc = crc32_ext(cursor.data, data_len, rec_crc);
		mrec.head.rec_crc = rec_crc;
		error = copyout(&mrec, uptr, sizeof(mrec.rec));
		if (data_len && error == 0) {
			error = copyout(cursor.data, uptr + sizeof(mrec.rec),
					data_len);
		}
		eatdisk = 1;

		/*
		 * eatdisk controls whether we skip the current cursor
		 * position on the next scan or not.  If doing a SKIP
		 * the cursor is already positioned properly for the next
		 * scan and eatdisk will be 0.
		 */
didwrite:
		if (error == 0) {
			mirror->count += HAMMER_HEAD_DOALIGN(bytes);
			if (eatdisk)
				cursor.flags |= HAMMER_CURSOR_ATEDISK;
			else
				cursor.flags &= ~HAMMER_CURSOR_ATEDISK;
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT) {
		mirror->key_cur = mirror->key_end;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
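/*
 * Illustrative userland-side sketch (not part of this file): one way a
 * mirroring client might drive the ioctl above and walk the mrecords
 * packed into ubuf.  The ioctl name HAMMERIOC_MIRROR_READ follows the
 * in-tree hammer(8) utility; record processing and error handling are
 * elided, so treat this as an assumption-laden sketch rather than the
 * reference client.
 */
#if 0
static int
mirror_read_sketch(int fd, struct hammer_ioc_mirror_rw *mirror)
{
	union hammer_ioc_mrecord_any *mrec;
	size_t off;

	do {
		mirror->count = 0;
		if (ioctl(fd, HAMMERIOC_MIRROR_READ, mirror) < 0)
			return(-1);

		/*
		 * Records are packed back to back, each padded out to
		 * HAMMER_HEAD_DOALIGN(rec_size), matching how the kernel
		 * advanced mirror->count above.
		 */
		for (off = 0; off < mirror->count;
		     off += HAMMER_HEAD_DOALIGN(mrec->head.rec_size)) {
			mrec = (void *)((char *)mirror->ubuf + off);
			/* dispatch on SKIP / PASS / REC here */
		}

		/*
		 * key_cur marks how far the scan got (buffer full or
		 * signal); resume from it until the range is exhausted.
		 */
		mirror->key_beg = mirror->key_cur;
	} while (mirror->count != 0);
	return(0);
}
#endif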
int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_prune *prune)
{
	struct hammer_cursor cursor;
	hammer_btree_leaf_elm_t elm;
	struct hammer_ioc_prune_elm *copy_elms;
	struct hammer_ioc_prune_elm *user_elms;
	int error;
	int isdir;
	int elm_array_size;
	int seq;

	if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
		return(EINVAL);
	if ((prune->key_beg.localization | prune->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (prune->key_beg.localization > prune->key_end.localization)
		return(EINVAL);
	if (prune->key_beg.localization == prune->key_end.localization) {
		if (prune->key_beg.obj_id > prune->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}
	if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
		return(EINVAL);		/* 22 EINVAL */

	prune->key_cur.localization = (prune->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	prune->key_cur.obj_id = prune->key_end.obj_id;
	prune->key_cur.key = HAMMER_MAX_KEY;

	/*
	 * Copy element array from userland
	 */
	elm_array_size = sizeof(*copy_elms) * prune->nelms;
	user_elms = prune->elms;
	copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
	if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
		goto failed;
	prune->elms = copy_elms;

	seq = trans->hmp->flusher.done;

	/*
	 * Scan backwards.  Retries typically occur if a deadlock is
	 * detected.
	 */
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = (prune->key_beg.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_beg.obj_id = prune->key_beg.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = prune->key_cur.localization;
	cursor.key_end.obj_id = prune->key_cur.obj_id;
	cursor.key_end.key = prune->key_cur.key;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag allows the B-Tree code to clean up loose ends.  At
	 * the moment (XXX) it also means we have to hold the sync lock
	 * through the iteration.
	 */
	cursor.flags |= HAMMER_CURSOR_PRUNING;

	hammer_sync_lock_sh(trans);
	error = hammer_btree_last(&cursor);
	hammer_sync_unlock(trans);

	while (error == 0) {
		/*
		 * Check for work
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		prune->key_cur = elm->base;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		if (prune->stat_oldest_tid > elm->base.create_tid)
			prune->stat_oldest_tid = elm->base.create_tid;

		if (hammer_debug_general & 0x0200) {
			kprintf("check %016llx %016llx cre=%016llx "
				"del=%016llx\n",
				(long long)elm->base.obj_id,
				(long long)elm->base.key,
				(long long)elm->base.create_tid,
				(long long)elm->base.delete_tid);
		}

		if (prune_should_delete(prune, elm)) {
			if (hammer_debug_general & 0x0200) {
				kprintf("check %016llx %016llx: DELETE\n",
					(long long)elm->base.obj_id,
					(long long)elm->base.key);
			}

			/*
			 * NOTE: This can return EDEADLK
			 *
			 * Acquiring the sync lock guarantees that the
			 * operation will not cross a synchronization
			 * boundary (see the flusher).
			 *
			 * We don't need to track inodes or next_tid when
			 * we are destroying deleted records.
			 */
			isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

			hammer_sync_lock_sh(trans);
			error = hammer_delete_at_cursor(&cursor,
							HAMMER_DELETE_DESTROY,
							cursor.trans->tid,
							cursor.trans->time32,
							0, &prune->stat_bytes);
			hammer_sync_unlock(trans);
			if (error)
				break;

			if (isdir)
				++prune->stat_dirrecords;
			else
				++prune->stat_rawrecords;

			/*
			 * The current record might now be the one after
			 * the one we deleted; set ATEDISK to force us
			 * to skip it (since we are iterating backwards).
			 */
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
		} else {
			/*
			 * Nothing to delete, but we may have to check
			 * other things.
			 */
			prune_check_nlinks(&cursor, elm);
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x0100) {
				kprintf("check %016llx %016llx: SKIP\n",
					(long long)elm->base.obj_id,
					(long long)elm->base.key);
			}
		}
		++prune->stat_scanrecords;

		/*
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}
		hammer_sync_lock_sh(trans);
		error = hammer_btree_iterate_reverse(&cursor);
		hammer_sync_unlock(trans);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		prune->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	prune->elms = user_elms;
	kfree(copy_elms, M_TEMP);
	return(error);
}
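/*
 * Sketch of the per-element test prune_should_delete() is expected to
 * apply, reconstructed from the hammer_ioc_prune_elm fields (beg_tid,
 * end_tid, mod_tid).  This is an illustration under those assumptions,
 * not a copy of the real function.  The idea: a deleted record is
 * destroyed when its create and delete TIDs fall in the same
 * mod_tid-sized slice of an element's [beg_tid, end_tid] range, i.e.
 * no retained snapshot boundary separates its creation and deletion.
 */
#if 0
static int
prune_should_delete_sketch(struct hammer_ioc_prune *prune,
			   hammer_btree_leaf_elm_t elm)
{
	struct hammer_ioc_prune_elm *scan;
	int i;

	/* PRUNE_ALL destroys every deleted record outright */
	if (prune->head.flags & HAMMER_IOC_PRUNE_ALL)
		return(elm->base.delete_tid != 0);

	for (i = 0; i < prune->nelms; ++i) {
		scan = &prune->elms[i];
		if (elm->base.delete_tid &&
		    elm->base.create_tid >= scan->beg_tid &&
		    elm->base.delete_tid <= scan->end_tid &&
		    (elm->base.create_tid - scan->beg_tid) / scan->mod_tid ==
		    (elm->base.delete_tid - scan->beg_tid) / scan->mod_tid) {
			return(1);
		}
	}
	return(0);
}
#endif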
/*
 * Rollback the specified PFS to (trunc_tid - 1), removing everything
 * greater than or equal to trunc_tid.  The PFS must not have been in
 * no-mirror mode or the MIRROR_FILTERED scan will not work properly.
 *
 * This is typically used to remove any partial syncs when upgrading a
 * slave to a master.  It can theoretically also be used to roll back
 * any PFS, including PFS#0, BUT ONLY TO POINTS THAT HAVE NOT YET BEEN
 * PRUNED, and to points that are older only if they are on a retained
 * (pruning softlink) boundary.
 *
 * Rollbacks destroy information.  If you don't mind inode numbers
 * changing, a better way would be to cpdup a snapshot back onto the
 * master.
 */
static int
hammer_pfs_rollback(hammer_transaction_t trans,
		    hammer_pseudofs_inmem_t pfsm,
		    hammer_tid_t trunc_tid)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	struct hammer_base_elm key_cur;
	int error;
	int seq;

	bzero(&cmirror, sizeof(cmirror));
	bzero(&key_cur, sizeof(key_cur));
	key_cur.localization = HAMMER_MIN_LOCALIZATION | pfsm->localization;
	key_cur.obj_id = HAMMER_MIN_OBJID;
	key_cur.key = HAMMER_MIN_KEY;
	key_cur.create_tid = 1;
	key_cur.rec_type = HAMMER_MIN_RECTYPE;

	seq = trans->hmp->flusher.done;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = key_cur;
	cursor.key_end.localization = HAMMER_MAX_LOCALIZATION |
				      pfsm->localization;
	cursor.key_end.obj_id = HAMMER_MAX_OBJID;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Do an optimized scan of only records created or modified
	 * >= trunc_tid, so we can fix up those records.  We must
	 * still check the TIDs but this greatly reduces the size of
	 * the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = trunc_tid;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Abort the rollback.
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * We only care about leaf nodes.  Internal nodes can be
		 * returned in mirror-filtered mode (they are used to
		 * generate SKIP mrecords), but we don't need them for
		 * this code.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF) {
			key_cur = cursor.node->ondisk->elms[cursor.index].base;
			error = hammer_pfs_delete_at_cursor(&cursor,
							    trunc_tid);
		}

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		if (error == 0)
			error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
failed:
	return(error);
}
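/*
 * Sketch of the companion helper hammer_pfs_delete_at_cursor() used by
 * the rollback loop above; reconstructed behavior offered as an
 * assumption, not the authoritative implementation.  Records created
 * at or after trunc_tid are destroyed outright; records merely deleted
 * at or after trunc_tid have their deletion adjusted away so the PFS
 * reads as it did at (trunc_tid - 1).
 */
#if 0
static int
hammer_pfs_delete_at_cursor_sketch(hammer_cursor_t cursor,
				   hammer_tid_t trunc_tid)
{
	hammer_btree_leaf_elm_t elm;

	elm = &cursor->node->ondisk->elms[cursor->index].leaf;

	if (elm->base.create_tid >= trunc_tid) {
		/* record born inside the truncated range: destroy it */
		return(hammer_delete_at_cursor(cursor,
					       HAMMER_DELETE_DESTROY,
					       cursor->trans->tid,
					       cursor->trans->time32,
					       0, NULL));
	}
	if (elm->base.delete_tid >= trunc_tid) {
		/* deletion happened inside the range: undo it */
		return(hammer_delete_at_cursor(cursor,
					       HAMMER_DELETE_ADJUST,
					       cursor->trans->tid,
					       cursor->trans->time32,
					       0, NULL));
	}
	return(0);
}
#endif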
int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_reblock *reblock)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int checkspace_count;
	int error;
	int seq;
	int slop;

	/*
	 * A fill level <= 20% is considered an emergency.  free_level is
	 * inverted from fill_level.
	 */
	if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
		slop = HAMMER_CHKSPC_EMERGENCY;
	else
		slop = HAMMER_CHKSPC_REBLOCK;

	if ((reblock->key_beg.localization | reblock->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
		return(EINVAL);
	if (reblock->free_level < 0)
		return(EINVAL);

	reblock->key_cur = reblock->key_beg;
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	reblock->key_cur.localization += ip->obj_localization;

	checkspace_count = 0;
	seq = trans->hmp->flusher.done;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = reblock->key_cur.localization;
	cursor.key_beg.obj_id = reblock->key_cur.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = (reblock->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_end.obj_id = reblock->key_end.obj_id;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

	/*
	 * This flag allows the B-Tree scan code to return internal nodes,
	 * so we can reblock them in addition to the leaves.  Only specify
	 * it if we intend to reblock B-Tree nodes.
	 */
	if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
		cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Internal or Leaf node
		 */
		KKASSERT(cursor.index < cursor.node->ondisk->count);
		elm = &cursor.node->ondisk->elms[cursor.index];
		reblock->key_cur.obj_id = elm->base.obj_id;
		reblock->key_cur.localization = elm->base.localization;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 *
		 * We must force a retest in case the unlocked cursor is
		 * moved to the end of the leaf, or moved to an internal
		 * node.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		if (hammer_checkspace(trans->hmp, slop)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			cursor.flags |= HAMMER_CURSOR_RETEST;
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
			goto skip;
		}

		/*
		 * Acquiring the sync_lock prevents the operation from
		 * crossing a synchronization boundary.
		 *
		 * NOTE: cursor.node may have changed on return.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		hammer_sync_lock_sh(trans);
		error = hammer_reblock_helper(reblock, &cursor, elm);
		hammer_sync_unlock(trans);

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Setup for iteration; our cursor flags may be modified by
		 * other threads while we are unlocked.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * We allocate data buffers, whose dirty levels we do not
		 * currently track because we allow the kernel to write
		 * them.  But if we allocate too many we can still deadlock
		 * the buffer cache.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 *	    (The cursor's node and element may change!)
		 */
		if (bd_heatup()) {
			hammer_unlock_cursor(&cursor);
			bwillwrite(HAMMER_XBUFSIZE);
			hammer_lock_cursor(&cursor);
		}
		/* XXX vm_wait_nominal(); */
skip:
		if (error == 0) {
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
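/*
 * Illustrative userland-side sketch (not part of this file): how a
 * caller such as hammer(8) might derive free_level from a fill
 * percentage before issuing the ioctl.  The ioctl name HAMMERIOC_REBLOCK
 * and the exact conversion are assumptions; the invariant the code above
 * relies on is only that free_level is the inverse of the fill level,
 * e.g. >= 80% free space in a bigblock means <= 20% full, which trips
 * the emergency slop.
 */
#if 0
static int
reblock_sketch(int fd, int fill_percentage)
{
	struct hammer_ioc_reblock reblock;

	bzero(&reblock, sizeof(reblock));
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_BTREE; /* plus data/inode
						   * flags as desired */

	/* free_level is inverted from the requested fill level */
	reblock.free_level = (int)((int64_t)(100 - fill_percentage) *
				   HAMMER_LARGEBLOCK_SIZE / 100);

	return(ioctl(fd, HAMMERIOC_REBLOCK, &reblock));
}
#endif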