/*
 * Set (replace) the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 *
 * Any existing CONFIG record keyed to the root object is destroyed and
 * a fresh one is created from config->config.  Per the ioctl convention
 * the operation status is reported via config->head.error; the function
 * itself returns 0 unless cursor initialization fails.
 *
 * This is kinda a hack.
 */
static int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/*
	 * Describe the CONFIG record: root object, page 0, fresh TID.
	 */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Destroy any prior CONFIG record.  Do not destroy the record if
	 * its data could not be extracted -- previously the extraction
	 * error was silently overwritten by the delete's return value.
	 */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract_data(&cursor);
		if (error == 0) {
			error = hammer_delete_at_cursor(&cursor,
							HAMMER_DELETE_DESTROY,
							0, 0, 0, NULL);
		}
		if (error == EDEADLK) {
			/* deadlock: drop the cursor and restart from scratch */
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	if (error == ENOENT)
		error = 0;	/* no prior record is not an error */
	if (error == 0) {
		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
/*
 * Handle B-Tree records.
 *
 * We must iterate to mrec->base.key (non-inclusively), and then process
 * the record.  We are allowed to write a new record or delete an existing
 * record, but cannot replace an existing record.
 *
 * mirror->key_cur must be carefully set when we succeed in processing
 * this mrec.
 *
 * Parameters:
 *	cursor		- B-Tree cursor positioned by the caller's iteration
 *	mrec		- mirroring media record to apply
 *	mirror		- overall mirror-write state (tid range, key_cur)
 *	localization	- target PFS localization to re-key the record to
 *	uptr		- userland pointer to the record's data payload
 *
 * Returns 0 on success or an errno; EDEADLK may be returned and is
 * handled by the caller (see the retry note below).
 */
static int
hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
			    struct hammer_ioc_mrecord_rec *mrec,
			    struct hammer_ioc_mirror_rw *mirror,
			    u_int32_t localization,
			    char *uptr)
{
	int error;

	/*
	 * Sanity-check the media record header: data_len must be
	 * non-negative, fit in an extended buffer, and the advertised
	 * record size must cover the header plus the data payload.
	 */
	if (mrec->leaf.data_len < 0 ||
	    mrec->leaf.data_len > HAMMER_XBUFSIZE ||
	    mrec->leaf.data_len + sizeof(*mrec) > mrec->head.rec_size) {
		return(EINVAL);
	}

	/*
	 * Re-localize for target.  relocalization of data is handled
	 * by hammer_mirror_write().
	 */
	mrec->leaf.base.localization &= HAMMER_LOCALIZE_MASK;
	mrec->leaf.base.localization += localization;

	/*
	 * Delete records through until we reach (non-inclusively) the
	 * target record.
	 */
	cursor->key_end = mrec->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	error = hammer_mirror_delete_to(cursor, mirror);

	/*
	 * Certain records are not part of the mirroring operation
	 */
	if (error == 0 && hammer_mirror_nomirror(&mrec->leaf.base))
		return(0);

	/*
	 * Locate the record.
	 *
	 * If the record exists only the delete_tid may be updated.
	 *
	 * If the record does not exist we can create it only if the
	 * create_tid is not too old.  If the create_tid is too old
	 * it may have already been destroyed on the slave from pruning.
	 *
	 * Note that mirror operations are effectively as-of operations
	 * and delete_tid can be 0 for mirroring purposes even if it is
	 * not actually 0 at the originator.
	 *
	 * These functions can return EDEADLK
	 */
	if (error == 0) {
		cursor->key_beg = mrec->leaf.base;
		cursor->flags |= HAMMER_CURSOR_BACKEND;
		cursor->flags &= ~HAMMER_CURSOR_INSERT;
		error = hammer_btree_lookup(cursor);
	}

	if (error == 0 && hammer_mirror_check(cursor, mrec)) {
		/* record exists and differs: update its delete_tid only */
		error = hammer_mirror_update(cursor, mrec);
	} else if (error == ENOENT) {
		if (mrec->leaf.base.create_tid >= mirror->tid_beg) {
			error = hammer_create_at_cursor(
					cursor, &mrec->leaf,
					uptr, HAMMER_CREATE_MODE_UMIRROR);
		} else {
			/* too old: presumed already pruned on the slave */
			error = 0;
		}
	}
	/*
	 * EALREADY (record already present and identical) still counts as
	 * forward progress for the mirror key cursor.
	 */
	if (error == 0 || error == EALREADY)
		mirror->key_cur = mrec->leaf.base;
	return(error);
}
/*
 * Add a snapshot transaction id(s) to the list of snapshots.
 *
 * NOTE: Records are created with an allocated TID.  If a flush cycle
 *	 is in progress the record may be synced in the current flush
 *	 cycle and the volume header will reflect the allocation of the
 *	 TID, but the synchronization point may not catch up to the
 *	 TID until the next flush cycle.
 *
 * The per-snapshot status is reported via snap->head.error; the
 * function itself returns 0 unless validation or cursor setup fails.
 */
static int
hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	hammer_mount_t hmp = ip->hmp;
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);
	if (snap->index >= snap->count)
		return (EINVAL);

	hammer_lock_ex(&hmp->snapshot_lock);
again:
	/*
	 * Look for keys starting after the previous iteration, or at
	 * the beginning if snap->index is 0.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		/*
		 * Bug fix: release the snapshot lock on this early exit;
		 * it was previously leaked, leaving the lock held forever.
		 */
		hammer_unlock(&hmp->snapshot_lock);
		return(error);
	}
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Template leaf for the SNAPSHOT records; only base.key (the
	 * snapshot TID) varies per iteration.
	 */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.data_len = sizeof(struct hammer_snapshot_data);

	while (snap->index < snap->count) {
		leaf.base.key = (int64_t)snap->snaps[snap->index].tid;
		cursor.key_beg = leaf.base;
		error = hammer_btree_lookup(&cursor);
		if (error == 0) {
			/* a snapshot record with this TID already exists */
			error = EEXIST;
			break;
		}
		/*
		 * NOTE(review): lookup errors other than ENOENT (e.g. EIO)
		 * are masked here and creation is attempted anyway --
		 * presumably ENOENT is the only expected outcome; confirm.
		 */

		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&snap->snaps[snap->index],
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			/* deadlock: restart with a fresh cursor, same index */
			hammer_done_cursor(&cursor);
			goto again;
		}
		cursor.flags |= HAMMER_CURSOR_ASOF;
		if (error)
			break;
		++snap->index;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	hammer_unlock(&hmp->snapshot_lock);
	return(0);
}