/**
 * chfs_remap_leb - unmap and then map a leb
 * @chmp: chfs mount structure
 *
 * This function gets an eraseblock from the erasable queue, unmaps it through
 * EBH and maps another eraseblock to the same LNR.
 * EBH will find a free eraseblock if there is any, or will erase one if there
 * are only dirty blocks.
 *
 * Returns zero in case of success, an error code otherwise.
 *
 * Needs more brainstorming here.
 */
int
chfs_remap_leb(struct chfs_mount *chmp)
{
	int err;
	struct chfs_eraseblock *cheb;
	uint32_t dirty, unchecked, used, free, wasted;

	dbg("chfs_remap_leb\n");

	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));

	if (!chmp->chm_nr_erasable_blocks) {
		/*
		 * We don't have any erasable blocks: check whether there are
		 * blocks on erasable_pending_wbuf_queue, flush the pending
		 * wbuf data and then we can remap one of them.
		 * If that list is empty too, we would need to GC.
		 */
		if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
			cheb = TAILQ_FIRST(&chmp->chm_erasable_pending_wbuf_queue);
			TAILQ_REMOVE(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
			if (chmp->chm_wbuf_len) {
				mutex_exit(&chmp->chm_lock_sizes);
				chfs_flush_pending_wbuf(chmp);
				mutex_enter(&chmp->chm_lock_sizes);
			}
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		} else {
			/* We can't delete any block. */
			return ENOSPC;
		}
	}

	/* Take a block from the erase pending queue. */
	cheb = TAILQ_FIRST(&chmp->chm_erase_pending_queue);
	TAILQ_REMOVE(&chmp->chm_erase_pending_queue, cheb, queue);
	chmp->chm_nr_erasable_blocks--;

	/* Remember the old accounting so the sizes can be reset below. */
	dirty = cheb->dirty_size;
	unchecked = cheb->unchecked_size;
	used = cheb->used_size;
	free = cheb->free_size;
	wasted = cheb->wasted_size;

	/* Free allocated node references for this eraseblock. */
	chfs_free_node_refs(cheb);

	err = chfs_unmap_leb(chmp, cheb->lnr);
	if (err)
		return err;

	err = chfs_map_leb(chmp, cheb->lnr);
	if (err)
		return err;

	/* Reset the block state to default and adjust the chmp sizes too. */
	chfs_change_size_dirty(chmp, cheb, -dirty);
	chfs_change_size_unchecked(chmp, cheb, -unchecked);
	chfs_change_size_used(chmp, cheb, -used);
	chfs_change_size_free(chmp, cheb, chmp->chm_ebh->eb_size - free);
	chfs_change_size_wasted(chmp, cheb, -wasted);

	KASSERT(cheb->dirty_size == 0);
	KASSERT(cheb->unchecked_size == 0);
	KASSERT(cheb->used_size == 0);
	KASSERT(cheb->free_size == chmp->chm_ebh->eb_size);
	KASSERT(cheb->wasted_size == 0);

	cheb->first_node = NULL;
	cheb->last_node = NULL;

	/* Put the freshly mapped block on the free queue. */
	TAILQ_INSERT_TAIL(&chmp->chm_free_queue, cheb, queue);
	chmp->chm_nr_free_blocks++;

	dbg("remapped (free: %d, erasable: %d)\n",
	    chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks);

	KASSERT(!TAILQ_EMPTY(&chmp->chm_free_queue));

	return 0;
}
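
/*
 * Illustrative sketch only (compiled out): one way a caller could drive
 * chfs_remap_leb().  The function asserts that chm_lock_mountfields and
 * chm_lock_sizes are held and that the wbuf rwlock is not write-held, so a
 * hypothetical caller would take those two mutexes first.  The helper name
 * chfs_example_get_free_block() is an assumption for illustration and is not
 * part of CHFS.
 */
#if 0
static int
chfs_example_get_free_block(struct chfs_mount *chmp)
{
	int err = 0;

	mutex_enter(&chmp->chm_lock_mountfields);
	mutex_enter(&chmp->chm_lock_sizes);

	/* If the free queue is empty, try to recycle an erasable block. */
	if (TAILQ_EMPTY(&chmp->chm_free_queue))
		err = chfs_remap_leb(chmp);

	mutex_exit(&chmp->chm_lock_sizes);
	mutex_exit(&chmp->chm_lock_mountfields);

	return err;
}
#endif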
/* chfs_scan_check_vnode - check vnode CRC and add it to the vnode cache */
int
chfs_scan_check_vnode(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	struct chfs_vnode_cache *vc;
	struct chfs_flash_vnode *vnode = buf;
	struct chfs_node_ref *nref;
	int err;
	uint32_t crc;
	ino_t vno;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	crc = crc32(0, (uint8_t *)vnode,
	    sizeof(struct chfs_flash_vnode) - 4);

	/* check node crc */
	if (crc != le32toh(vnode->node_crc)) {
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(vnode->length));
		if (err)
			return err;

		return CHFS_NODE_BADCRC;
	}

	vno = le64toh(vnode->vno);

	/* find the corresponding vnode cache */
	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_vnode_cache_get(chmp, vno);
	if (!vc) {
		vc = chfs_scan_make_vnode_cache(chmp, vno);
		if (!vc) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
	}

	nref = chfs_alloc_node_ref(cheb);
	nref->nref_offset = ofs;

	KASSERT(nref->nref_lnr == cheb->lnr);

	/* check version of vnode */
	if ((struct chfs_vnode_cache *)vc->v != vc) {
		if (le64toh(vnode->version) > *vc->vno_version) {
			/* Newer version: update the cached version number. */
			*vc->vno_version = le64toh(vnode->version);
			chfs_add_vnode_ref_to_vc(chmp, vc, nref);
		} else {
			/* Older version: mark the node obsolete (dirty). */
			err = chfs_update_eb_dirty(chmp, cheb,
			    sizeof(struct chfs_flash_vnode));
			mutex_exit(&chmp->chm_lock_vnocache);
			if (err)
				return err;

			return CHFS_NODE_OK;
		}
	} else {
		/* First vnode node seen for this cache entry. */
		vc->vno_version = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
		if (!vc->vno_version) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
		*vc->vno_version = le64toh(vnode->version);
		chfs_add_vnode_ref_to_vc(chmp, vc, nref);
	}
	mutex_exit(&chmp->chm_lock_vnocache);

	/* update sizes */
	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
	chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	return CHFS_NODE_OK;
}
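
/*
 * Illustrative sketch only (compiled out): how a scan loop might consume the
 * return value of chfs_scan_check_vnode().  CHFS_NODE_OK and CHFS_NODE_BADCRC
 * are the values used above; the helper name chfs_example_scan_vnode() and
 * the error-handling policy are assumptions for illustration, not part of
 * CHFS.
 */
#if 0
static int
chfs_example_scan_vnode(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	int ret;

	ret = chfs_scan_check_vnode(chmp, cheb, buf, ofs);
	switch (ret) {
	case CHFS_NODE_OK:
		/* Node accounted as used (or marked obsolete if outdated). */
		return 0;
	case CHFS_NODE_BADCRC:
		/* Bad CRC: the node was already counted as dirty, skip it. */
		return 0;
	default:
		/* Propagate real errors such as ENOMEM. */
		return ret;
	}
}
#endif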
/*
 * chfs_check_td_data - checks the data CRC of the node
 *
 * Returns: 0 - if everything is OK;
 *          1 - if the CRC is incorrect;
 *          2 - on any other check failure;
 *          an error code if an error occurred.
 */
int
chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	int err;
	size_t retlen, len, totlen;
	uint32_t crc;
	uint64_t ofs;
	char *buf;
	struct chfs_node_ref *nref = td->node->nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	ofs = CHFS_GET_OFS(nref->nref_offset) +
	    sizeof(struct chfs_flash_data_node);
	len = td->node->size;
	if (!len)
		return 0;

	/* Read data. */
	buf = kmem_alloc(len, KM_SLEEP);
	if (!buf) {
		dbg("allocation error\n");
		return 2;
	}
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
	if (err) {
		dbg("error while reading: %d\n", err);
		err = 2;
		goto out;
	}

	/* Check crc. */
	if (len != retlen) {
		dbg("len:%zu, retlen:%zu\n", len, retlen);
		err = 2;
		goto out;
	}
	crc = crc32(0, (uint8_t *)buf, len);

	if (crc != td->data_crc) {
		dbg("crc failed, calculated: 0x%x, orig: 0x%x\n",
		    crc, td->data_crc);
		err = 1;
		goto out;
	}

	/* Correct sizes. */
	CHFS_MARK_REF_NORMAL(nref);
	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_unchecked(chmp,
	    &chmp->chm_blocks[nref->nref_lnr], -totlen);
	chfs_change_size_used(chmp,
	    &chmp->chm_blocks[nref->nref_lnr], totlen);
	mutex_exit(&chmp->chm_lock_sizes);
	KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <=
	    chmp->chm_ebh->eb_size);

	err = 0;
out:
	kmem_free(buf, len);
	return err;
}
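
/*
 * Illustrative sketch only (compiled out): how a caller might act on the
 * return values documented above.  The helper name chfs_example_verify_td()
 * and the policy of treating any non-zero result as "do not use this tmp
 * dnode" are assumptions for illustration, not part of CHFS.
 */
#if 0
static bool
chfs_example_verify_td(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
	int ret;

	ret = chfs_check_td_data(chmp, td);
	if (ret == 0)
		return true;	/* data CRC matched, the node is usable */

	if (ret == 1)
		dbg("obsolete node: data CRC mismatch\n");
	else
		dbg("could not check node data: %d\n", ret);

	return false;		/* 1, 2 or an error code: drop the node */
}
#endif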