static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector, int len, const char *buf) { struct ubi_volume_desc *uv = ubiblk->uv; int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size; unsigned short sectors_per_page = uv->vol->ubi->min_io_size >> 9; unsigned short page_shift = ffs(uv->vol->ubi->min_io_size) - 1; unsigned short virt_block, page, page_offset; unsigned long virt_page; virt_page = sector / sectors_per_page; page_offset = sector % sectors_per_page; virt_block = virt_page / ppb; page = virt_page % ppb; if(ubi_is_mapped(uv, virt_block ) == UNMAPPED ){ mutex_lock(&ubiblk->cache_mutex); ubiblk_flush_writecache(ubiblk); mutex_unlock(&ubiblk->cache_mutex); ubiblk_setup_writecache(ubiblk, virt_block); ubi_leb_map(uv, virt_block, UBI_UNKNOWN); } else { if ( STATE_USED == ubiblk->write_cache_state ) { if ( ubiblk->vbw != virt_block) { // Commit before we start a new cache. mutex_lock(&ubiblk->cache_mutex); ubiblk_flush_writecache(ubiblk); mutex_unlock(&ubiblk->cache_mutex); ubiblk_setup_writecache(ubiblk, virt_block); ubi_leb_unmap(uv, virt_block); ubi_leb_map(uv, virt_block, UBI_UNKNOWN); } else { //dprintk("cache hit: 0x%x\n", virt_page); } } else { // printk("with existing mapping\n"); ubiblk_setup_writecache(ubiblk, virt_block); ubi_leb_unmap(uv, virt_block); ubi_leb_map(uv, virt_block, UBI_UNKNOWN); } } memcpy(&ubiblk->write_cache[(page<<page_shift) +(page_offset<<9)], buf,len); return 0; }
int ubifs_leb_map(struct ubifs_info *c, int lnum) { int err; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; if (!dbg_is_tst_rcvry(c)) err = ubi_leb_map(c->ubi, lnum); else err = dbg_leb_map(c, lnum); if (err) { ubifs_err("mapping LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); dbg_dump_stack(); } return err; }
/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds trees. It also makes sure that log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error code in case of
 * failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	/* Allocate outside the log mutex to keep the critical section short */
	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;

	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All @c->bud_bytes updates take
	 * place when both @c->log_mutex and @c->buds_lock are locked.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	/* No room for another reference node in the current log LEB -
	 * advance the log head to the next log LEB */
	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk to refer an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 *
		 * NOTE(review): this calls the 3-argument ubi_leb_map()
		 * directly instead of the ubifs_leb_map() wrapper (which
		 * uses the 2-argument UBI API) - looks like two UBI API
		 * generations are mixed in this file; confirm against the
		 * target kernel version.
		 */
		err = ubi_leb_map(c->ubi, bud->lnum, UBI_SHORTTERM);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d", c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs, UBI_SHORTTERM);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	/* Success: ownership of @bud passes to the buds tree; only the
	 * temporary reference node buffer is freed here */
	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	/* -EAGAIN just means "commit first" - any other error forces R/O */
	if (err != -EAGAIN)
		ubifs_ro_mode(c, err);
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}