/*
 * mtdblock_release - drop one reference to an mtdblock translation device.
 * @mbd: block-translation device being released
 *
 * Flushes the write-back cache, and on the last close frees the cache
 * buffer and (for devices opened writable) syncs the underlying MTD.
 * Lock order: mtdblks_lock is taken before cache_mutex; the refcount
 * (mtdblk->count) is protected by mtdblks_lock.
 */
static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_release\n");

	mutex_lock(&mtdblks_lock);

	/* Push any dirty cached sector back to flash before dropping the ref. */
	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/*
		 * It was the last usage. Free the cache, but only sync if
		 * opened for writing.
		 */
		if (mbd->file_mode & FMODE_WRITE)
			mtd_sync(mbd->mtd);
		vfree(mtdblk->cache_data);
	}

	mutex_unlock(&mtdblks_lock);

	pr_debug("ok\n");
}
/*
 * jffs2_put_super - tear down a JFFS2 superblock at unmount time.
 * @sb: the VFS superblock being released
 *
 * Writes back dirty state, pads and flushes the write buffer under
 * alloc_sem, then frees the filesystem's in-core structures. The
 * teardown order matters: summary/ino-cache/raw-node-ref data must be
 * released before the eraseblock array and flash state, and the final
 * mtd_sync() flushes any remaining device-level caches.
 */
static void jffs2_put_super (struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	jffs2_dbg(2, "%s()\n", __func__);

	/* Commit any pending superblock state before tearing down. */
	if (sb->s_dirt)
		jffs2_write_super(sb);

	/* Pad and flush the write buffer; alloc_sem excludes allocators. */
	mutex_lock(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	mutex_unlock(&c->alloc_sem);

	jffs2_sum_exit(c);

	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	/* blocks[] may have been vmalloc'd or kmalloc'd depending on size. */
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
	jffs2_flash_cleanup(c);
	kfree(c->inocache_list);
	jffs2_clear_xattr_subsystem(c);
	mtd_sync(c->mtd);

	jffs2_dbg(1, "%s(): returning\n", __func__);
}
/*
 * mtdblock_flush - flush the mtdblock write-back cache and sync the MTD.
 * @dev: block-translation device to flush
 *
 * Returns 0 on success or the negative error from write_cached_data().
 * The original always returned 0, silently swallowing cache write-back
 * failures; propagate the error so callers learn about lost data.
 */
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	int ret;

	mutex_lock(&mtdblk->cache_mutex);
	ret = write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	/* Sync the device regardless; the write-back error still wins. */
	mtd_sync(dev->mtd);
	return ret;
}
/*
 * concat_sync - sync a concatenated MTD by syncing every subdevice.
 * @mtd: the virtual concatenation device
 */
static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int idx;

	/* Forward the sync to each member device in order. */
	for (idx = 0; idx < concat->num_subdev; idx++)
		mtd_sync(concat->subdev[idx]);
}
/**
 * ubi_sync - synchronize UBI device buffers.
 * @ubi_num: UBI device to synchronize
 *
 * The underlying MTD device may cache data in hardware or in software. This
 * function ensures the caches are flushed. Returns zero in case of success and
 * a negative error code in case of failure.
 */
int ubi_sync(int ubi_num)
{
	struct ubi_device *ubi = ubi_get_device(ubi_num);

	/* ubi_get_device() takes a reference; NULL means no such device. */
	if (!ubi)
		return -ENODEV;

	mtd_sync(ubi->mtd);
	ubi_put_device(ubi);
	return 0;
}
s32 nv_emmc_remove(const s8* path) { s32 ret = -1; struct nv_emmc_file_header_stru* fd = NULL; struct erase_info erase = {0,}; struct mtd_info* mtd = NULL; u32 i = 0; nv_file_debug(NV_FILE_REMOVE_API,0,0,0,0); for(i=0;i<NV_FILE_BUTT;i++) { if(0 == strcmp(path,g_nv_file[i].path)) { fd = &g_nv_file[i]; break; } } if(NULL == fd) { nv_file_debug(NV_FILE_REMOVE_API,1,0,0,0); return -1; } switch(fd->emmc_type) { case NV_FILE_DLOAD: g_emmc_info.nv_dload.nv_bin.magic_num = NV_FLASH_NULL; break; case NV_FILE_BACKUP: memset(&g_emmc_info.bak_sec,NV_FLASH_FILL,sizeof(struct nv_file_info_stru)); goto flash_erase; case NV_FILE_CUST_CARD_1: g_emmc_info.nv_dload.cust_xml[0].magic_num = NV_FLASH_NULL; break; case NV_FILE_XNV_CARD_1: g_emmc_info.nv_dload.xnv_xml[0].magic_num = NV_FLASH_NULL; break; case NV_FILE_CUST_CARD_2: g_emmc_info.nv_dload.cust_xml[1].magic_num = NV_FLASH_NULL; break; case NV_FILE_XNV_CARD_2: g_emmc_info.nv_dload.xnv_xml[1].magic_num = NV_FLASH_NULL; break; case NV_FILE_XNV_MAP_CARD_1: g_emmc_info.nv_dload.xnv_map[0].magic_num = NV_FLASH_NULL; break; case NV_FILE_XNV_MAP_CARD_2: g_emmc_info.nv_dload.xnv_map[1].magic_num = NV_FLASH_NULL; break; case NV_FILE_DEFAULT: memset(&g_emmc_info.def_sec,NV_FLASH_FILL,sizeof(struct nv_file_info_stru)); goto flash_erase; case NV_FILE_SYS_NV: memset(&g_emmc_info.sys_nv,NV_FLASH_FILL,sizeof(g_emmc_info.sys_nv)); goto flash_erase; default: return BSP_ERR_NV_INVALID_PARAM; } if(true == nv_dload_exist_file()) { return NV_OK; } flash_erase: mtd = get_mtd_device_nm(fd->name); if(IS_ERR(mtd)) { printf("[%s]:get mtd device err! %s\n",__func__,fd->name); return -1; } erase.addr = 0; erase.mtd = mtd; erase.len = mtd->size; erase.callback = NULL; erase.priv = 0; erase.time = 10000; erase.retries = 2; ret = mtd_erase(mtd,&erase); mtd_sync(mtd); put_mtd_device(mtd); if(ret) { nv_file_debug(NV_FILE_REMOVE_API,2,(u32)ret,fd->emmc_type,0); printf("[%s]:ret 0x%x,mtd->name %s\n",__func__,ret,mtd->name); return ret; } return NV_OK; }
static void part_sync(struct mtd_info *mtd) { struct mtd_part *part = PART(mtd); mtd_sync(part->master); }