//
// To truncate the file, this is the last block to be truncated.
// We need to discard one or more pages within this block, hence requires 'block recover'.
//
// \param[in] obj     object to be truncated (obj->err carries the failure reason)
// \param[in] fdn     file data block number; 0 means the file head block
// \param[in] remain  file length (in bytes) to remain after truncation
// \param[in] run_opt eDRY_RUN: only check that no buffer in the group is still
//                    held by someone else; eREAL_RUN: perform the truncation
// \return U_SUCC if obj->err is still UENOERR on exit, U_FAIL otherwise
//
static URET do_TruncateInternalWithBlockRecover(uffs_Object *obj, u16 fdn, u32 remain, RunOptionE run_opt)
{
	uffs_Device *dev = obj->dev;
	TreeNode *fnode = obj->node;
	u16 page_id, max_page_id;
	TreeNode *node;
	uffs_Buf *buf = NULL;
	u8 type;
	u32 block_start, block_offset;
	u16 parent, serial;
	int slot;
	uffs_BlockInfo *bc = NULL;
	int block = -1;

	if (fdn == 0) {
		// truncating within the file head block
		node = fnode;
		type = UFFS_TYPE_FILE;
		max_page_id = obj->head_pages;
		block_start = 0;
		parent = node->u.file.parent;
		serial = node->u.file.serial;
		block = node->u.file.block;
	}
	else {
		// truncating within a data block: look it up by file serial + fdn
		node = uffs_TreeFindDataNode(dev, fnode->u.file.serial, fdn);
		if (node == NULL) {
			obj->err = UEIOERR;
			uffs_Perror(UFFS_MSG_SERIOUS, "can't find data node when truncate obj");
			goto ext;
		}
		block = node->u.data.block;
		type = UFFS_TYPE_DATA;
		max_page_id = dev->attr->pages_per_block - 1;
		// byte offset of this data block within the file:
		// head pages come first, then (fdn - 1) full data blocks
		block_start = obj->head_pages * dev->com.pg_data_size +
						(fdn - 1) * dev->com.pg_data_size * dev->attr->pages_per_block;
		parent = node->u.data.parent;
		serial = node->u.data.serial;
	}

	if (run_opt == eDRY_RUN) {
		// checking the buffer. this is the main reason why we need the 'dry run' mode.
		for (page_id = 0; page_id <= max_page_id; page_id++) {
			buf = uffs_BufFind(dev, parent, serial, page_id);
			if (buf) {	//!< ok, the buffer was loaded before ...
				if (uffs_BufIsFree(buf) == U_FALSE) {
					obj->err = UEEXIST;
					break;	//!< and someone is still holding the buffer,
							// can't truncate it !!!
				}
			}
		}
		buf = NULL;
		goto ext;
	}

	// find the last page *after* truncate
	block_offset = remain - block_start;
	page_id = block_offset / dev->com.pg_data_size;
	if (fdn == 0)
		page_id++;	// head block: page 0 holds file metadata, data pages start at 1

	if (!uffs_Assert(page_id <= max_page_id, "fdn = %d, block_start = %d, remain = %d\n", fdn, block_start, remain)) {
		obj->err = UEUNKNOWN_ERR;
		goto ext;
	}

	// flush buffer before performing block recovery
	uffs_BufFlushGroup(dev, parent, serial);

	// load the last page
	buf = uffs_BufGetEx(dev, type, node, page_id, obj->oflag);
	if (buf == NULL) {
		obj->err = UENOMEM;
		uffs_Perror(UFFS_MSG_SERIOUS, "Can't get buf");
		goto ext;
	}

	uffs_BufWrite(dev, buf, NULL, 0, 0);	// just make this buf dirty

	// lock the group so no one else touches these buffers while we
	// shrink data_len and mark the tail pages empty
	slot = uffs_BufFindGroupSlot(dev, parent, serial);
	uffs_BufLockGroup(dev, slot);

	if (remain == 0)	// remain == 0: means discard all data in this block.
		buf->data_len = 0;
	else {
		remain = (remain % dev->com.pg_data_size);
		// remain == 0: means that we need to keep all data in this page.
		buf->data_len = (remain == 0 ? dev->com.pg_data_size : remain);
	}

	/* mark this buf as UFFS_BUF_EXT_MARK_TRUNC_TAIL, when flushing dirty buffers,
		UFFS will not do page recover for pages after this buf page id
		(because this file is ended at this page) */
	buf->ext_mark |= UFFS_BUF_EXT_MARK_TRUNC_TAIL;
	uffs_BufPut(dev, buf);

	// invalidate the rest page buf
	page_id++;
	for (; page_id <= max_page_id; page_id++) {
		buf = uffs_BufFind(dev, parent, serial, page_id);
		if (buf)
			uffs_BufMarkEmpty(dev, buf);
	}

	// flush dirty buffer immediately, forcing block recovery.
	uffs_BufFlushGroupEx(dev, parent, serial, U_TRUE);

	// unlock the group
	uffs_BufUnLockGroup(dev, slot);

	// Invalidate block info cache for the 'old' block
	// (block recovery replaced it with a new physical block)
	bc = uffs_BlockInfoGet(dev, block);
	if (bc) {
		uffs_BlockInfoExpire(dev, bc, UFFS_ALL_PAGES);
		uffs_BlockInfoPut(dev, bc);
	}

ext:
	return (obj->err == UENOERR ? U_SUCC : U_FAIL);
}
//
// Truncate the file within its last (partially kept) block.
// One or more pages inside this block must be discarded, which requires
// a 'block recover' (copy kept pages to a fresh block, erase the old one).
//
// \param[in] obj     object to be truncated (obj->err carries the failure reason)
// \param[in] fdn     file data block number; 0 means the file head block
// \param[in] remain  file length (in bytes) to remain after truncation
// \param[in] dry_run U_TRUE: only verify that no buffer in this group is still
//                    held by another client; U_FALSE: perform the truncation
// \return U_SUCC if obj->err is still UENOERR on exit, U_FAIL otherwise
//
static URET do_TruncateInternalWithBlockRecover(uffs_Object *obj, u16 fdn, u32 remain, UBOOL dry_run)
{
	uffs_Device *dev = obj->dev;
	TreeNode *fnode = obj->node;
	u16 page_id, max_page_id;
	TreeNode *node;
	uffs_Buf *buf = NULL;
	u8 type;
	u32 block_start;
	u16 parent, serial;
	int slot;

	if (fdn == 0) {
		// truncating within the file head block
		node = fnode;
		type = UFFS_TYPE_FILE;
		max_page_id = obj->head_pages;
		block_start = 0;
		parent = node->u.file.parent;
		serial = node->u.file.serial;
	}
	else {
		// truncating within a data block: look it up by file serial + fdn
		node = uffs_TreeFindDataNode(dev, fnode->u.file.serial, fdn);
		if (node == NULL) {
			obj->err = UEIOERR;
			uffs_Perror(UFFS_ERR_SERIOUS, "can't find data node when truncate obj");
			goto ext;
		}
		type = UFFS_TYPE_DATA;
		max_page_id = dev->attr->pages_per_block - 1;
		// byte offset of this data block within the file:
		// head pages come first, then (fdn - 1) full data blocks
		block_start = obj->head_pages * dev->com.pg_data_size +
						(fdn - 1) * dev->com.pg_data_size * dev->attr->pages_per_block;
		parent = node->u.data.parent;
		serial = node->u.data.serial;
	}

	if (dry_run == U_TRUE) {
		// checking the buffer. this is the main reason why we need the 'dry run' mode.
		for (page_id = 0; page_id <= max_page_id; page_id++) {
			buf = uffs_BufFind(dev, parent, serial, page_id);
			if (buf) {	//!< ok, the buffer was loaded before ...
				if (uffs_BufIsFree(buf) == U_FALSE) {
					obj->err = UEEXIST;
					break;	//!< and someone is still holding the buffer, can't truncate it !!!
				}
			}
		}
		buf = NULL;
		goto ext;
	}

	// find the last page after truncate
	// head block: page 0 holds file metadata, so data pages start at 1
	for (page_id = (fdn == 0 ? 1 : 0); page_id <= max_page_id; page_id++) {
		if (block_start + (page_id + 1) * dev->com.pg_data_size >= remain)
			break;
	}

	if (page_id > max_page_id) {
		obj->err = UEUNKNOWN;
		uffs_Perror(UFFS_ERR_SERIOUS, "Overflow");
		goto ext;
	}

	// flush buffer before performing block recovery
	uffs_BufFlushGroup(dev, parent, serial);

	// load the last page
	buf = uffs_BufGetEx(dev, type, node, page_id);
	if (buf == NULL) {
		obj->err = UENOMEM;
		uffs_Perror(UFFS_ERR_SERIOUS, "Can't get buf");
		goto ext;
	}

	uffs_BufWrite(dev, buf, NULL, 0, 0);	// just make this buf dirty

	// lock the group so no one else touches these buffers while we
	// shrink data_len and mark the tail pages empty
	slot = uffs_BufFindGroupSlot(dev, parent, serial);
	uffs_BufLockGroup(dev, slot);

	if (remain == 0) {
		// remain == 0: discard all data in this block.
		buf->data_len = 0;
	}
	else {
		remain = (remain % dev->com.pg_data_size);
		// remain == 0 here: the kept length lands exactly on a page
		// boundary, so the whole page is kept; otherwise keep the
		// partial tail of 'remain' bytes.
		// FIX: was '... : 0', which threw away the partial-page tail
		// data whenever the new length was not page-aligned.
		buf->data_len = (remain == 0 ? dev->com.pg_data_size : remain);
	}

	/* mark this buf as UFFS_BUF_EXT_MARK_TRUNC_TAIL, so when flushing dirty
		buffers UFFS will not do page recover for pages after this page id
		(the file ends at this page) */
	buf->ext_mark |= UFFS_BUF_EXT_MARK_TRUNC_TAIL;
	uffs_BufPut(dev, buf);

	// invalidate the rest page buf
	page_id++;
	for (; page_id <= max_page_id; page_id++) {
		buf = uffs_BufFind(dev, parent, serial, page_id);
		if (buf)
			uffs_BufMarkEmpty(dev, buf);
	}

	// flush dirty buffer immediately, forcing block recovery.
	uffs_BufFlushGroupEx(dev, parent, serial, U_TRUE);

	// unlock the group
	uffs_BufUnLockGroup(dev, slot);

ext:
	return (obj->err == UENOERR ? U_SUCC : U_FAIL);
}