/*
 * check_free_sectors: check if a free sector is actually FREE,
 * i.e. all 0xff in data and oob area.
 */
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
	int len, int check_oob)
{
	int i;
	size_t retlen;
	u8 buf[SECTORSIZE];

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=0x%x,"
		"address=0x%x,len=%d,check_oob=%d)\n",
		(int)inftl, address, len, check_oob);

	for (i = 0; i < len; i += SECTORSIZE) {
		/*
		 * We want to read the sector without ECC check here since a
		 * free sector does not have an ECC syndrome on it yet.
		 */
		if (MTD_READ(inftl->mtd, address, SECTORSIZE, &retlen, buf) < 0)
			return -1;
		if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
			return -1;

		if (check_oob) {
			if (MTD_READOOB(inftl->mtd, address, inftl->mtd->oobsize,
					&retlen, buf) < 0)
				return -1;
			if (memcmpb(buf, 0xff, inftl->mtd->oobsize) != 0)
				return -1;
		}
		address += SECTORSIZE;
	}

	return 0;
}
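/*
 * Note: memcmpb() is not a C library routine; it is a small MTD helper that
 * checks whether every byte of a buffer equals a constant. A minimal sketch
 * consistent with the call sites above (memcmp-like return convention assumed,
 * not a verified copy of the driver's definition):
 */
static int memcmpb(void *a, int c, int n)
{
	int i;

	/* Return 0 if all n bytes equal c, non-zero on the first mismatch */
	for (i = 0; i < n; i++) {
		if (c != ((unsigned char *)a)[i])
			return 1;
	}
	return 0;
}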
/* nothing to init, always read! */
void rtcnvet_init(void)
{
#ifdef DEBUGME
	printk(KERN_ERR "********* %s called! ***********\n", __func__);
#endif /* DEBUGME */
	return;
}
#else /* CONFIG_CIRRUS_DUAL_MTD_ENV */
void rtcnvet_init(void)
{
	size_t retlen = 0;
	int _crc32;

	rtcnved_mtd = get_mtd_device_nm(UBOOTENV_MTD_NAME);
	if (IS_ERR(rtcnved_mtd)) {
		printk(KERN_ERR "MTD (%s) not found\r\n", UBOOTENV_MTD_NAME);
		return;
	}

	/*
	 * #define ENV_SIZE (CONFIG_ENV_SIZE - ENV_HEADER_SIZE)
	 * Assume mtd->size is CONFIG_ENV_SIZE in U-Boot.
	 */
	config_env_size = rtcnved_mtd->size;
	env_size = config_env_size - ENV_HEADER_SIZE;
	env_buf = kmalloc(config_env_size, GFP_KERNEL);

	MTD_READ(rtcnved_mtd, 0, config_env_size, &retlen, env_buf);
	_crc32 = ubcrc32(0, &env_buf[ENV_HEADER_SIZE], env_size);
	if (*((uint32_t *)&env_buf[0]) != _crc32) {
		printk(KERN_ERR "%s: invalid env, crc check failed! 0x%08x vs ",
		       __func__, _crc32);
		dump_hex_f((uint8_t *)&env_buf[0], 4, DUMP_HEX);
	} else
		env_valid = 1;

	return;
}
static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
			   char *buffer)
{
	struct INFTLrecord *inftl = (void *)mbd;
	unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
	unsigned int status;
	int silly = MAX_LOOPS;
	struct inftl_bci bci;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
		"buffer=%p)\n", inftl, block, buffer);

	while (thisEUN < inftl->nb_blocks) {
		if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
				blockofs, 8, &retlen, (char *)&bci) < 0)
			status = SECTOR_IGNORE;
		else
			status = bci.Status | bci.Status1;

		switch (status) {
		case SECTOR_DELETED:
			thisEUN = BLOCK_NIL;
			goto foundit;
		case SECTOR_USED:
			goto foundit;
		case SECTOR_FREE:
		case SECTOR_IGNORE:
			break;
		default:
			printk(KERN_WARNING "INFTL: unknown status for "
				"block %ld in EUN %d: 0x%04x\n",
				block, thisEUN, status);
			break;
		}

		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in "
				"Virtual Unit Chain 0x%lx\n",
				block / (inftl->EraseSize / SECTORSIZE));
			return 1;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

foundit:
	if (thisEUN == BLOCK_NIL) {
		/* The requested block is not on the media, return all 0x00 */
		memset(buffer, 0, SECTORSIZE);
	} else {
		size_t retlen;
		loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;

		if (MTD_READ(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen, buffer))
			return -EIO;
	}
	return 0;
}
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_info *mtd = file->private_data;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			} else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */
static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
			  int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	if (!sect_size)
		return MTD_READ(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached.
		 * Read the requested amount of data from our internal cache if
		 * it contains what we want, otherwise we read the data directly
		 * from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy(buf, mtdblk->cache_data + offset, size);
		} else {
			ret = MTD_READ(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
static ssize_t mtd_read(struct file *file, char *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_info *mtd = (struct mtd_info *)file->private_data;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		if (!ret) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			} else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */
int _nvram_read(char *buf)
{
	struct nvram_header *header = (struct nvram_header *)buf;
	size_t len;

	if (!nvram_mtd ||
	    MTD_READ(nvram_mtd, nvram_mtd->size - NVRAM_SPACE, NVRAM_SPACE,
		     &len, buf) ||
	    len != NVRAM_SPACE ||
	    header->magic != NVRAM_MAGIC) {
		/* Maybe we can recover some data from early initialization */
		memcpy(buf, nvram_buf, NVRAM_SPACE);
	}

	return 0;
}
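/*
 * The NVRAM image occupies the last NVRAM_SPACE bytes of the flash partition.
 * A hedged sketch of how a caller might use _nvram_read(); the function name
 * example_dump_nvram and the kmalloc-based buffer handling are illustrative
 * assumptions, not taken from the driver above.
 */
static int example_dump_nvram(void)
{
	char *buf = kmalloc(NVRAM_SPACE, GFP_KERNEL);
	struct nvram_header *header;

	if (!buf)
		return -ENOMEM;

	_nvram_read(buf);	/* falls back to nvram_buf on read failure */
	header = (struct nvram_header *)buf;
	printk(KERN_INFO "nvram: magic 0x%08x, len %u\n",
	       header->magic, header->len);

	kfree(buf);
	return 0;
}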
static ssize_t mtdconfig_read(FAR struct file *filep, FAR char *buffer,
                              size_t len)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct mtdconfig_struct_s *dev = inode->i_private;
  size_t bytes;

  if (dev->readoff >= dev->neraseblocks * dev->erasesize)
    {
      return 0;
    }

  /* Read data from the file */

  bytes = MTD_READ(dev->mtd, dev->readoff, len, (uint8_t *)buffer);
  dev->readoff += bytes;
  return bytes;
}
static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC,
			   unsigned pendingblock)
{
	u16 BlockMap[MAX_SECTORS_PER_UNIT];
	unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN, prevEUN, status;
	int block, silly;
	unsigned int targetEUN;
	struct inftl_oob oob;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
		"pending=%d)\n", inftl, thisVUC, pendingblock);

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockDeleted, 0, sizeof(BlockDeleted));

	thisEUN = targetEUN = inftl->VUtable[thisVUC];

	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "INFTL: trying to fold non-existent "
			"Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/*
	 * Scan to find the Erase Unit which holds the actual data for each
	 * 512-byte block within the Chain.
	 */
	silly = MAX_LOOPS;
	while (thisEUN < inftl->nb_blocks) {
		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
			if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
				continue;

			if (MTD_READOOB(inftl->mbd.mtd,
					(thisEUN * inftl->EraseSize) +
					(block * SECTORSIZE), 16, &retlen,
					(char *)&oob) < 0)
				status = SECTOR_IGNORE;
			else
				status = oob.b.Status | oob.b.Status1;

			switch (status) {
			case SECTOR_FREE:
			case SECTOR_IGNORE:
				break;
			case SECTOR_USED:
				BlockMap[block] = thisEUN;
				continue;
			case SECTOR_DELETED:
				BlockDeleted[block] = 1;
				continue;
			default:
				printk(KERN_WARNING "INFTL: unknown status "
					"for block %d in EUN %d: %x\n",
					block, thisEUN, status);
				break;
			}
		}

		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in Virtual "
				"Unit Chain 0x%x\n", thisVUC);
			return BLOCK_NIL;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

	/*
	 * OK. We now know the location of every block in the Virtual Unit
	 * Chain, and the Erase Unit into which we are supposed to be copying.
	 * Go for it.
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
		thisVUC, targetEUN);

	for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
		unsigned char movebuf[SECTORSIZE];
		int ret;

		/*
		 * If it's in the target EUN already, or if it's pending write,
		 * do nothing.
		 */
		if (BlockMap[block] == targetEUN || (pendingblock ==
		    (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
			continue;
		}

		/*
		 * Copy only in non free block (free blocks can only
		 * happen in case of media errors or deleted blocks).
		 */
		if (BlockMap[block] == BLOCK_NIL)
			continue;

		ret = MTD_READ(inftl->mbd.mtd,
			(inftl->EraseSize * BlockMap[block]) +
			(block * SECTORSIZE), SECTORSIZE, &retlen, movebuf);
		if (ret < 0) {
			ret = MTD_READ(inftl->mbd.mtd,
				(inftl->EraseSize * BlockMap[block]) +
				(block * SECTORSIZE), SECTORSIZE,
				&retlen, movebuf);
			if (ret != -EIO)
				DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
					"away on retry?\n");
		}

		memset(&oob, 0xff, sizeof(struct inftl_oob));
		oob.b.Status = oob.b.Status1 = SECTOR_USED;
		MTD_WRITEECC(inftl->mbd.mtd,
			(inftl->EraseSize * targetEUN) + (block * SECTORSIZE),
			SECTORSIZE, &retlen, movebuf,
			(char *)&oob, &inftl->oobinfo);
	}

	/*
	 * Newest unit in chain now contains data from _all_ older units.
	 * So go through and erase each unit in chain, oldest first. (This
	 * is important: by doing oldest first, if we crash/reboot then it
	 * is relatively simple to clean up the mess).
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
		thisVUC);

	for (;;) {
		/* Find oldest unit in chain. */
		thisEUN = inftl->VUtable[thisVUC];
		prevEUN = BLOCK_NIL;
		while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
			prevEUN = thisEUN;
			thisEUN = inftl->PUtable[thisEUN];
		}

		/* Check if we are all done */
		if (thisEUN == targetEUN)
			break;

		if (INFTL_formatblock(inftl, thisEUN) < 0) {
			/*
			 * Could not erase : mark block as reserved.
			 */
			inftl->PUtable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* Correctly erased : mark it as free */
			inftl->PUtable[thisEUN] = BLOCK_FREE;
			inftl->PUtable[prevEUN] = BLOCK_NIL;
			inftl->numfreeEUNs++;
		}
	}

	return targetEUN;
}
static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	if (!sect_size)
		return MTD_WRITE(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector. Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write(mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = MTD_READ(mtd, sect_start, sect_size,
					&retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy(mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
/*
 * find_boot_record: Find the INFTL Media Header and its Spare copy which
 *	contains the various device information of the INFTL partition and
 *	Bad Unit Table. Update the PUtable[] table according to the Bad
 *	Unit Table. PUtable[] is used for management of Erase Unit in
 *	other routines in inftlcore.c and inftlmount.c.
 */
static int find_boot_record(struct INFTLrecord *inftl)
{
	struct inftl_unittail h1;
	//struct inftl_oob oob;
	unsigned int i, block;
	u8 buf[SECTORSIZE];
	struct INFTLMediaHeader *mh = &inftl->MediaHdr;
	struct INFTLPartition *ip;
	size_t retlen;

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);

	/*
	 * Assume logical EraseSize == physical erasesize for starting the
	 * scan. We'll sort it out later if we find a MediaHeader which says
	 * otherwise.
	 */
	inftl->EraseSize = inftl->mbd.mtd->erasesize;
	inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;

	inftl->MediaUnit = BLOCK_NIL;

	/* Search for a valid boot record */
	for (block = 0; block < inftl->nb_blocks; block++) {
		int ret;

		/*
		 * Check for BNAND header first. Then whinge if it's found
		 * but later checks fail.
		 */
		ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize,
			SECTORSIZE, &retlen, buf);
		/* We ignore ret in case the ECC of the MediaHeader is invalid
		   (which is apparently acceptable) */
		if (retlen != SECTORSIZE) {
			static int warncount = 5;

			if (warncount) {
				printk(KERN_WARNING "INFTL: block read at 0x%x "
					"of mtd%d failed: %d\n",
					block * inftl->EraseSize,
					inftl->mbd.mtd->index, ret);
				if (!--warncount)
					printk(KERN_WARNING "INFTL: further "
						"failures for this block will "
						"not be printed\n");
			}
			continue;
		}

		if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
			/* BNAND\0 not found. Continue */
			continue;
		}

		/* To be safer with BIOS, also use erase mark as discriminant */
		ret = MTD_READOOB(inftl->mbd.mtd,
			block * inftl->EraseSize + SECTORSIZE + 8,
			8, &retlen, (char *)&h1);
		if (ret < 0) {
			printk(KERN_WARNING "INFTL: ANAND header found at "
				"0x%x in mtd%d, but OOB data read failed "
				"(err %d)\n", block * inftl->EraseSize,
				inftl->mbd.mtd->index, ret);
			continue;
		}

		/*
		 * This is the first we've seen.
		 * Copy the media header structure into place.
		 */
		memcpy(mh, buf, sizeof(struct INFTLMediaHeader));

		/* Read the spare media header at offset 4096 */
		MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize + 4096,
			SECTORSIZE, &retlen, buf);
		if (retlen != SECTORSIZE) {
			printk(KERN_WARNING "INFTL: Unable to read spare "
				"Media Header\n");
			return -1;
		}

		/* Check if this one is the same as the first one we found. */
		if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
			printk(KERN_WARNING "INFTL: Primary and spare Media "
				"Headers disagree.\n");
			return -1;
		}

		mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
		mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
		mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
		mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
		mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
		mh->PercentUsed = le32_to_cpu(mh->PercentUsed);

#ifdef CONFIG_MTD_DEBUG_VERBOSE
		if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
			printk("INFTL: Media Header ->\n"
				"    bootRecordID          = %s\n"
				"    NoOfBootImageBlocks   = %d\n"
				"    NoOfBinaryPartitions  = %d\n"
				"    NoOfBDTLPartitions    = %d\n"
				"    BlockMultiplierBits   = %d\n"
				"    FormatFlags           = %d\n"
				"    OsakVersion           = 0x%x\n"
				"    PercentUsed           = %d\n",
				mh->bootRecordID, mh->NoOfBootImageBlocks,
				mh->NoOfBinaryPartitions,
				mh->NoOfBDTLPartitions,
				mh->BlockMultiplierBits, mh->FormatFlags,
				mh->OsakVersion, mh->PercentUsed);
		}
#endif

		if (mh->NoOfBDTLPartitions == 0) {
			printk(KERN_WARNING "INFTL: Media Header sanity check "
				"failed: NoOfBDTLPartitions (%d) == 0, "
				"must be at least 1\n", mh->NoOfBDTLPartitions);
			return -1;
		}

		if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
			printk(KERN_WARNING "INFTL: Media Header sanity check "
				"failed: Total Partitions (%d) > 4, "
				"BDTL=%d Binary=%d\n",
				mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions,
				mh->NoOfBDTLPartitions, mh->NoOfBinaryPartitions);
			return -1;
		}

		if (mh->BlockMultiplierBits > 1) {
			printk(KERN_WARNING "INFTL: sorry, we don't support "
				"UnitSizeFactor 0x%02x\n",
				mh->BlockMultiplierBits);
			return -1;
		} else if (mh->BlockMultiplierBits == 1) {
			printk(KERN_WARNING "INFTL: support for INFTL with "
				"UnitSizeFactor 0x%02x is experimental\n",
				mh->BlockMultiplierBits);
			inftl->EraseSize = inftl->mbd.mtd->erasesize <<
				mh->BlockMultiplierBits;
			inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
			block >>= mh->BlockMultiplierBits;
		}

		/* Scan the partitions */
		for (i = 0; (i < 4); i++) {
			ip = &mh->Partitions[i];
			ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
			ip->firstUnit = le32_to_cpu(ip->firstUnit);
			ip->lastUnit = le32_to_cpu(ip->lastUnit);
			ip->flags = le32_to_cpu(ip->flags);
			ip->spareUnits = le32_to_cpu(ip->spareUnits);
			ip->Reserved0 = le32_to_cpu(ip->Reserved0);

#ifdef CONFIG_MTD_DEBUG_VERBOSE
			if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
				printk("    PARTITION[%d] ->\n"
					"        virtualUnits    = %d\n"
					"        firstUnit       = %d\n"
					"        lastUnit        = %d\n"
					"        flags           = 0x%x\n"
					"        spareUnits      = %d\n",
					i, ip->virtualUnits, ip->firstUnit,
					ip->lastUnit, ip->flags,
					ip->spareUnits);
			}
#endif

			if (ip->Reserved0 != ip->firstUnit) {
				struct erase_info *instr = &inftl->instr;

				instr->mtd = inftl->mbd.mtd;

				/*
				 * Most likely this is using the
				 * undocumented quick mount feature.
				 * We don't support that, we will need
				 * to erase the hidden block for full
				 * compatibility.
				 */
				instr->addr = ip->Reserved0 * inftl->EraseSize;
				instr->len = inftl->EraseSize;
				MTD_ERASE(inftl->mbd.mtd, instr);
			}

			if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
				printk(KERN_WARNING "INFTL: Media Header "
					"Partition %d sanity check failed\n"
					"    firstUnit %d : lastUnit %d > "
					"virtualUnits %d\n", i, ip->firstUnit,
					ip->lastUnit, ip->virtualUnits);
				return -1;
			}

			if (ip->Reserved1 != 0) {
				printk(KERN_WARNING "INFTL: Media Header "
					"Partition %d sanity check failed: "
					"Reserved1 %d != 0\n",
					i, ip->Reserved1);
				return -1;
			}

			if (ip->flags & INFTL_BDTL)
				break;
		}

		if (i >= 4) {
			printk(KERN_WARNING "INFTL: Media Header Partition "
				"sanity check failed:\n       No partition "
				"marked as Disk Partition\n");
			return -1;
		}

		inftl->nb_boot_blocks = ip->firstUnit;
		inftl->numvunits = ip->virtualUnits;
		if (inftl->numvunits > (inftl->nb_blocks -
		    inftl->nb_boot_blocks - 2)) {
			printk(KERN_WARNING "INFTL: Media Header sanity check "
				"failed:\n        numvunits (%d) > nb_blocks "
				"(%d) - nb_boot_blocks(%d) - 2\n",
				inftl->numvunits, inftl->nb_blocks,
				inftl->nb_boot_blocks);
			return -1;
		}

		inftl->mbd.size = inftl->numvunits *
			(inftl->EraseSize / SECTORSIZE);

		/*
		 * Block count is set to last used EUN (we won't need to keep
		 * any meta-data past that point).
		 */
		inftl->firstEUN = ip->firstUnit;
		inftl->lastEUN = ip->lastUnit;
		inftl->nb_blocks = ip->lastUnit + 1;

		/* Memory alloc */
		inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16),
			GFP_KERNEL);
		if (!inftl->PUtable) {
			printk(KERN_WARNING "INFTL: allocation of PUtable "
				"failed (%zd bytes)\n",
				inftl->nb_blocks * sizeof(u16));
			return -ENOMEM;
		}

		inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16),
			GFP_KERNEL);
		if (!inftl->VUtable) {
			kfree(inftl->PUtable);
			printk(KERN_WARNING "INFTL: allocation of VUtable "
				"failed (%zd bytes)\n",
				inftl->nb_blocks * sizeof(u16));
			return -ENOMEM;
		}

		/* Mark the blocks before INFTL MediaHeader as reserved */
		for (i = 0; i < inftl->nb_boot_blocks; i++)
			inftl->PUtable[i] = BLOCK_RESERVED;
		/* Mark all remaining blocks as potentially containing data */
		for (; i < inftl->nb_blocks; i++)
			inftl->PUtable[i] = BLOCK_NOTEXPLORED;

		/* Mark this boot record (NFTL MediaHeader) block as reserved */
		inftl->PUtable[block] = BLOCK_RESERVED;

		/* Read Bad Erase Unit Table and modify PUtable[] accordingly */
		for (i = 0; i < inftl->nb_blocks; i++) {
			int physblock;

			/* If any of the physical eraseblocks are bad, don't
			   use the unit. */
			for (physblock = 0; physblock < inftl->EraseSize;
			     physblock += inftl->mbd.mtd->erasesize) {
				if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd,
				    i * inftl->EraseSize + physblock))
					inftl->PUtable[i] = BLOCK_RESERVED;
			}
		}

		inftl->MediaUnit = block;
		return 0;
	}

	/* Not found. */
	return -1;
}
int up_fillpage(FAR struct tcb_s *tcb, FAR void *vpage)
{
#if defined(CONFIG_PAGING_BINPATH)
  ssize_t nbytes;
  off_t offset;
  off_t pos;
#elif defined(CONFIG_PAGING_M25PX) || defined(CONFIG_PAGING_AT45DB)
  ssize_t nbytes;
  off_t offset;
#endif

  pglldbg("TCB: %p vpage: %p far: %08x\n", tcb, vpage, tcb->xcp.far);
  DEBUGASSERT(tcb->xcp.far >= PG_PAGED_VBASE && tcb->xcp.far < PG_PAGED_VEND);

  /* If BINPATH is defined, then it is the full path to a file on a mounted
   * file system.  In this case initialization will be deferred until the
   * first time that up_fillpage() is called.  Are we initialized?
   */

#if defined(CONFIG_PAGING_BINPATH)

  /* Perform initialization of the paging source device (if necessary) */

  lpc31_initsrc();

  /* Create an offset into the binary image that corresponds to the
   * virtual address.  File offset 0 corresponds to PG_LOCKED_VBASE.
   */

  offset = (off_t)tcb->xcp.far - PG_LOCKED_VBASE;

  /* Seek to that position */

  pos = lseek(g_pgsrc.fd, offset, SEEK_SET);
  DEBUGASSERT(pos != (off_t)-1);

  /* And read the page data from that offset */

  nbytes = read(g_pgsrc.fd, vpage, PAGESIZE);
  DEBUGASSERT(nbytes == PAGESIZE);
  return OK;

#elif defined(CONFIG_PAGING_M25PX) || defined(CONFIG_PAGING_AT45DB) /* !CONFIG_PAGING_BINPATH */

  /* Perform initialization of the paging source device (if necessary) */

  lpc31_initsrc();

  /* Create an offset into the binary image that corresponds to the
   * virtual address.  File offset 0 corresponds to PG_LOCKED_VBASE.
   */

  offset = (off_t)tcb->xcp.far - PG_LOCKED_VBASE + CONFIG_EA3131_PAGING_BINOFFSET;

  /* Read the page at the correct offset into the SPI FLASH device */

  nbytes = MTD_READ(g_pgsrc.mtd, offset, PAGESIZE, (FAR uint8_t *)vpage);
  DEBUGASSERT(nbytes == PAGESIZE);
  return OK;

#else /* !CONFIG_PAGING_BINPATH && !CONFIG_PAGING_M25PX && !CONFIG_PAGING_AT45DB */

#  warning "Not implemented"
  return -ENOSYS;

#endif /* !CONFIG_PAGING_BINPATH && !CONFIG_PAGING_M25PX && !CONFIG_PAGING_AT45DB */
}
/*
 * Fill ed from the good partition.
 * If no env is valid, it will not try to correct this!
 *
 *   read NAME1, do crc1, extract flag1
 *   read NAME2, do crc2, extract flag2
 *   if (crc1 && !crc2)  use partition 1; stop
 *   if (!crc1 && crc2)  use partition 2; stop
 *   if (!crc1 && !crc2) no valid env;    stop
 *   if (crc1 && crc2) {
 *       if (flag1 active && flag2 obsolete) use partition 1; stop
 *       if (flag1 obsolete && flag2 active) use partition 2; stop
 *       if (flag1 == flag2)                 use partition 1; stop
 *       if (flag1 == 0xff)                  use partition 1; stop
 *       if (flag2 == 0xff)                  use partition 2; stop
 *       use partition 1; stop
 *   }
 *
 * Note: the code below encodes "partition 1" as env = 0, "partition 2" as
 * env = 1 and "no valid env" as env = -1.
 */
static int env_read(struct env_data *ed)
{
	size_t retlen = 0;
	unsigned int crc1, crc2;
	unsigned char flag1, flag2;
	struct mtd_info *mtd1;
	struct mtd_info *mtd2;
	int config_env_size1, config_env_size2;
	int env_size1, env_size2;
	unsigned char *env_buf1;
	unsigned char *env_buf2;
	int env = -1;

#ifdef DEBUGME
	printk(KERN_ERR "********* %s called! ***********\n", __func__);
#endif /* DEBUGME */

	/* Sanity check */
	if (NULL == ed) {
		printk(KERN_ERR "%s: Incorrect function call!\n", __func__);
		return 1;
	}

	flag1 = 0;
	flag2 = 0;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to read first partition!\n", __func__);
#endif /* DEBUGME */

	/* First partition */
	mtd1 = get_mtd_device_nm(UBOOTENV_MTD_NAME1);
	if (IS_ERR(mtd1)) {
		printk(KERN_ERR "%s: MTD (%s) not found\n",
		       __func__, UBOOTENV_MTD_NAME1);
		return 1;
	}
	config_env_size1 = mtd1->size;
	env_size1 = config_env_size1 - ENV_HEADER_SIZE;
	env_buf1 = kmalloc(config_env_size1, GFP_KERNEL);

	MTD_READ(mtd1, 0, config_env_size1, &retlen, env_buf1);
	if (retlen != config_env_size1) {
		printk(KERN_ERR "%s: Failed to read data from partition!\n",
		       __func__);
		kfree(env_buf1);
		put_mtd_device(mtd1);
		return 1;
	}

	crc1 = ubcrc32(0, &env_buf1[ENV_HEADER_SIZE], env_size1);
	if (*((uint32_t *)&env_buf1[0]) != crc1) {
		crc1 = 0;
#ifdef DEBUGME
		printk(KERN_ERR "%s: CRC is not ok on partition one!\n",
		       __func__);
#endif /* DEBUGME */
	} else {
		crc1 = 1;
		flag1 = env_buf1[ENV_HEADER_SIZE - 1];
#ifdef DEBUGME
		printk(KERN_ERR "%s: CRC is ok on partition one!\n", __func__);
#endif /* DEBUGME */
	}

	/* Second partition */
#ifdef DEBUGME
	printk(KERN_ERR "%s: About to read second partition!\n", __func__);
#endif /* DEBUGME */
	mtd2 = get_mtd_device_nm(UBOOTENV_MTD_NAME2);
	if (IS_ERR(mtd2)) {
		printk(KERN_ERR "%s: MTD (%s) not found\n",
		       __func__, UBOOTENV_MTD_NAME2);
		kfree(env_buf1);
		put_mtd_device(mtd1);
		return 1;
	}
	config_env_size2 = mtd2->size;
	env_size2 = config_env_size2 - ENV_HEADER_SIZE;
	env_buf2 = kmalloc(config_env_size2, GFP_KERNEL);

	MTD_READ(mtd2, 0, config_env_size2, &retlen, env_buf2);
	if (retlen != config_env_size2) {
		printk(KERN_ERR "%s: Failed to read data from partition!\n",
		       __func__);
		kfree(env_buf1);
		put_mtd_device(mtd1);
		kfree(env_buf2);
		put_mtd_device(mtd2);
		return 1;
	}

	crc2 = ubcrc32(0, &env_buf2[ENV_HEADER_SIZE], env_size2);
	if (*((uint32_t *)&env_buf2[0]) != crc2) {
		crc2 = 0;
#ifdef DEBUGME
		printk(KERN_ERR "%s: CRC is not ok on partition two!\n",
		       __func__);
#endif /* DEBUGME */
	} else {
		crc2 = 1;
		flag2 = env_buf2[ENV_HEADER_SIZE - 1];
#ifdef DEBUGME
		printk(KERN_ERR "%s: CRC is ok on partition two!\n", __func__);
#endif /* DEBUGME */
	}

	/* Do the dance */
	if (crc1 && !crc2) {
		env = 0;
	} else if (!crc1 && crc2) {
		env = 1;
	} else if (!crc1 && !crc2) {
		/* Two bad CRCs! */
		printk(KERN_ERR "%s: Not a single partition is good for env!\n",
		       __func__);
		env = -1;
	} else {
		if (flag1 == active_flag && flag2 == obsolete_flag) {
			env = 0;
		} else if (flag1 == obsolete_flag && flag2 == active_flag) {
			env = 1;
		} else if (flag1 == flag2) {
			env = 0;
		} else if (flag1 == 0xff) {
			env = 0;
		} else if (flag2 == 0xff) {
			env = 1;
		} else {
			/* Two bad flags! */
			env = 0;
		}
	}

	if (-1 == env) {
		printk(KERN_ERR "%s: No good env found, giving up!\n", __func__);
		kfree(env_buf1);
		kfree(env_buf2);
		put_mtd_device(mtd1);
		put_mtd_device(mtd2);
		ed->size = 0;
		ed->data = NULL;
		ed->mtds[0] = NULL;
		ed->mtds[1] = NULL;
		return 1;
	}

	ed->mtd = env;
	if (0 == env) {
		kfree(env_buf2);
		ed->size = config_env_size1;
		ed->data = env_buf1;
	}
	if (1 == env) {
		kfree(env_buf1);
		ed->size = config_env_size2;
		ed->data = env_buf2;
	}
	ed->mtds[0] = mtd1;
	ed->mtds[1] = mtd2;

#ifdef DEBUGME
	printk(KERN_ERR "%s: Read in env %d as good!\n", __func__, env);
#endif /* DEBUGME */

	return 0;
}
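/*
 * env_read() fills a caller-supplied struct env_data, but the structure is
 * not shown in this excerpt. A hypothetical declaration, inferred only from
 * the fields the function touches (ed->mtd, ed->size, ed->data, ed->mtds);
 * the real driver may add members or use different widths.
 */
struct env_data {
	int mtd;			/* index of the winning partition: 0 or 1 */
	int size;			/* size of the raw env image, incl. header */
	unsigned char *data;		/* kmalloc'd copy of the winning env image */
	struct mtd_info *mtds[2];	/* both env partitions, kept for later writes */
};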
static u16 NFTL_foldchain(struct NFTLrecord *nftl, unsigned thisVUC,
			  unsigned pendingblock)
{
	u16 BlockMap[MAX_SECTORS_PER_UNIT];
	unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
	unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN;
	int block;
	int silly;
	unsigned int targetEUN;
	struct nftl_oob oob;
	int inplace = 1;
	size_t retlen;

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockFreeFound, 0, sizeof(BlockFreeFound));

	thisEUN = nftl->EUNtable[thisVUC];

	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "Trying to fold non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/* Scan to find the Erase Unit which holds the actual data for each
	   512-byte block within the Chain.
	*/
	silly = MAX_LOOPS;
	targetEUN = BLOCK_NIL;
	while (thisEUN <= nftl->lastEUN) {
		unsigned int status, foldmark;

		targetEUN = thisEUN;
		for (block = 0; block < nftl->EraseSize / 512; block++) {
			MTD_READOOB(nftl->mbd.mtd,
				    (thisEUN * nftl->EraseSize) + (block * 512),
				    16, &retlen, (char *)&oob);
			if (block == 2) {
				foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
				if (foldmark == FOLD_MARK_IN_PROGRESS) {
					DEBUG(MTD_DEBUG_LEVEL1,
					      "Write Inhibited on EUN %d\n",
					      thisEUN);
					inplace = 0;
				} else {
					/* There's no other reason not to do
					   inplace, except ones that come later.
					   So we don't need to preserve inplace */
					inplace = 1;
				}
			}
			status = oob.b.Status | oob.b.Status1;
			BlockLastState[block] = status;

			switch (status) {
			case SECTOR_FREE:
				BlockFreeFound[block] = 1;
				break;

			case SECTOR_USED:
				if (!BlockFreeFound[block])
					BlockMap[block] = thisEUN;
				else
					printk(KERN_WARNING
					       "SECTOR_USED found after SECTOR_FREE "
					       "in Virtual Unit Chain %d for block %d\n",
					       thisVUC, block);
				break;
			case SECTOR_DELETED:
				if (!BlockFreeFound[block])
					BlockMap[block] = BLOCK_NIL;
				else
					printk(KERN_WARNING
					       "SECTOR_DELETED found after SECTOR_FREE "
					       "in Virtual Unit Chain %d for block %d\n",
					       thisVUC, block);
				break;

			case SECTOR_IGNORE:
				break;
			default:
				printk("Unknown status for block %d in EUN %d: %x\n",
				       block, thisEUN, status);
			}
		}

		if (!silly--) {
			printk(KERN_WARNING
			       "Infinite loop in Virtual Unit Chain 0x%x\n",
			       thisVUC);
			return BLOCK_NIL;
		}

		thisEUN = nftl->ReplUnitTable[thisEUN];
	}

	if (inplace) {
		/* We're being asked to be a fold-in-place. Check
		   that all blocks which actually have data associated
		   with them (i.e. BlockMap[block] != BLOCK_NIL) are
		   either already present or SECTOR_FREE in the target
		   block. If not, we're going to have to fold out-of-place
		   anyway.
		*/
		for (block = 0; block < nftl->EraseSize / 512; block++) {
			if (BlockLastState[block] != SECTOR_FREE &&
			    BlockMap[block] != BLOCK_NIL &&
			    BlockMap[block] != targetEUN) {
				DEBUG(MTD_DEBUG_LEVEL1,
				      "Setting inplace to 0. VUC %d, "
				      "block %d was %x lastEUN, "
				      "and is in EUN %d (%s) %d\n",
				      thisVUC, block, BlockLastState[block],
				      BlockMap[block],
				      BlockMap[block] == targetEUN ? "==" : "!=",
				      targetEUN);
				inplace = 0;
				break;
			}
		}

		if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
		    pendingblock < ((thisVUC + 1) * (nftl->EraseSize / 512)) &&
		    BlockLastState[pendingblock -
				   (thisVUC * (nftl->EraseSize / 512))] !=
		    SECTOR_FREE) {
			DEBUG(MTD_DEBUG_LEVEL1,
			      "Pending write not free in EUN %d. "
			      "Folding out of place.\n", targetEUN);
			inplace = 0;
		}
	}

	if (!inplace) {
		DEBUG(MTD_DEBUG_LEVEL1,
		      "Cannot fold Virtual Unit Chain %d in place. "
		      "Trying out-of-place\n", thisVUC);
		/* We need to find a targetEUN to fold into. */
		targetEUN = NFTL_findfreeblock(nftl, 1);
		if (targetEUN == BLOCK_NIL) {
			/* Ouch. Now we're screwed. We need to do a
			   fold-in-place of another chain to make room
			   for this one. We need a better way of selecting
			   which chain to fold, because makefreeblock will
			   only ask us to fold the same one again.
			*/
			printk(KERN_WARNING
			       "NFTL_findfreeblock(desperate) returns 0xffff.\n");
			return BLOCK_NIL;
		}
	} else {
		/* We put a fold mark in the chain we are folding only if we
		   fold in place, to help the mount check code. If we do not
		   fold in place, it is possible to find the valid chain by
		   selecting the longer one */
		oob.u.c.FoldMark = oob.u.c.FoldMark1 =
			cpu_to_le16(FOLD_MARK_IN_PROGRESS);
		oob.u.c.unused = 0xffffffff;

		MTD_WRITEOOB(nftl->mbd.mtd,
			     (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
			     8, &retlen, (char *)&oob.u);
	}

	/* OK. We now know the location of every block in the Virtual Unit
	   Chain, and the Erase Unit into which we are supposed to be copying.
	   Go for it.
	*/
	DEBUG(MTD_DEBUG_LEVEL1, "Folding chain %d into unit %d\n",
	      thisVUC, targetEUN);
	for (block = 0; block < nftl->EraseSize / 512; block++) {
		unsigned char movebuf[512];
		int ret;

		/* If it's in the target EUN already, or if it's pending write,
		   do nothing */
		if (BlockMap[block] == targetEUN ||
		    (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
			continue;
		}

		/* copy only in non free block (free blocks can only
		   happen in case of media errors or deleted blocks) */
		if (BlockMap[block] == BLOCK_NIL)
			continue;

		ret = MTD_READ(nftl->mbd.mtd,
			       (nftl->EraseSize * BlockMap[block]) + (block * 512),
			       512, &retlen, movebuf);
		if (ret < 0) {
			ret = MTD_READ(nftl->mbd.mtd,
				       (nftl->EraseSize * BlockMap[block]) +
				       (block * 512), 512, &retlen, movebuf);
			if (ret != -EIO)
				printk("Error went away on retry.\n");
		}
		memset(&oob, 0xff, sizeof(struct nftl_oob));
		oob.b.Status = oob.b.Status1 = SECTOR_USED;
		MTD_WRITEECC(nftl->mbd.mtd,
			     (nftl->EraseSize * targetEUN) + (block * 512),
			     512, &retlen, movebuf, (char *)&oob, &nftl->oobinfo);
	}

	/* add the header so that it is now a valid chain */
	oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
	oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;

	MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 8,
		     8, &retlen, (char *)&oob.u);

	/* OK. We've moved the whole lot into the new block. Now we have to
	   free the original blocks. */

	/* At this point, we have two different chains for this Virtual Unit,
	   and no way to tell them apart. If we crash now, we get confused.
	   However, both contain the same data, so we shouldn't actually lose
	   data in this case. It's just that when we load up on a medium which
	   has duplicate chains, we need to free one of the chains because it's
	   not necessary any more. */
	thisEUN = nftl->EUNtable[thisVUC];
	DEBUG(MTD_DEBUG_LEVEL1, "Want to erase\n");

	/* For each block in the old chain (except the targetEUN of course),
	   free it and make it available for future use */
	while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
		unsigned int EUNtmp;

		EUNtmp = nftl->ReplUnitTable[thisEUN];

		if (NFTL_formatblock(nftl, thisEUN) < 0) {
			/* could not erase : mark block as reserved */
			nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* correctly erased : mark it as free */
			nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
			nftl->numfreeEUNs++;
		}
		thisEUN = EUNtmp;
	}

	/* Make this the new start of chain for thisVUC */
	nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
	nftl->EUNtable[thisVUC] = targetEUN;

	return targetEUN;
}
static void mtdblock_request(RQFUNC_ARG)
{
	struct request *current_request;
	unsigned int res = 0;
	struct mtd_info *mtd;

	while (1) {
		/* Grab the Request and unlink it from the request list;
		   INIT_REQUEST will execute a return if we are done. */
		INIT_REQUEST;
		current_request = CURRENT;

		if (MINOR(current_request->rq_dev) >= MAX_MTD_DEVICES) {
			printk("mtd: Unsupported device!\n");
			end_request(0);
			continue;
		}

		// Grab our MTD structure
		mtd = __get_mtd_device(NULL, MINOR(current_request->rq_dev));
		if (!mtd) {
			printk("MTD device %d doesn't appear to exist any more\n",
			       CURRENT_DEV);
			end_request(0);
			continue;	/* don't dereference a missing device below */
		}

		if (current_request->sector << 9 > mtd->size ||
		    (current_request->sector +
		     current_request->current_nr_sectors) << 9 > mtd->size) {
			printk("mtd: Attempt to read past end of device!\n");
			printk("size: %x, sector: %lx, nr_sectors %lx\n",
			       mtd->size, current_request->sector,
			       current_request->current_nr_sectors);
			end_request(0);
			continue;
		}

		/* Remove the request we are handling from the request list
		   so nobody messes with it */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		/* Now drop the lock that the ll_rw_blk functions grabbed for
		   us and process the request. This is necessary due to the
		   extreme time we spend processing it. */
		spin_unlock_irq(&io_request_lock);
#endif

		// Handle the request
		switch (current_request->cmd)
		{
			size_t retlen;

		case READ:
			if (MTD_READ(mtd, current_request->sector << 9,
				     current_request->current_nr_sectors << 9,
				     &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		case WRITE:
			/* printk("mtdblock_request WRITE sector=%d(%d)\n",
			   current_request->sector,
			   current_request->current_nr_sectors); */

			// Read only device
			if ((mtd->flags & MTD_CAP_RAM) == 0) {
				res = 0;
				break;
			}

			// Do the write
			if (MTD_WRITE(mtd, current_request->sector << 9,
				      current_request->current_nr_sectors << 9,
				      &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		// Shouldn't happen
		default:
			printk("mtd: unknown request\n");
			break;
		}

		// Grab the lock and re-thread the item onto the linked list
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		spin_lock_irq(&io_request_lock);
#endif
		end_request(res);
	}
}
static int mtdconfig_readbytes(FAR struct mtdconfig_struct_s *dev, int offset,
                               FAR uint8_t *pdata, int readlen)
{
  off_t  bytestoread = readlen;
  off_t  bytesthisblock, firstbyte;
  off_t  block, index;
  int    ret = OK;
  size_t bytes;

  /* Test if the byte-oriented read interface is supported.  If it is, use
   * it directly.
   */

  if ((dev->mtd->read != NULL) && (readlen < dev->blocksize))
    {
      /* Read interface available.  Read directly to buffer */

      bytes = MTD_READ(dev->mtd, offset, readlen, pdata);
      if (bytes != readlen)
        {
          /* Error reading data! */

          ret = -EIO;
        }
    }
  else
    {
      /* Read interface not available, do a block read into our buffer */

      block = offset / dev->blocksize;
      firstbyte = offset - (block * dev->blocksize);
      bytesthisblock = dev->blocksize - firstbyte;
      if (bytesthisblock > readlen)
        {
          bytesthisblock = readlen;
        }

      index = 0;
      while (bytestoread > 0)
        {
          if (bytesthisblock < dev->blocksize || bytestoread < dev->blocksize)
            {
              /* Copy to temp buffer first...don't need the whole block */

              bytes = MTD_BREAD(dev->mtd, block, 1, dev->buffer);
              if (bytes != 1)
                {
                  /* Error reading data! */

                  ret = -EIO;
                  goto errout;
                }

              /* Copy data to the output buffer */

              memcpy(&pdata[index], &dev->buffer[firstbyte], bytesthisblock);
            }
          else
            {
              /* We are reading a whole block.  Read directly to buffer */

              bytes = MTD_BREAD(dev->mtd, block, 1, &pdata[index]);
              if (bytes != 1)
                {
                  /* Error reading data! */

                  ret = -EIO;
                  goto errout;
                }
            }

          /* Update values for next block read */

          bytestoread -= bytesthisblock;
          index += bytesthisblock;
          bytesthisblock = dev->blocksize;
          if (bytesthisblock > bytestoread)
            {
              bytesthisblock = bytestoread;
            }

          firstbyte = 0;
          block++;
        }
    }

errout:
  return ret;
}
static int mtd_read(struct inode *inode, struct file *file, char *buf,
		    int count)
#endif
{
	struct mtd_info *mtd = (struct mtd_info *)file->private_data;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
#ifndef NO_MM
	int len;
	char *kbuf;
#endif

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (FILE_POS + count > mtd->size)
		count = mtd->size - FILE_POS;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.3 or 2.2+rawio, or at
	 * least split the IO into smaller chunks.
	 */
#ifdef NO_MM
	ret = MTD_READ(mtd, FILE_POS, count, &retlen, buf);
	if (!ret) {
		FILE_POS += retlen;
		ret = retlen;
	}
	total_retlen = ret;
#else
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		ret = MTD_READ(mtd, FILE_POS, len, &retlen, kbuf);
		if (!ret) {
			FILE_POS += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			} else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}
#endif

	return total_retlen;
} /* mtd_read */
static off_t mtdconfig_consolidate(FAR struct mtdconfig_struct_s *dev)
{
  off_t    src_block, dst_block;
  off_t    src_offset, dst_offset;
  uint16_t blkper, x, bytes, bytes_left_in_block;
  struct mtdconfig_header_s hdr;
  int      ret;
  uint8_t  sig[CONFIGDATA_BLOCK_HDR_SIZE];
  uint8_t *pBuf;

  /* Prepare to copy block 0 to the last block (erase blocks) */

  src_block = 0;
  dst_block = dev->neraseblocks - 1;

  /* Ensure the last block is erased */

  MTD_ERASE(dev->mtd, dst_block, 1);
  blkper = dev->erasesize / dev->blocksize;
  dst_block *= blkper;            /* Convert to read/write blocks */

  /* Allocate a small buffer for moving data */

  pBuf = (uint8_t *)kmm_malloc(dev->blocksize);
  if (pBuf == NULL)
    {
      return 0;
    }

  /* Now copy block zero to last block */

  for (x = 0; x < blkper; x++)
    {
      ret = MTD_BREAD(dev->mtd, src_block++, 1, dev->buffer);
      if (ret < 0)
        {
          /* I/O Error! */

          goto errout;
        }

      ret = MTD_BWRITE(dev->mtd, dst_block++, 1, dev->buffer);
      if (ret < 0)
        {
          /* I/O Error! */

          goto errout;
        }
    }

  /* Erase block zero and write a format signature. */

  MTD_ERASE(dev->mtd, 0, 1);
  sig[0] = 'C';
  sig[1] = 'D';
  sig[2] = CONFIGDATA_FORMAT_VERSION;
  mtdconfig_writebytes(dev, 0, sig, sizeof(sig));

  /* Now consolidate entries */

  src_block = 1;
  dst_block = 0;
  src_offset = src_block * dev->erasesize + CONFIGDATA_BLOCK_HDR_SIZE;
  dst_offset = CONFIGDATA_BLOCK_HDR_SIZE;

  while (src_block < dev->neraseblocks)
    {
      /* Scan all headers and move them to the src_offset */

retry_relocate:
      MTD_READ(dev->mtd, src_offset, sizeof(hdr), (uint8_t *)&hdr);
      if (hdr.flags == MTD_ERASED_FLAGS)
        {
          /* Test if the source entry is active or if we are at the end
           * of data for this erase block.
           */

          if (hdr.id == MTD_ERASED_ID)
            {
              /* No more data in this erase block.  Advance to the
               * next one.
               */

              src_offset = (src_block + 1) * dev->erasesize +
                           CONFIGDATA_BLOCK_HDR_SIZE;
            }
          else
            {
              /* Test if this entry will fit in the current destination
               * block.
               */

              bytes_left_in_block = (dst_block + 1) * dev->erasesize -
                                    dst_offset;
              if (hdr.len + sizeof(hdr) > bytes_left_in_block)
                {
                  /* Item doesn't fit in the block.  Advance to the next one */

                  /* Update control variables */

                  dst_block++;
                  dst_offset = dst_block * dev->erasesize +
                               CONFIGDATA_BLOCK_HDR_SIZE;

                  DEBUGASSERT(dst_block != src_block);

                  /* Retry the relocate */

                  goto retry_relocate;
                }

              /* Copy this entry to the destination */

              //printf("REL HDR: ID=%04X,%02X Len=%4d Off=%5d Src off=%4d\n",
              //       hdr.id, hdr.instance, hdr.len, dst_offset, src_offset);

              mtdconfig_writebytes(dev, dst_offset, (uint8_t *)&hdr,
                                   sizeof(hdr));
              src_offset += sizeof(hdr);
              dst_offset += sizeof(hdr);

              /* Now copy the data */

              while (hdr.len)
                {
                  bytes = hdr.len;
                  if (bytes > dev->blocksize)
                    {
                      bytes = dev->blocksize;
                    }

                  /* Move the data. */

                  mtdconfig_readbytes(dev, src_offset, pBuf, bytes);
                  mtdconfig_writebytes(dev, dst_offset, pBuf, bytes);

                  /* Update control variables */

                  hdr.len    -= bytes;
                  src_offset += bytes;
                  dst_offset += bytes;
                }
            }
        }
      else
        {
          /* This item has been released.  Skip it! */

          src_offset += sizeof(hdr) + hdr.len;
          if (src_offset + sizeof(hdr) >= (src_block + 1) * dev->erasesize ||
              src_offset == (src_block + 1) * dev->erasesize)
            {
              /* No room left at end of source block */

              src_offset = (src_block + 1) * dev->erasesize +
                           CONFIGDATA_BLOCK_HDR_SIZE;
            }
        }

      /* Test if we are out of space in the src block */

      if (src_offset + sizeof(hdr) >= (src_block + 1) * dev->erasesize)
        {
          /* No room at end of src block for another header.  Go to next
           * source block.
           */

          src_offset = (src_block + 1) * dev->erasesize +
                       CONFIGDATA_BLOCK_HDR_SIZE;
        }

      /* Test if we advanced to the next block.  If we did, then erase the
       * old block.
       */

      if (src_block != src_offset / dev->erasesize)
        {
          /* Erase the block ... we have emptied it */

          MTD_ERASE(dev->mtd, src_block, 1);
          src_block++;
        }

      /* Test if we are out of space in the dst block */

      if (dst_offset + sizeof(hdr) >= (dst_block + 1) * dev->erasesize)
        {
          /* No room at end of dst block for another header.  Go to next
           * block.
           */

          dst_block++;
          dst_offset = dst_block * dev->erasesize + CONFIGDATA_BLOCK_HDR_SIZE;
          DEBUGASSERT(dst_block != src_block);
        }
    }

errout:
  kmm_free(pBuf);
  return 0;
}
static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
			  char *buffer)
{
	struct NFTLrecord *nftl = (void *)mbd;
	u16 lastgoodEUN;
	u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
	unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
	unsigned int status;
	int silly = MAX_LOOPS;
	size_t retlen;
	struct nftl_bci bci;

	lastgoodEUN = BLOCK_NIL;

	if (thisEUN != BLOCK_NIL) {
		while (thisEUN < nftl->nb_blocks) {
			if (MTD_READOOB(nftl->mbd.mtd,
					(thisEUN * nftl->EraseSize) + blockofs,
					8, &retlen, (char *)&bci) < 0)
				status = SECTOR_IGNORE;
			else
				status = bci.Status | bci.Status1;

			switch (status) {
			case SECTOR_FREE:
				/* no modification of a sector should follow
				   a free sector */
				goto the_end;
			case SECTOR_DELETED:
				lastgoodEUN = BLOCK_NIL;
				break;
			case SECTOR_USED:
				lastgoodEUN = thisEUN;
				break;
			case SECTOR_IGNORE:
				break;
			default:
				printk("Unknown status for block %ld in EUN %d: %x\n",
				       block, thisEUN, status);
				break;
			}

			if (!silly--) {
				printk(KERN_WARNING
				       "Infinite loop in Virtual Unit Chain 0x%lx\n",
				       block / (nftl->EraseSize / 512));
				return 1;
			}

			thisEUN = nftl->ReplUnitTable[thisEUN];
		}
	}

the_end:
	if (lastgoodEUN == BLOCK_NIL) {
		/* the requested block is not on the media, return all 0x00 */
		memset(buffer, 0, 512);
	} else {
		loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
		size_t retlen;

		if (MTD_READ(nftl->mbd.mtd, ptr, 512, &retlen, buffer))
			return -EIO;
	}
	return 0;
}
int nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0;	/* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n",
			       ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		magic_offset = ((void *)&header->magic - (void *)header);
		header = (struct nvram_header *)buf;
	}

	/* Clear the existing magic # to mark the NVRAM as unusable;
	   we can pull MAGIC bits low without erase */
	header->magic = NVRAM_CLEAR_MAGIC;	/* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash), 20060309 */
	if (nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
			&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC;	/* reset MAGIC before we regenerate
					   the NVRAM, otherwise we'll have an
					   incorrect CRC */

	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len;
	     offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long)&wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC;	/* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
	   MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
			&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes.
	 */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

done:
	up(&nvram_sem);
	kfree(buf);
	return ret;
}