	/* tear down ed */
	ed->mtd = -1;
	kfree(ed->data);
	ed->data = NULL;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to put the mtds down!\n", __func__);
#endif /* DEBUGME */

	put_mtd_device(ed->mtds[0]);
	ed->mtds[0] = NULL;
	put_mtd_device(ed->mtds[1]);
	ed->mtds[1] = NULL;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to exit!\n", __func__);
#endif /* DEBUGME */

	return retcode;
}

#else /* CONFIG_CIRRUS_DUAL_MTD_ENV */

static int env_save(void)
{
	int ret;
	size_t retlen = 0;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);

	if (!env_valid)
		return -1;

	/* erase */
	memset(&ei, 0, sizeof(ei));
	ei.mtd = rtcnved_mtd;
	ei.addr = 0;
	ei.len = config_env_size;
	ei.callback = env_erase_callback;
	ei.priv = (long)&complete;

	ret = rtcnved_mtd->erase(rtcnved_mtd, &ei);
	if (ret) {
		printk(KERN_ERR "%s: call erase failed!\r\n", __func__);
		return -EIO;
	}

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;

	/* write */
	MTD_WRITE(rtcnved_mtd, 0, config_env_size, &retlen, env_buf);

	return 0;
}
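/*
 * Both env_save() variants hand a struct completion to the erase request
 * through erase_info.priv and then block in wait_for_completion().  The
 * matching callback is not part of these snippets; a minimal sketch of what
 * it could look like is shown below.  The name env_erase_callback is taken
 * from the code above, but the body is an assumption based on the
 * completion usage, not the original implementation.
 */
static void env_erase_callback(struct erase_info *done)
{
	/* Wake the thread sleeping in wait_for_completion() in env_save(). */
	complete((struct completion *)done->priv);
}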
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = MTD_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
			"on \"%s\" failed\n", pos, len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */

	ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = MTD_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
			"on \"%s\" failed\n", pos, len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */

#ifdef CONFIG_MOT_FEAT_MTD_AUTO_BBM
bbm_retry:
	ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
	if (ret) {
		if (mtd->block_replace) {
			DEBUG(MTD_DEBUG_LEVEL0,
			      "mtdblock: block_replace with pos %08x\n",
			      (unsigned int)pos);
			if (mtd->block_replace(mtd, pos, 0)) {
				printk (KERN_ERR "mtdblock: out of replacement "
					"block for pos %08x\n",
					(unsigned int)pos);
				return ret;
			}
			/* try to write again with replacement block */
			goto bbm_retry;
		}
		return ret;
	}
#else
	ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
#endif
	if (retlen != len)
		return -EIO;
	return 0;
}
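/*
 * Both erase_write() variants above park the caller on a local wait queue
 * and rely on an erase_callback() to wake it once the erase completes.
 * That helper is not included in the snippets; a minimal sketch, assuming
 * the wait-queue head is passed through erase_info.priv as done above:
 */
static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;

	/* Wake the thread that scheduled itself out in erase_write(). */
	wake_up(wait_q);
}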
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
	      mtd->name, pos, len);

	if (!sect_size)
		return MTD_WRITE (mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if( size > len )
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = MTD_READ(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}
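/*
 * do_cached_write() flushes a dirty cache via write_cached_data(), which is
 * not part of the snippet above.  A minimal sketch of such a flush helper,
 * assuming the mtdblk_dev fields used above (cache_state, cache_offset,
 * cache_size, cache_data), could look like this:
 */
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	/* Erase the sector backing the cache and write the cached copy back. */
	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

	/*
	 * Mark the cache as empty rather than clean: the flash copy could be
	 * changed by another writer before it is read through this cache again.
	 */
	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}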
int nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n",
			       ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		header = (struct nvram_header *)buf;
		magic_offset = ((void *)&header->magic - (void *)header);
	}

	/* Clear the existing magic # to mark the NVRAM as unusable;
	   we can pull MAGIC bits low without an erase */
	header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash), 20060309 */
	if (nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
			&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC; /* reset MAGIC before we regenerate the
					NVRAM, otherwise we'll have an
					incorrect CRC */

	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len;
	     offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long) &wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC; /* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
			&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes
	 */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

done:
	up(&nvram_sem);
	kfree(buf);
	return ret;
}
static void mtdblock_request(RQFUNC_ARG)
{
	struct request *current_request;
	unsigned int res = 0;
	struct mtd_info *mtd;

	while (1) {
		/* Grab the Request and unlink it from the request list,
		   INIT_REQUEST will execute a return if we are done. */
		INIT_REQUEST;
		current_request = CURRENT;

		if (MINOR(current_request->rq_dev) >= MAX_MTD_DEVICES) {
			printk("mtd: Unsupported device!\n");
			end_request(0);
			continue;
		}

		// Grab our MTD structure
		mtd = __get_mtd_device(NULL, MINOR(current_request->rq_dev));
		if (!mtd) {
			printk("MTD device %d doesn't appear to exist any more\n",
			       CURRENT_DEV);
			end_request(0);
		}

		if (current_request->sector << 9 > mtd->size ||
		    (current_request->sector + current_request->current_nr_sectors) << 9 > mtd->size) {
			printk("mtd: Attempt to read past end of device!\n");
			printk("size: %x, sector: %lx, nr_sectors %lx\n",
			       mtd->size, current_request->sector,
			       current_request->current_nr_sectors);
			end_request(0);
			continue;
		}

		/* Remove the request we are handling from the request list
		   so nobody messes with it */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		/* Now drop the lock that the ll_rw_blk functions grabbed for
		   us and process the request. This is necessary due to the
		   extreme time we spend processing it. */
		spin_unlock_irq(&io_request_lock);
#endif

		// Handle the request
		switch (current_request->cmd) {
			size_t retlen;

		case READ:
			if (MTD_READ(mtd, current_request->sector << 9,
				     current_request->current_nr_sectors << 9,
				     &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		case WRITE:
			/* printk("mtdblock_request WRITE sector=%d(%d)\n",
			   current_request->sector,
			   current_request->current_nr_sectors); */

			// Read only device
			if ((mtd->flags & MTD_CAP_RAM) == 0) {
				res = 0;
				break;
			}

			// Do the write
			if (MTD_WRITE(mtd, current_request->sector << 9,
				      current_request->current_nr_sectors << 9,
				      &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		// Shouldn't happen
		default:
			printk("mtd: unknown request\n");
			break;
		}

		// Grab the lock and re-thread the item onto the linked list
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		spin_lock_irq(&io_request_lock);
#endif
		end_request(res);
	}
}
static read_write_t mtd_write(struct inode *inode, struct file *file,
			      const char *buf, count_t count)
#endif
{
	struct mtd_info *mtd = (struct mtd_info *)file->private_data;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
#ifndef NO_MM
	int len;
	char *kbuf;
#endif

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (FILE_POS == mtd->size)
		return -ENOSPC;

	if (FILE_POS + count > mtd->size)
		count = mtd->size - FILE_POS;

	if (!count)
		return 0;

#ifdef NO_MM
	ret = MTD_WRITE(mtd, FILE_POS, count, &retlen, buf);
	if (!ret) {
		FILE_POS += retlen;
		ret = retlen;
	}
	total_retlen = ret;
#else
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		ret = (*(mtd->write))(mtd, FILE_POS, len, &retlen, kbuf);
		if (!ret) {
			FILE_POS += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}
#endif

	return total_retlen;
} /* mtd_write */
static int mtdconfig_writebytes(FAR struct mtdconfig_struct_s *dev, int offset,
                                FAR const uint8_t *pdata, int writelen)
{
  int ret = OK;

#ifdef CONFIG_MTD_BYTE_WRITE

  /* Test if this MTD device supports byte write */

  if (dev->mtd->write != NULL)
    {
      ret = MTD_WRITE(dev->mtd, offset, writelen, pdata);
    }
  else
#endif

    /* Perform the write using the block write method of the MTD */

    {
      uint16_t block;
      uint16_t index;
      off_t    bytes_this_block;
      off_t    bytes_written = 0;

      while (writelen)
        {
          /* Read existing data from the block into the buffer */

          block = offset / dev->blocksize;
          ret = MTD_BREAD(dev->mtd, block, 1, dev->buffer);
          if (ret != 1)
            {
              ret = -EIO;
              goto errout;
            }

          index = offset - block * dev->blocksize;
          bytes_this_block = dev->blocksize - index;
          if (bytes_this_block > writelen)
            {
              bytes_this_block = writelen;
            }

          /* Now write data to the block */

          memcpy(&dev->buffer[index], pdata, bytes_this_block);
          ret = MTD_BWRITE(dev->mtd, block, 1, dev->buffer);
          if (ret != 1)
            {
              ret = -EIO;
              goto errout;
            }

          /* Update writelen, etc. */

          writelen -= bytes_this_block;
          pdata += bytes_this_block;
          offset += bytes_this_block;
          bytes_written += bytes_this_block;
        }

      /* Return the number of bytes written */

      ret = bytes_written;
    }

errout:
  return ret;
}
static int env_save(struct env_data *ed)
{
	int ret;
	size_t retlen = 0;
	int mtd;
	int retcode = 0;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);

#ifdef DEBUGME
	printk(KERN_ERR "********* %s called! ***********\n", __func__);
#endif /* DEBUGME */

	/* sanity */
	if (NULL == ed) {
		printk(KERN_ERR "%s: Called with invalid data!\n", __func__);
		return -1;
	}

	if (-1 == ed->mtd) {
		printk(KERN_ERR "%s: Nothing to save, no sane env present!\n",
		       __func__);
		return -1;
	}

	/* invert them */
	mtd = ed->mtd;
	if (0 == mtd) {
		mtd = 1;
	} else {
		mtd = 0;
	}

#ifdef DEBUGME
	printk(KERN_ERR "%s: Current env %d, preparing %d\n",
	       __func__, ed->mtd, mtd);
#endif /* DEBUGME */

	/* set as active */
	ed->data[ENV_HEADER_SIZE - 1] = active_flag;

	/* erase */
	memset(&ei, 0, sizeof(ei));
	ei.mtd = ed->mtds[mtd];
	ei.addr = 0;
	ei.len = ed->size;
	ei.callback = env_erase_callback;
	ei.priv = (long)&complete;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to call erase on MTD %d!\n", __func__, mtd);
#endif /* DEBUGME */

	ret = ed->mtds[mtd]->erase(ed->mtds[mtd], &ei);
	if (ret) {
		printk(KERN_ERR "%s: Call to erase failed!\n", __func__);
		retcode = -EIO;
		goto bail;
	}

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE) {
		printk(KERN_ERR "%s: Failed to wait for erase completion!\n",
		       __func__);
		retcode = -EIO;
		goto bail;
	}

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to call write on MTD %d!\n", __func__, mtd);
#endif /* DEBUGME */

	/* write */
	MTD_WRITE(ed->mtds[mtd], 0, ed->size, &retlen, ed->data);
	if (ed->size != retlen) {
		printk(KERN_ERR "%s: Failed to write the new data!\n", __func__);
		retcode = -EIO;
		goto bail;
	}

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to call write on MTD %d for flags!\n",
	       __func__, ed->mtd);
#endif /* DEBUGME */

	/* write obsolete on current partition */
	MTD_WRITE(ed->mtds[ed->mtd], ENV_HEADER_SIZE - 1, 1, &retlen,
		  &obsolete_flag);
	if (1 != retlen) {
		printk(KERN_ERR "%s: Failed to write the data flag!\n", __func__);
		retcode = -EIO;
	}

bail:
	/* tear down ed */
	ed->mtd = -1;
	kfree(ed->data);
	ed->data = NULL;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to put the mtds down!\n", __func__);
#endif /* DEBUGME */

	put_mtd_device(ed->mtds[0]);
	ed->mtds[0] = NULL;
	put_mtd_device(ed->mtds[1]);
	ed->mtds[1] = NULL;

#ifdef DEBUGME
	printk(KERN_ERR "%s: About to exit!\n", __func__);
#endif /* DEBUGME */

	return retcode;
}