static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtd_info *mtd;

	DEBUG(1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	dev = MINOR(inode->i_rdev);
	mtd = __get_mtd_device(NULL, dev);

	if (!mtd) {
		printk(KERN_WARNING "MTD device is absent on mtd_release!\n");
		BLK_DEC_USE_COUNT;
		release_return(-ENODEV);
	}

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	DEBUG(1, "ok\n");

	BLK_DEC_USE_COUNT;
	release_return(0);
}
static int mtdblock_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_info *mtd;

	mtd = __get_mtd_device(NULL, MINOR(inode->i_rdev));

	if (!mtd)
		return -EINVAL;

	switch (cmd) {
	case BLKGETSIZE:	/* Return device size */
		return put_user((mtd->size >> 9), (unsigned long *) arg);

#ifdef BLKGETSIZE64
	case BLKGETSIZE64:
		return put_user((u64)mtd->size, (u64 *)arg);
#endif

	case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
#endif
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		if (mtd->sync)
			mtd->sync(mtd);
		return 0;

	default:
		return -ENOTTY;
	}
}
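/*
 * Illustrative sketch (not part of the original driver): the BLKGETSIZE and
 * BLKFLSBUF cases above are normally reached from user space through the
 * block device node, e.g. (assuming a /dev/mtdblock0 node exists):
 *
 *	int fd = open("/dev/mtdblock0", O_RDONLY);
 *	unsigned long sectors;
 *
 *	if (fd >= 0 && ioctl(fd, BLKGETSIZE, &sectors) == 0)
 *		printf("%lu sectors of 512 bytes\n", sectors);
 *	close(fd);
 */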
static int mtdblock_romptr(kdev_t dev, struct vm_area_struct *vma)
{
	struct mtd_info *mtd;
	u_char *ptr;
	size_t len;

	mtd = __get_mtd_device(NULL, MINOR(dev));
	if (!mtd)
		return -ENODEV;

	if (!mtd->point)
		/* Can't do it, no function to point to the correct address */
		return -ENOSYS;

	if ((*mtd->point)(mtd, vma->vm_offset, vma->vm_end - vma->vm_start,
			  &len, &ptr) != 0)
		return -ENOSYS;

	vma->vm_start = (unsigned long) ptr;
	vma->vm_end = vma->vm_start + len;
	return 0;
}
static void mtdblock_request(RQFUNC_ARG)
{
	struct request *current_request;
	unsigned int res = 0;
	struct mtd_info *mtd;

	while (1) {
		/* Grab the request and unlink it from the request list;
		   INIT_REQUEST will execute a return if we are done. */
		INIT_REQUEST;
		current_request = CURRENT;

		if (MINOR(current_request->rq_dev) >= MAX_MTD_DEVICES) {
			printk("mtd: Unsupported device!\n");
			end_request(0);
			continue;
		}

		/* Grab our MTD structure */
		mtd = __get_mtd_device(NULL, MINOR(current_request->rq_dev));
		if (!mtd) {
			printk("MTD device %d doesn't appear to exist any more\n",
			       CURRENT_DEV);
			end_request(0);
			continue;
		}

		/* Verify the request is within the bounds of the device */
		if (current_request->sector << 9 > mtd->size ||
		    (current_request->sector + current_request->current_nr_sectors) << 9 > mtd->size) {
			printk("mtd: Attempt to read past end of device!\n");
			printk("size: %x, sector: %lx, nr_sectors %lx\n",
			       mtd->size, current_request->sector,
			       current_request->current_nr_sectors);
			end_request(0);
			continue;
		}

		/* Remove the request we are handling from the request list
		   so nobody messes with it */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		/* Now drop the lock that the ll_rw_blk functions grabbed for
		   us and process the request. This is necessary due to the
		   extreme time we spend processing it. */
		spin_unlock_irq(&io_request_lock);
#endif

		/* Handle the request */
		switch (current_request->cmd) {
			size_t retlen;

		case READ:
			if (MTD_READ(mtd, current_request->sector << 9,
				     current_request->current_nr_sectors << 9,
				     &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		case WRITE:
			/* printk("mtdblock_request WRITE sector=%d(%d)\n",
			   current_request->sector,
			   current_request->current_nr_sectors); */

			/* Refuse writes to a read-only device */
			if ((mtd->flags & MTD_CAP_RAM) == 0) {
				res = 0;
				break;
			}

			/* Do the write */
			if (MTD_WRITE(mtd, current_request->sector << 9,
				      current_request->current_nr_sectors << 9,
				      &retlen, current_request->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		/* Shouldn't happen */
		default:
			printk("mtd: unknown request\n");
			break;
		}

		/* Grab the lock and re-thread the item onto the linked list */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		spin_lock_irq(&io_request_lock);
#endif
		end_request(res);
	}
}