Example #1
/*
 * Handle actual transfers of data.
 */
static int sbullr_transfer (Sbull_Dev *dev, char *buf, size_t count,
                loff_t *offset, int rw)
{
    struct kiobuf *iobuf;
    int result;

    /* Only block alignment and size allowed */
    if ((*offset & SBULLR_SECTOR_MASK) || (count & SBULLR_SECTOR_MASK))
        return -EINVAL;
    if ((unsigned long) buf & SBULLR_SECTOR_MASK)
        return -EINVAL;

    /* Allocate an I/O vector */
    result = alloc_kiovec(1, &iobuf);
    if (result)
        return result;

    /* Map the user I/O buffer and do the I/O. */
    result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
    if (result) {
        free_kiovec(1, &iobuf);
        return result;
    }
    spin_lock(&dev->lock);
    result = sbullr_rw_iovec(dev, iobuf, rw, *offset >> SBULLR_SECTOR_SHIFT,
                    count >> SBULLR_SECTOR_SHIFT);
    spin_unlock(&dev->lock);

    /* Clean up and return. */
    unmap_kiobuf(iobuf);
    free_kiovec(1, &iobuf);
    if (result > 0)
        *offset += result << SBULLR_SECTOR_SHIFT;
    /* convert the sector count back to bytes; pass errors through unshifted */
    return (result < 0) ? result : result << SBULLR_SECTOR_SHIFT;
}
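The helper above is direction-agnostic, so the read and write file operations can be thin wrappers around it. A minimal sketch of the read side, assuming an sbull_devices array and an LDD2-style minor-number lookup (neither appears in this example):

static ssize_t sbullr_read(struct file *filp, char *buf, size_t size,
                           loff_t *off)
{
    /* sbull_devices and the MINOR()-based lookup are assumptions
       borrowed from the surrounding sbull driver, not shown here */
    Sbull_Dev *dev = sbull_devices + MINOR(filp->f_dentry->d_inode->i_rdev);
    return sbullr_transfer(dev, buf, size, off, READ);
}

The write side would differ only in passing WRITE as the rw argument.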
Example #2
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, struct file *f)
{
	struct inode *inode;
	static LIST_HEAD(kill_list);
	int error;

	f->f_flags = flags;
	f->f_mode = (flags+1) & O_ACCMODE;	/* 0/1/2 -> FMODE_READ|FMODE_WRITE bits */
	inode = dentry->d_inode;
	if (f->f_mode & FMODE_WRITE) {
		error = get_write_access(inode);
		if (error)
			goto cleanup_file;
	}

	f->f_dentry = dentry;
	f->f_vfsmnt = mnt;
	f->f_pos = 0;
	f->f_reada = 0;
	f->f_op = fops_get(inode->i_fop);
	file_move(f, &inode->i_sb->s_files);

	/* preallocate kiobuf for O_DIRECT */
	f->f_iobuf = NULL;
	f->f_iobuf_lock = 0;
	if (f->f_flags & O_DIRECT) {
		error = alloc_kiovec(1, &f->f_iobuf);
		if (error)
			goto cleanup_all;
	}

	if (f->f_op && f->f_op->open) {
		error = f->f_op->open(inode, f);
		if (error)
			goto cleanup_all;
	}
	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

	/* NB: we're sure to have correct a_ops only after f_op->open */
	if (f->f_flags & O_DIRECT) {
		if (!inode->i_mapping || !inode->i_mapping->a_ops ||
		    !(inode->i_mapping->a_ops->direct_IO ||
		      inode->i_mapping->a_ops->direct_sector_IO)) {
			fput(f);
			f = ERR_PTR(-EINVAL);
		}
	}

	return f;

cleanup_all:
	if (f->f_iobuf)
		free_kiovec(1, &f->f_iobuf);
	fops_put(f->f_op);
	if (f->f_mode & FMODE_WRITE)
		put_write_access(inode);
	file_move(f, &kill_list); /* out of the way.. */
	f->f_dentry = NULL;
	f->f_vfsmnt = NULL;
cleanup_file:
	put_filp(f);
	dput(dentry);
	mntput(mnt);
	return ERR_PTR(error);
}
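Example #2 takes an already-allocated struct file, so the exported entry point only needs to allocate one and delegate. A minimal sketch of that wrapper (compare Example #4 below, which inlines the same logic instead of calling __dentry_open()):

struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
{
	struct file *f = get_empty_filp();

	if (!f) {
		/* __dentry_open() consumes the dentry/mnt references on its
		   error paths, so the failure path here must drop them too */
		dput(dentry);
		mntput(mnt);
		return ERR_PTR(-ENFILE);
	}
	return __dentry_open(dentry, mnt, flags, f);
}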
Example #3
/* This is the kernel thread that empties the write queue to disk */
static int write_queue_task(void *data)
{
  int err;
  struct task_struct *tsk = current;
  struct kiobuf *iobuf;

  DECLARE_WAITQUEUE(wait, tsk);
  DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);
  daemonize();
  strcpy(tsk->comm, "blkmtdd");
  tsk->tty = NULL;
  spin_lock_irq(&tsk->sigmask_lock);
  sigfillset(&tsk->blocked);
  recalc_sigpending(tsk);
  spin_unlock_irq(&tsk->sigmask_lock);
  exit_sighand(tsk);

  if(alloc_kiovec(1, &iobuf)) {
    /* signal exit so module cleanup does not block on thread_sem */
    up(&thread_sem);
    return 0;
  }
  DEBUG(2, "blkmtd: writetask: entering main loop\n");
  add_wait_queue(&thr_wq, &wait);

  while(1) {
    spin_lock(&mbd_writeq_lock);

    if(!write_queue_cnt) {
      /* If nothing is queued, wake up anyone waiting for space in the
	 queue, then sleep for 2*HZ */
      spin_unlock(&mbd_writeq_lock);
      DEBUG(3, "blkmtd: writetask: queue empty\n");
      if(waitqueue_active(&mtbd_sync_wq))
	 wake_up(&mtbd_sync_wq);
      interruptible_sleep_on_timeout(&thr_wq, 2*HZ);
      DEBUG(3, "blkmtd: writetask: woken up\n");
      if(write_task_finish)
	break;
    } else {
      /* we have stuff to write */
      mtdblkdev_write_queue_t *item = &write_queue[write_queue_tail];
      struct page **pages = item->pages;
      int pagecnt = item->pagecnt;
      int pagenr = item->pagenr;
      int i;
      int max_sectors = KIO_MAX_SECTORS >> (item->rawdevice->sector_bits - 9);
      kdev_t dev = to_kdev_t(item->rawdevice->binding->bd_dev);

      DEBUG(3, "blkmtd: writetask: got %d queue items\n", write_queue_cnt);
      set_current_state(TASK_RUNNING);
      spin_unlock(&mbd_writeq_lock);

      DEBUG(2, "blkmtd: write_task: writing pagenr = %d pagecnt = %d", 
	    item->pagenr, item->pagecnt);

      iobuf->offset = 0;
      iobuf->locked = 1;

      /* Loop through all the pages to be written in the queue item,
	 remembering that we can only write KIO_MAX_SECTORS at a time */

      while(pagecnt) {
	int sectornr = pagenr << (PAGE_SHIFT - item->rawdevice->sector_bits);
	int sectorcnt = pagecnt << (PAGE_SHIFT - item->rawdevice->sector_bits);
	int cursectors = (sectorcnt < max_sectors) ? sectorcnt : max_sectors;
	int cpagecnt = (cursectors << item->rawdevice->sector_bits) + PAGE_SIZE-1;
	cpagecnt >>= PAGE_SHIFT;
	
	for(i = 0; i < cpagecnt; i++)
	  iobuf->maplist[i] = *(pages++);

	for(i = 0; i < cursectors; i++) {
	  iobuf->blocks[i] = sectornr++;
	}

	iobuf->nr_pages = cpagecnt;
	iobuf->length = cursectors << item->rawdevice->sector_bits;
	DEBUG(3, "blkmtd: write_task: about to kiovec\n");
	err = brw_kiovec(WRITE, 1, &iobuf, dev, iobuf->blocks, item->rawdevice->sector_size);
	DEBUG(3, "bklmtd: write_task: done, err = %d\n", err);
	if(err != (cursectors << item->rawdevice->sector_bits)) {
	  /* if an error occurred, set this to exit the loop */
	  pagecnt = 0;
	} else {
	  pagenr += cpagecnt;
	  pagecnt -= cpagecnt;
	}
      }

      /* free the pages used in the write, and the page list owned by the
	 write queue item */
      iobuf->locked = 0;
      spin_lock(&mbd_writeq_lock);
      write_queue_cnt--;
      write_queue_tail++;
      write_queue_tail %= WRITE_QUEUE_SZ;
      for(i = 0; i < item->pagecnt; i++) {
	UnlockPage(item->pages[i]);
	__free_pages(item->pages[i], 0);
      }
      kfree(item->pages);
      item->pages = NULL;
      spin_unlock(&mbd_writeq_lock);
      /* Tell others there is some space in the write queue */
      if(waitqueue_active(&mtbd_sync_wq))
	wake_up(&mtbd_sync_wq);
    }
  }
  remove_wait_queue(&thr_wq, &wait);
  DEBUG(1, "blkmtd: writetask: exiting\n");
  free_kiovec(1, &iobuf);
  /* Tell people we have exited */
  up(&thread_sem);
  return 0;
}
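The thread signals its exit by releasing thread_sem, so whatever launches it is expected to hold that semaphore down until shutdown. A minimal start-up sketch; the semaphore initialisation and the clone flags are assumptions, since neither appears in this listing:

static struct semaphore thread_sem;  /* assumed file-scope, see up() above */

static int __init start_write_task(void)
{
  /* released by the thread's up(&thread_sem) when it exits */
  init_MUTEX_LOCKED(&thread_sem);
  if(kernel_thread(write_queue_task, NULL,
		   CLONE_FS | CLONE_FILES | CLONE_SIGHAND) < 0)
    return -EAGAIN;
  return 0;
}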
Example #4
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
{
    struct file * f;
    struct inode *inode;
    static LIST_HEAD(kill_list);
    int error;

    error = -ENFILE;
    f = get_empty_filp();
    if (!f)
        goto cleanup_dentry;
    f->f_flags = flags;
    f->f_mode = (flags+1) & O_ACCMODE;  /* 0/1/2 -> FMODE_READ|FMODE_WRITE bits */
    inode = dentry->d_inode;
    if (f->f_mode & FMODE_WRITE) {
        error = get_write_access(inode);
        if (error)
            goto cleanup_file;
    }

    f->f_dentry = dentry;
    f->f_vfsmnt = mnt;
    f->f_pos = 0;
    f->f_reada = 0;
    f->f_op = fops_get(inode->i_fop);
    file_move(f, &inode->i_sb->s_files);

    /* preallocate kiobuf for O_DIRECT */
    f->f_iobuf = NULL;
    f->f_iobuf_lock = 0;
    if (f->f_flags & O_DIRECT) {
        error = alloc_kiovec(1, &f->f_iobuf);
        if (error)
            goto cleanup_all;
    }

    if (f->f_op && f->f_op->open) {
        error = f->f_op->open(inode, f);
        if (error)
            goto cleanup_all;
    }
    f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

    return f;

cleanup_all:
    if (f->f_iobuf)
        free_kiovec(1, &f->f_iobuf);
    fops_put(f->f_op);
    if (f->f_mode & FMODE_WRITE)
        put_write_access(inode);
    file_move(f, &kill_list); /* out of the way.. */
    f->f_dentry = NULL;
    f->f_vfsmnt = NULL;
cleanup_file:
    put_filp(f);
cleanup_dentry:
    dput(dentry);
    mntput(mnt);
    return ERR_PTR(error);
}
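Callers normally reach dentry_open() through path lookup. A simplified sketch in the style of filp_open(); the direct open_namei() call is an assumption and skips the access-mode adjustment a real opener performs:

/* hypothetical helper: resolve a path and hand the resulting dentry and
   vfsmount (whose references dentry_open() consumes) to dentry_open() */
static struct file *open_path(const char *path, int flags, int mode)
{
    struct nameidata nd;
    int error = open_namei(path, flags, mode, &nd);

    if (error)
        return ERR_PTR(error);
    return dentry_open(nd.dentry, nd.mnt, flags);
}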
Example #5
/* readpage() - reads one page from the block device */                 
static int blkmtd_readpage(struct file *file, struct page *page)
{  
  int err;
  int sectornr, sectors, i;
  struct kiobuf *iobuf;
  mtd_raw_dev_data_t *rawdevice = (mtd_raw_dev_data_t *)file->private_data;
  kdev_t dev;

  if(!rawdevice) {
    printk("blkmtd: readpage: PANIC file->private_data == NULL\n");
    return -EIO;
  }
  dev = to_kdev_t(rawdevice->binding->bd_dev);

  DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n",
	bdevname(dev), page, page->index);

  if(Page_Uptodate(page)) {
    DEBUG(1, "blkmtd: readpage page %ld is already upto date\n", page->index);
    UnlockPage(page);
    return 0;
  }

  ClearPageUptodate(page);
  ClearPageError(page);

  /* see if page is in the outgoing write queue */
  spin_lock(&mbd_writeq_lock);
  if(write_queue_cnt) {
    int i = write_queue_tail;
    while(i != write_queue_head) {
      mtdblkdev_write_queue_t *item = &write_queue[i];
      if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) {
	/* yes it is; index is the page's offset within the queue item */
	int index = page->index - item->pagenr;
	DEBUG(1, "blkmtd: readpage: found page %ld in outgoing write queue\n",
	      page->index);
	if(item->iserase) {
	  memset(page_address(page), 0xff, PAGE_SIZE);
	} else {
	  memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE);
	}
	SetPageUptodate(page);
	flush_dcache_page(page);
	UnlockPage(page);
	spin_unlock(&mbd_writeq_lock);
	return 0;
      }
      i++;
      i %= WRITE_QUEUE_SZ;
    }
  }
  spin_unlock(&mbd_writeq_lock);

  DEBUG(3, "blkmtd: readpage: getting kiovec\n");
  err = alloc_kiovec(1, &iobuf);
  if (err) {
    return err;
  }
  iobuf->offset = 0;
  iobuf->nr_pages = 1;
  iobuf->length = PAGE_SIZE;
  iobuf->locked = 1;
  iobuf->maplist[0] = page;
  sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits);
  sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits);
  DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors);
  for(i = 0; i < sectors; i++) {
    iobuf->blocks[i] = sectornr++;
  }

  DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n");
  err = brw_kiovec(READ, 1, &iobuf, dev, iobuf->blocks, rawdevice->sector_size);
  DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);
  iobuf->locked = 0;
  free_kiovec(1, &iobuf);
  if(err != PAGE_SIZE) {
    printk("blkmtd: readpage: error reading page %ld\n", page->index);
    memset(page_address(page), 0, PAGE_SIZE);
    SetPageError(page);
    err = -EIO;
  } else {
    DEBUG(3, "blkmtd: readpage: setting page upto date\n");
    SetPageUptodate(page);
    err = 0;
  }
  flush_dcache_page(page);
  UnlockPage(page);
  DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);
  return 0;
}
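readpage() expects a locked page and unlocks it itself, so callers normally go through the page cache instead of invoking it directly. A hedged sketch of such a call site; the rawdevice->as address_space field and the helper name are assumptions:

/* hypothetical helper: fetch one page through the page cache, using
   blkmtd_readpage() as the filler function */
static struct page *blkmtd_get_page(struct file *file, unsigned long index)
{
  mtd_raw_dev_data_t *rawdevice = file->private_data;
  struct page *page = read_cache_page(&rawdevice->as, index,
				      (filler_t *)blkmtd_readpage, file);
  if(!IS_ERR(page)) {
    wait_on_page(page);
    if(!Page_Uptodate(page)) {
      page_cache_release(page);
      page = ERR_PTR(-EIO);
    }
  }
  return page;
}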