static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;
	static unsigned long swap_error_rs_time;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		if (printk_timed_ratelimit(&swap_error_rs_time,
					   SWAP_ERROR_LOG_RATE_MS))
			printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
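Every example here shares the same idiom: a static jiffies timestamp per call site, polled through printk_timed_ratelimit() so that a hot error path logs at most once per interval. Note that the interval argument is in milliseconds, not jiffies. Below is a minimal stand-alone sketch of the idiom; the module and the error path are hypothetical, not taken from any of the examples.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical hot error path: prints at most once every 5000 ms. */
static void hypothetical_error_path(int err)
{
	static unsigned long warn_time;	/* one timestamp per call site */

	if (printk_timed_ratelimit(&warn_time, 5000))
		pr_warn("recurring error %d (further reports suppressed)\n",
			err);
}

static int __init ratelimit_demo_init(void)
{
	int i;

	/* Hammer the path; only the first call in each 5 s window prints. */
	for (i = 0; i < 1000; i++)
		hypothetical_error_path(-EIO);
	return 0;
}
module_init(ratelimit_demo_init);

MODULE_LICENSE("GPL");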
Example #2
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int unaligned_aio = 0;
	int ret;

	trace_ext4_file_write(iocb->ki_filp->f_path.dentry, iocb->ki_left);
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
		   !is_sync_kiocb(iocb))) {
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
	}

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		static unsigned long unaligned_warn_time;

		/*
		 * Warn about this once per day.  Note that the interval
		 * argument to printk_timed_ratelimit() is in milliseconds,
		 * not jiffies, so don't scale by HZ here.
		 */
		if (printk_timed_ratelimit(&unaligned_warn_time,
					   24 * 60 * 60 * 1000))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);
		mutex_lock(ext4_aio_mutex(inode));
		ext4_aiodio_wait(inode);
	}

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	trace_file_write_done(iocb->ki_filp);
	return ret;
}
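For comparison: where a burst allowance is wanted instead of a strict one-message-per-interval gate, the kernel also provides include/linux/ratelimit.h. Below is a sketch of the same warning rewritten that way; it is only a fragment meant to replace the ratelimit test inside the unaligned_aio branch above, and the burst of 3 is illustrative. Unlike printk_timed_ratelimit(), DEFINE_RATELIMIT_STATE() takes its interval in jiffies, hence the HZ factor.

#include <linux/ratelimit.h>

		/* Allow a burst of up to 3 warnings per day, then drop. */
		static DEFINE_RATELIMIT_STATE(unaligned_rs,
					      24 * 60 * 60 * HZ, 3);

		if (__ratelimit(&unaligned_rs))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);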
Example #3
/**
 * modem_m6718_spi_send() - send a frame using the IPC protocol
 * @modem_spi_dev: pointer to modem driver information structure
 * @channel:       L2 channel to send on
 * @len:           length of data to send
 * @data:          pointer to buffer containing data
 *
 * Check that the requested channel is supported and open, queue a frame
 * containing the data on the appropriate link and ensure the state machine
 * is running to start the transfer.
 */
int modem_m6718_spi_send(struct modem_spi_dev *modem_spi_dev, u8 channel,
	u32 len, void *data)
{
	int err;
	struct ipc_link_context *context;

	/* Check the channel is both supported (in range) and open */
	if (channel >= ARRAY_SIZE(channels) || !channels[channel].open) {
		dev_err(modem_spi_dev->dev,
			"error: invalid channel (%d), discarding frame\n",
			channel);
		return -EINVAL;
	}

	context = &l1_context.device_context[channels[channel].link];
	if (context->state == NULL || context->state->id == IPC_SM_HALT) {
		static unsigned long linkfail_warn_time;
		if (printk_timed_ratelimit(&linkfail_warn_time, 60 * 1000))
			dev_err(modem_spi_dev->dev,
				"error: link %d for ch %d is not available, "
				"discarding frames\n",
				channels[channel].link, channel);
		return -ENODEV;
	}

	err = ipc_queue_push_frame(context, channel, len, data);
	if (err < 0)
		return err;

	if (ipc_util_link_is_idle(context)) {
		dev_dbg(modem_spi_dev->dev,
			"link %d is idle, kicking\n", channels[channel].link);
		ipc_sm_kick(IPC_SM_RUN_TX_REQ, context);
	} else {
		dev_dbg(modem_spi_dev->dev,
			"link %d is already running\n", channels[channel].link);
	}
	return 0;
}
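A hypothetical caller, for illustration only; the channel number and payload are invented, and modem_spi_dev is assumed to come from the driver's probe path. When the link is halted, the once-per-minute ratelimit above means most discarded frames return -ENODEV without a log line.

/* Hypothetical example: send a 4-byte payload on L2 channel 2. */
static int example_send(struct modem_spi_dev *modem_spi_dev)
{
	u8 payload[4] = { 0xde, 0xad, 0xbe, 0xef };
	int err;

	err = modem_m6718_spi_send(modem_spi_dev, 2, sizeof(payload),
				   payload);
	if (err < 0)
		dev_warn(modem_spi_dev->dev, "send failed (%d)\n", err);
	return err;
}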
Example #4
/* Ratelimit attempts to initialise FDMA */
static int init_fdma_nand_ratelimit(struct stm_nand_emi *data)
{
	if (printk_timed_ratelimit(&data->init_fdma_jiffies,  500))
		return init_fdma_nand(data);
	return -EBUSY;
}
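Because the helper returns -EBUSY whenever the 500 ms window is closed, callers can poll it. Below is a hypothetical retry loop; the wrapper and its 50 ms backoff are illustrative, while init_fdma_nand() and struct stm_nand_emi come from the driver above.

#include <linux/delay.h>

/* Hypothetical caller: poll until one real init attempt gets through. */
static int wait_for_fdma(struct stm_nand_emi *data)
{
	int err;

	do {
		err = init_fdma_nand_ratelimit(data);
		if (err == -EBUSY)
			msleep(50);	/* back off; window is 500 ms */
	} while (err == -EBUSY);

	return err;	/* result of the real init_fdma_nand() call */
}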