Example #1
static int wrapfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err = -EIO;
	struct inode *inode;
	struct inode *lower_inode;
	struct page *lower_page;
	struct address_space *lower_mapping; /* lower inode mapping */
	gfp_t mask;
	char *lower_page_data = NULL;
/*#ifdef WRAPFS_CRYPTO
	char *enc_buf = NULL;
#endif*/
	wrapfs_debug_aops(
		WRAPFS_SB(page->mapping->host->i_sb)->wrapfs_debug_a_ops, "");
	wrapfs_debug("");
	BUG_ON(!PageUptodate(page));
	wrapfs_debug("");
	inode = page->mapping->host;

	/* if no lower inode, nothing to do */
	if (!inode || !WRAPFS_I(inode) || !WRAPFS_I(inode)->lower_inode) {
		err = 0;
		goto out;
	}
	lower_inode = wrapfs_lower_inode(inode);
	lower_mapping = lower_inode->i_mapping;

	/*
	 * find lower page (returns a locked page)
	 *
	 * We turn off __GFP_FS while we look for or create a new lower
	 * page.  This prevents a recursion into the file system code, which
	 * under memory pressure conditions could lead to a deadlock.  This
	 * is similar to how the loop driver behaves (see loop_set_fd in
	 * drivers/block/loop.c).  If we can't find the lower page, we
	 * redirty our page and return "success" so that the VM will call us
	 * again in the (hopefully near) future.
	 */
	mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS);
	lower_page = find_or_create_page(lower_mapping, page->index, mask);
	if (!lower_page) {
		err = 0;
		set_page_dirty(page);
		goto out;
	}
	lower_page_data = (char *)kmap(lower_page);

	/* copy page data from our upper page to the lower page */
	copy_highpage(lower_page, page);
	flush_dcache_page(lower_page);
	SetPageUptodate(lower_page);
	set_page_dirty(lower_page);

/*#ifdef WRAPFS_CRYPTO
	enc_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
	if (enc_buf == NULL) {
		wrapfs_debug("No memory!!");
		err = -ENOMEM;
		goto out_release;
	}
	err = my_encrypt(lower_page_data, PAGE_CACHE_SIZE, enc_buf,
			PAGE_CACHE_SIZE,
			WRAPFS_SB(inode->i_sb)->key,
			WRAPFS_CRYPTO_KEY_LEN);
	if (err < 0) {
		wrapfs_debug("encrypt error!!");
		kfree(enc_buf);
		err = -EINVAL;
		goto out_release;
	}
	memcpy(lower_page_data, enc_buf, PAGE_CACHE_SIZE);
	kfree(enc_buf);
#endif*/

	/*
	 * Call lower writepage (expects locked page).  However, if we are
	 * called with wbc->for_reclaim, then the VFS/VM just wants to
	 * reclaim our page.  Therefore, we don't need to call the lower
	 * ->writepage: just copy our data to the lower page (already done
	 * above), then mark the lower page dirty and unlock it, and return
	 * success.
	 */
	/*if (wbc->for_reclaim) {
		unlock_page(lower_page);
		goto out_release;
	}*/

	BUG_ON(!lower_mapping->a_ops->writepage);
	wait_on_page_writeback(lower_page); /* prevent multiple writers */
	clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */
	err = lower_mapping->a_ops->writepage(lower_page, wbc);
	if (err < 0)
		goto out_release;

	/*
	 * Lower file systems such as ramfs and tmpfs may return
	 * AOP_WRITEPAGE_ACTIVATE so that the VM won't try to (pointlessly)
	 * write the page again for a while.  But those lower file systems
	 * also set the page dirty bit back again.  Since we successfully
	 * copied our page data to the lower page, then the VM will come
	 * back to the lower page (directly) and try to flush it.  So we can
	 * save the VM the hassle of coming back to our page and trying to
	 * flush too.  Therefore, we don't re-dirty our own page, and we
	 * never return AOP_WRITEPAGE_ACTIVATE back to the VM (we consider
	 * this a success).
	 *
	 * We also unlock the lower page if the lower ->writepage returned
	 * AOP_WRITEPAGE_ACTIVATE.  (This "anomalous" behaviour may be
	 * addressed in future shmem/VM code.)
	 */
	if (err == AOP_WRITEPAGE_ACTIVATE) {
		err = 0;
		unlock_page(lower_page);
	}

out_release:
	kunmap(lower_page);
	/* b/c find_or_create_page increased refcnt */
	page_cache_release(lower_page);
out:
	/*
	 * We unlock our page unconditionally, because we never return
	 * AOP_WRITEPAGE_ACTIVATE.
	 */
	unlock_page(page);
	wrapfs_debug_aops(WRAPFS_SB(inode->i_sb)->wrapfs_debug_a_ops,
				"err : %d", err);
	return err;
}
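The comment above describes the one non-obvious step in this writepage path: the lower page is looked up with __GFP_FS cleared, so the allocation can never recurse back into filesystem code under memory pressure. A minimal sketch of just that step, assuming a generic stacking filesystem (stackfs_grab_lower_page is a hypothetical helper name, not part of wrapfs):

#include <linux/pagemap.h>

/* Look up or create the lower page without allowing FS re-entry. */
static struct page *stackfs_grab_lower_page(struct address_space *lower_mapping,
					    pgoff_t index)
{
	/* Clearing __GFP_FS mirrors the loop driver: a lookup done under
	 * memory pressure must not recurse into filesystem writeback. */
	gfp_t mask = mapping_gfp_mask(lower_mapping) & ~__GFP_FS;

	/* Returns the page locked and with an extra reference, or NULL. */
	return find_or_create_page(lower_mapping, index, mask);
}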
static int ext4_destroy_inline_data_nolock(handle_t *handle,
					   struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = 0, },
	};
	struct ext4_xattr_info i = {
		.name_index = EXT4_XATTR_INDEX_SYSTEM,
		.name = EXT4_XATTR_SYSTEM_DATA,
		.value = NULL,
		.value_len = 0,
	};
	int error;

	if (!ei->i_inline_off)
		return 0;

	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		return error;

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto out;

	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto out;

	error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
	if (error)
		goto out;

	memset((void *)ext4_raw_inode(&is.iloc)->i_block,
		0, EXT4_MIN_INLINE_DATA_SIZE);

	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				      EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		if (S_ISDIR(inode->i_mode) ||
		    S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}
	ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);

	get_bh(is.iloc.bh);
	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

	EXT4_I(inode)->i_inline_off = 0;
	EXT4_I(inode)->i_inline_size = 0;
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
out:
	brelse(is.iloc.bh);
	if (error == -ENODATA)
		error = 0;
	return error;
}

static int ext4_read_inline_page(struct inode *inode, struct page *page)
{
	void *kaddr;
	int ret = 0;
	size_t len;
	struct ext4_iloc iloc;

	BUG_ON(!PageLocked(page));
	BUG_ON(!ext4_has_inline_data(inode));
	BUG_ON(page->index);

	if (!EXT4_I(inode)->i_inline_off) {
		ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
			     inode->i_ino);
		goto out;
	}

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		goto out;

	len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
	kaddr = kmap_atomic(page);
	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	zero_user_segment(page, len, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	brelse(iloc.bh);

out:
	return ret;
}
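ext4_read_inline_page() above follows a fill-then-publish sequence that recurs throughout these examples: copy the payload through a temporary kernel mapping, zero the remainder of the page, flush the D-cache, and only then mark the page uptodate. A hedged, generic sketch of that sequence (fill_page_and_publish and its src/len parameters are illustrative, not ext4 API):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void fill_page_and_publish(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);			/* copy the payload */
	kunmap_atomic(kaddr);

	zero_user_segment(page, len, PAGE_CACHE_SIZE);	/* zero the tail */
	flush_dcache_page(page);	/* make the bytes visible to user mappings */
	SetPageUptodate(page);		/* publish only after the flush */
}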
Example #3
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data	*dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host	*host = dma_data->host;
	unsigned long		flags;
	struct mmc_request	*mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		printk(KERN_ERR "msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush  */
		if (result & DMOV_RSLT_ERROR)
			printk(KERN_ERR "%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			printk(KERN_ERR "%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			printk(KERN_ERR
			       "Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
             || mrq->data->error) {

		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}
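The user_pages branch above shows the usual rule for DMA into pages that user space may also have mapped: after the transfer completes and the buffer is unmapped, flush the D-cache for every page in the scatterlist. A generic sketch of that loop (flush_sg_dcache is an assumed helper name, not from this driver):

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void flush_sg_dcache(struct scatterlist *sg, int nents)
{
	int i;

	/* DMA wrote to these pages behind the CPU caches; flush each one so
	 * user-space mappings see the new data on aliasing D-cache CPUs. */
	for (i = 0; i < nents; i++, sg++)
		flush_dcache_page(sg_page(sg));
}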
int isp_process_mem_data(struct isp_mem_data *data)
{
	int i;
	int ret = -1;
	struct isp_mem_data *ppreview_user = \
		(struct isp_mem_data *)data;
	struct isp_mem_data preview_param;
	u32 input_buffer_size, output_buffer_size;
	u32 input_nr_pages, output_nr_pages;
	struct page **input_pages = NULL;
	struct page **output_pages = NULL;
	unsigned long isp_addr_in = 0;
	unsigned long  isp_addr_out = 0;
	unsigned long  isp_addr_tmp = 0;
	unsigned long timeout;
	struct isp_mem_resize_data resizer_param;
	u16 cropadjust = 0;

	if (ppreview_user == NULL) {
		printk(KERN_ERR "ISP_PROC_ERR: Invalid user data!\n");
		return -EINVAL;
	}

	memcpy(&preview_param, ppreview_user, \
		sizeof(struct isp_mem_data));

	DPRINTK_ISPPROC("input(%d-%d) - output(%d-%d)\n",
		preview_param.input_width,
		preview_param.input_height,
		preview_param.output_width,
		preview_param.output_height);

	DPRINTK_ISPPROC("start(%d-%d) - end(%d-%d)\n",
		preview_param.left,
		preview_param.top,
		preview_param.crop_width,
		preview_param.crop_height);

	if (ppreview_user->datain == 0 || ppreview_user->dataout == 0)
		return -EINVAL;

	isppreview_enable(0);
	ispresizer_enable(0);
	timeout = jiffies + msecs_to_jiffies(200);
	while (isppreview_busy() ||
			ispresizer_busy()) {
		if (time_after(jiffies, timeout))
			return -EINVAL;
		msleep(1);
	}

	isppreview_save_context();
	ispresizer_save_context();
	isppreview_free();
	ispresizer_free();
	isppreview_request();
	ispresizer_request();

	/* set data path before configuring modules. */
	isppreview_update_datapath(PRV_RAW_MEM, PREVIEW_MEM);
	ispresizer_config_datapath(RSZ_MEM_YUV, 0);

	ret = isppreview_try_size(preview_param.input_width,
		preview_param.input_height,
		&preview_param.output_width,
		&preview_param.output_height);
	if (ret < 0)
		goto exit_cleanup;
	ret = isppreview_config_size(preview_param.input_width,
		preview_param.input_height,
		preview_param.output_width,
		preview_param.output_height);
	if (ret < 0)
		goto exit_cleanup;

	input_buffer_size = ALIGN_TO(ppreview_user->input_width* \
		ppreview_user->input_height*2 , 0x100);
	input_pages = map_user_memory_to_kernel(preview_param.datain,
		input_buffer_size, &input_nr_pages);
	if (input_pages == NULL) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: memory allocation failed\n");
		goto exit_cleanup;
	}

	output_buffer_size = ALIGN_TO(ppreview_user->output_width* \
		ppreview_user->output_height*2, 0x1000);
	output_pages = map_user_memory_to_kernel(preview_param.dataout,
		output_buffer_size, &output_nr_pages);
	if (output_pages == NULL) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: memory allocation failed\n");
		goto exit_cleanup;
	}
	for (i = 0; i < output_nr_pages; ++i)
		flush_dcache_page(output_pages[i]);

	isp_addr_in = ispmmu_vmap_pages(input_pages, input_nr_pages);
	if (IS_ERR((void *)isp_addr_in)) {
		isp_addr_in = 0;
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: isp mmu map failed\n");
		goto exit_cleanup;
	}
	isp_addr_out = ispmmu_vmap_pages(output_pages, output_nr_pages);
	if (IS_ERR((void *)isp_addr_out)) {
		isp_addr_out = 0;
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: isp mmu map failed\n");
		goto exit_cleanup;
	}

	/* This buffer must be allocated and mapped to
		the ISP MMU previously. */
	isp_addr_tmp = isp_tmp_buf_addr();
	if (isp_addr_tmp == 0) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: Invalid isp tmp buffer address!\n");
		goto exit_cleanup;
	}

	isppreview_config_inlineoffset(ppreview_user->input_width * 2);
	isppreview_set_inaddr(isp_addr_in);
	isppreview_set_outaddr(isp_addr_tmp);

	resizer_param.input_width = preview_param.output_width;
	resizer_param.input_height = preview_param.output_height;
	resizer_param.output_width = ppreview_user->output_width;
	resizer_param.output_height = ppreview_user->output_height;

	if ((preview_param.left == 0) && (preview_param.top == 0)) {
		ret = ispresizer_try_size(&resizer_param.input_width,
				&resizer_param.input_height,
				&resizer_param.output_width,
				&resizer_param.output_height);
		if (ret < 0)
			goto exit_cleanup;
		ret = ispresizer_config_size(resizer_param.input_width,
				resizer_param.input_height,
				resizer_param.output_width,
				resizer_param.output_height);
		if (ret < 0)
			goto exit_cleanup;
		ispresizer_set_inaddr(isp_addr_tmp);
	} else {
		ispresizer_trycrop(preview_param.left,
				preview_param.top,
				preview_param.crop_width,
				preview_param.crop_height,
				resizer_param.output_width,
				resizer_param.output_height);

		ispresizer_applycrop();

		/* account for pixel loss when using crop*/
		if ((preview_param.input_height > preview_param.output_height)
				&& (preview_param.top > 16))
			cropadjust = 8;
		else
			cropadjust = 0;

		/* pixel alignment in 32bit space, vertical must be 0 per TRM */
		isp_reg_writel(((preview_param.left%16) <<
					ISPRSZ_IN_START_HORZ_ST_SHIFT) |
					(0 <<
					ISPRSZ_IN_START_VERT_ST_SHIFT),
					OMAP3_ISP_IOMEM_RESZ,
					ISPRSZ_IN_START);

		/* Align input address for cropping, per TRM  */
		ispresizer_set_inaddr(isp_addr_tmp -
				(resizer_param.input_width*2*cropadjust) +
				(preview_param.top*resizer_param.input_width*2)
				+ ((preview_param.left/16)*32));
	}

	ispresizer_set_outaddr(isp_addr_out);
	ispresizer_config_inlineoffset(
		ALIGN_TO(resizer_param.input_width*2, 32));

	if (isp_set_callback(CBK_PREV_DONE, prv_isr,
			(void *) NULL, (void *)NULL) != 0) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: Error setting PRV callback.\n");
		goto exit_cleanup;
	}

	if (isp_set_callback(CBK_RESZ_DONE, rsz_isr,
			(void *) NULL, (void *)NULL) != 0) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_PROC_ERR: Error setting RSZ callback.\n");
		goto exit_cleanup;
	}

	isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_wfc.done = 0;

	/* start preview engine. */
	isppreview_enable(1);

	ret = wait_for_completion_timeout(&isp_wfc, msecs_to_jiffies(1000));
	if (!ret) {
		isppreview_enable(0);
		ispresizer_enable(0);
	}

	timeout = jiffies + msecs_to_jiffies(50);
	while (ispresizer_busy()) {
		msleep(5);
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ISP_RESZ_ERR: Resizer still busy");
			break;
		}
	}

	isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_unset_callback(CBK_PREV_DONE);
	isp_unset_callback(CBK_RESZ_DONE);

exit_cleanup:
	isppreview_restore_context();
	ispresizer_restore_context();

	if (isp_addr_in != 0)
		ispmmu_vunmap(isp_addr_in);
	if (isp_addr_out != 0)
		ispmmu_vunmap(isp_addr_out);
	if (input_pages != NULL) {
		unmap_user_memory_from_kernel(input_pages, input_nr_pages);
		kfree(input_pages);
	}
	if (output_pages != NULL) {
		unmap_user_memory_from_kernel(output_pages, output_nr_pages);
		kfree(output_pages);
	}

	DPRINTK_ISPPROC("exit.\n");

	return ret;
}
Example #5
static void
mmc_data_transfer(unsigned long h)
{
    struct asic3_mmc_host *host = (struct asic3_mmc_host *)h;
    struct mmc_data *data = host->data;
    unsigned short *buf;
    int count;
    /* unsigned long flags; */

    if(!data){
        printk(KERN_WARNING DRIVER_NAME ": Spurious Data IRQ\n");
        return;
    }

    /* local_irq_save(flags); */
    /* buf = kmap_atomic(host->sg_ptr->page, KM_BIO_SRC_IRQ); */
    buf = kmap(host->sg_ptr->page);
    buf += host->sg_ptr->offset/2 + host->sg_off/2;

    /*
     * Ensure we don't read more than one block. The chip will interrupt us
     * when the next block is available.
     */
    count = host->sg_ptr->length - host->sg_off;
    if(count > data->blksz) {
        count = data->blksz;
    }

    DBG("count: %08x, page: %p, offset: %08x flags %08x\n",
        count, host->sg_ptr->page, host->sg_off, data->flags);

    host->sg_off += count;

    /* Transfer the data */
    if(data->flags & MMC_DATA_READ) {
        while(count > 0) {
            /* Read two bytes from SD/MMC controller. */
            *buf = ASIC3_MMC_REG(host, SD_CTRL, DataPort);
            buf++;
            count -= 2;
        }
	flush_dcache_page(host->sg_ptr->page);
    } else {
        while(count > 0) {
            /* Write two bytes to SD/MMC controller. */
            ASIC3_MMC_REG(host, SD_CTRL, DataPort) = *buf;
            buf++;
            count -= 2;
        }
    }

    /* kunmap_atomic(host->sg_ptr->page, KM_BIO_SRC_IRQ); */
    kunmap(host->sg_ptr->page);
    /* local_irq_restore(flags); */
    if(host->sg_off == host->sg_ptr->length) {
        host->sg_ptr++;
        host->sg_off = 0;
        --host->sg_len;
    }

    return;
}
Example #6
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
    struct sock *sk;
    struct packet_opt *po;
    struct sockaddr_ll *sll;
    struct tpacket_hdr *h;
    u8 * skb_head = skb->data;
    int skb_len = skb->len;
    unsigned snaplen;
    unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
    unsigned short macoff, netoff;
    struct sk_buff *copy_skb = NULL;

    if (skb->pkt_type == PACKET_LOOPBACK)
        goto drop;

    sk = (struct sock *) pt->data;
    po = sk->protinfo.af_packet;

    if (dev->hard_header) {
        if (sk->type != SOCK_DGRAM)
            skb_push(skb, skb->data - skb->mac.raw);
        else if (skb->pkt_type == PACKET_OUTGOING) {
            /* Special case: outgoing packets have ll header at head */
            skb_pull(skb, skb->nh.raw - skb->data);
            if (skb->ip_summed == CHECKSUM_HW)
                status |= TP_STATUS_CSUMNOTREADY;
        }
    }

    snaplen = skb->len;

#ifdef CONFIG_FILTER
    if (sk->filter) {
        unsigned res = snaplen;
        struct sk_filter *filter;

        bh_lock_sock(sk);
        if ((filter = sk->filter) != NULL)
            res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
        bh_unlock_sock(sk);

        if (res == 0)
            goto drop_n_restore;
        if (snaplen > res)
            snaplen = res;
    }
#endif

    if (sk->type == SOCK_DGRAM) {
        macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
    } else {
        unsigned maclen = skb->nh.raw - skb->data;
        netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
        macoff = netoff - maclen;
    }

    if (macoff + snaplen > po->frame_size) {
        if (po->copy_thresh &&
                atomic_read(&sk->rmem_alloc) + skb->truesize < (unsigned)sk->rcvbuf) {
            if (skb_shared(skb)) {
                copy_skb = skb_clone(skb, GFP_ATOMIC);
            } else {
                copy_skb = skb_get(skb);
                skb_head = skb->data;
            }
            if (copy_skb)
                skb_set_owner_r(copy_skb, sk);
        }
        snaplen = po->frame_size - macoff;
        if ((int)snaplen < 0)
            snaplen = 0;
    }
    if (snaplen > skb->len-skb->data_len)
        snaplen = skb->len-skb->data_len;

    spin_lock(&sk->receive_queue.lock);
    h = po->iovec[po->head];

    if (h->tp_status)
        goto ring_is_full;
    po->head = po->head != po->iovmax ? po->head+1 : 0;
    po->stats.tp_packets++;
    if (copy_skb) {
        status |= TP_STATUS_COPY;
        __skb_queue_tail(&sk->receive_queue, copy_skb);
    }
    if (!po->stats.tp_drops)
        status &= ~TP_STATUS_LOSING;
    spin_unlock(&sk->receive_queue.lock);

    memcpy((u8*)h + macoff, skb->data, snaplen);

    h->tp_len = skb->len;
    h->tp_snaplen = snaplen;
    h->tp_mac = macoff;
    h->tp_net = netoff;
    h->tp_sec = skb->stamp.tv_sec;
    h->tp_usec = skb->stamp.tv_usec;

    sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
    sll->sll_halen = 0;
    if (dev->hard_header_parse)
        sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
    sll->sll_family = AF_PACKET;
    sll->sll_hatype = dev->type;
    sll->sll_protocol = skb->protocol;
    sll->sll_pkttype = skb->pkt_type;
    sll->sll_ifindex = dev->ifindex;

    h->tp_status = status;
    mb();

    {
        struct page *p_start, *p_end;
        u8 *h_end = (u8 *)h + macoff + snaplen - 1;

        p_start = virt_to_page(h);
        p_end = virt_to_page(h_end);
        while (p_start <= p_end) {
            flush_dcache_page(p_start);
            p_start++;
        }
    }

    sk->data_ready(sk, 0);

drop_n_restore:
    if (skb_head != skb->data && skb_shared(skb)) {
        skb->data = skb_head;
        skb->len = skb_len;
    }
drop:
    kfree_skb(skb);
    return 0;

ring_is_full:
    po->stats.tp_drops++;
    spin_unlock(&sk->receive_queue.lock);

    sk->data_ready(sk, 0);
    if (copy_skb)
        kfree_skb(copy_skb);
    goto drop_n_restore;
}
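The block near the end of tpacket_rcv() flushes every page touched by the frame it just copied into the mmap()ed ring. A small hedged sketch of the same idea for an arbitrary lowmem kernel buffer (flush_dcache_kaddr_range is a made-up name; it assumes virt_to_page() is valid for the whole range):

#include <linux/highmem.h>
#include <linux/mm.h>

static void flush_dcache_kaddr_range(void *start, size_t len)
{
	struct page *p = virt_to_page(start);
	struct page *p_end = virt_to_page((u8 *)start + len - 1);

	/* Flush every page the range spans so a user mapping of the same
	 * memory (e.g. a packet ring) observes the freshly written bytes. */
	while (p <= p_end)
		flush_dcache_page(p++);
}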
Example #7
File: aops.c  Project: nemumu/linux
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
    struct gfs2_inode *ip = GFS2_I(mapping->host);
    struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
    unsigned requested = 0;
    int alloc_required;
    int error = 0;
    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
    unsigned from = pos & (PAGE_CACHE_SIZE - 1);
    struct page *page;

    gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
    error = gfs2_glock_nq(&ip->i_gh);
    if (unlikely(error))
        goto out_uninit;
    if (&ip->i_inode == sdp->sd_rindex) {
        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                   GL_NOCACHE, &m_ip->i_gh);
        if (unlikely(error)) {
            gfs2_glock_dq(&ip->i_gh);
            goto out_uninit;
        }
    }

    alloc_required = gfs2_write_alloc_required(ip, pos, len);

    if (alloc_required || gfs2_is_jdata(ip))
        gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

    if (alloc_required) {
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        error = gfs2_quota_lock_check(ip);
        if (error)
            goto out_unlock;

        requested = data_blocks + ind_blocks;
        ap.target = requested;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
            goto out_qunlock;
    }

    rblocks = RES_DINODE + ind_blocks;
    if (gfs2_is_jdata(ip))
        rblocks += data_blocks ? data_blocks : 1;
    if (ind_blocks || data_blocks)
        rblocks += RES_STATFS + RES_QUOTA;
    if (&ip->i_inode == sdp->sd_rindex)
        rblocks += 2 * RES_STATFS;
    if (alloc_required)
        rblocks += gfs2_rg_blocks(ip, requested);

    error = gfs2_trans_begin(sdp, rblocks,
                             PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
    if (error)
        goto out_trans_fail;

    error = -ENOMEM;
    flags |= AOP_FLAG_NOFS;
    page = grab_cache_page_write_begin(mapping, index, flags);
    *pagep = page;
    if (unlikely(!page))
        goto out_endtrans;

    if (gfs2_is_stuffed(ip)) {
        error = 0;
        if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
            error = gfs2_unstuff_dinode(ip, page);
            if (error == 0)
                goto prepare_write;
        } else if (!PageUptodate(page)) {
            error = stuffed_readpage(ip, page);
        }
        goto out;
    }

prepare_write:
    error = __block_write_begin(page, from, len, gfs2_block_map);
out:
    if (error == 0)
        return 0;

    unlock_page(page);
    page_cache_release(page);

    gfs2_trans_end(sdp);
    if (pos + len > ip->i_inode.i_size)
        gfs2_trim_blocks(&ip->i_inode);
    goto out_trans_fail;

out_endtrans:
    gfs2_trans_end(sdp);
out_trans_fail:
    if (alloc_required) {
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
    }
out_unlock:
    if (&ip->i_inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
out_uninit:
    gfs2_holder_uninit(&ip->i_gh);
    return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
    struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
    struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
    struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
    struct buffer_head *m_bh, *l_bh;
    u64 fs_total, new_free;

    /* Total up the file system space, according to the latest rindex. */
    fs_total = gfs2_ri_total(sdp);
    if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
        return;

    spin_lock(&sdp->sd_statfs_spin);
    gfs2_statfs_change_in(m_sc, m_bh->b_data +
                          sizeof(struct gfs2_dinode));
    if (fs_total > (m_sc->sc_total + l_sc->sc_total))
        new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
    else
        new_free = 0;
    spin_unlock(&sdp->sd_statfs_spin);
    fs_warn(sdp, "File system extended by %llu blocks.\n",
            (unsigned long long)new_free);
    gfs2_statfs_change(sdp, new_free, new_free, 0);

    if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
        goto out;
    update_statfs(sdp, m_bh, l_bh);
    brelse(l_bh);
out:
    brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    u64 to = pos + copied;
    void *kaddr;
    unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

    BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
    kaddr = kmap_atomic(page);
    memcpy(buf + pos, kaddr + pos, copied);
    memset(kaddr + pos + copied, 0, len - copied);
    flush_dcache_page(page);
    kunmap_atomic(kaddr);

    if (!PageUptodate(page))
        SetPageUptodate(page);
    unlock_page(page);
    page_cache_release(page);

    if (copied) {
        if (inode->i_size < to)
            i_size_write(inode, to);
        mark_inode_dirty(inode);
    }

    if (inode == sdp->sd_rindex) {
        adjust_fs_space(inode);
        sdp->sd_rindex_uptodate = 0;
    }

    brelse(dibh);
    gfs2_trans_end(sdp);
    if (inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
    gfs2_holder_uninit(&ip->i_gh);
    return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
    struct buffer_head *dibh;
    unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
    unsigned int to = from + len;
    int ret;
    struct gfs2_trans *tr = current->journal_info;
    BUG_ON(!tr);

    BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

    ret = gfs2_meta_inode_buffer(ip, &dibh);
    if (unlikely(ret)) {
        unlock_page(page);
        page_cache_release(page);
        goto failed;
    }

    if (gfs2_is_stuffed(ip))
        return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

    if (!gfs2_is_writeback(ip))
        gfs2_page_add_databufs(ip, page, from, to);

    ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
    if (tr->tr_num_buf_new)
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
    else
        gfs2_trans_add_meta(ip->i_gl, dibh);


    if (inode == sdp->sd_rindex) {
        adjust_fs_space(inode);
        sdp->sd_rindex_uptodate = 0;
    }

    brelse(dibh);
failed:
    gfs2_trans_end(sdp);
    gfs2_inplace_release(ip);
    if (ip->i_res->rs_qa_qd_num)
        gfs2_quota_unlock(ip);
    if (inode == sdp->sd_rindex) {
        gfs2_glock_dq(&m_ip->i_gh);
        gfs2_holder_uninit(&m_ip->i_gh);
    }
    gfs2_glock_dq(&ip->i_gh);
    gfs2_holder_uninit(&ip->i_gh);
    return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
    SetPageChecked(page);
    return __set_page_dirty_buffers(page);
}
Example #8
File: regops.c  Project: jeppeter/vbox
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux we cannot reliably determine
     * the physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
Example #9
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;
	__u32 end = offset + PAGE_CACHE_SIZE;
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	if (!PageLocked(pg))
                PAGE_BUG(pg);

	while(frag && frag->ofs + frag->size  <= offset) {
		//		D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
		frag = frag->next;
	}

	pg_buf = kmap(pg);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, 
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
		
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);

	flush_dcache_page(pg);

	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
Example #10
/*
 * Hacked from kernel function __get_user_pages in mm/memory.c
 *
 * Handles buffers allocated by another kernel-space driver and mmapped into
 * user space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA
 * structure.
 *
 * Gets physical pages from a user-space virtual address and stores them in
 * the page list.
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			v4l2_err(&atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			/*
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			*/
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				v4l2_err(&atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				v4l2_err(&atomisp_dev,
					"follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
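The inner loop above reduces to one step per user page: resolve the PFN behind the user address with follow_pfn(), convert it to a struct page, and flush the anonymous-page and D-cache aliases before handing it to the driver. A hedged single-page sketch under the same assumptions (pfnmap_user_addr_to_page is an invented helper; the caller is assumed to hold the mm's mmap_sem, as callers of __get_pfnmap_pages would):

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static struct page *pfnmap_user_addr_to_page(struct vm_area_struct *vma,
					     unsigned long uaddr)
{
	unsigned long pfn;
	struct page *page;

	/* Works for VM_PFNMAP/VM_IO mappings that get_user_pages() rejects. */
	if (follow_pfn(vma, uaddr, &pfn))
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	flush_anon_page(vma, page, uaddr);	/* flush any anon-page alias */
	flush_dcache_page(page);		/* flush the kernel D-cache alias */
	return page;
}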
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	int rc;
#else
	struct ecryptfs_page_crypt_req *page_crypt_req;
	int rc = 0;
#endif
#if 1 // FEATURE_SDCARD_ENCRYPTION
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	ecryptfs_inode = page->mapping->host;
#endif

#if 1 // FEATURE_SDCARD_ENCRYPTION
	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
			0, PAGE_CACHE_SIZE);
		if (rc) {
			ClearPageUptodate(page);
			goto out;
		}
		SetPageUptodate(page);
	} else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
#else
//	rc = ecryptfs_encrypt_page(page);
//	if (rc) {
//		ecryptfs_printk(KERN_WARNING, "Error encrypting "
//				"page (upper index [0x%.16lx])\n", page->index);
//		ClearPageUptodate(page);
	page_crypt_req = ecryptfs_alloc_page_crypt_req(
				page, ecryptfs_writepage_complete);
	if (unlikely(!page_crypt_req)) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR,
				"Failed to allocate page crypt request "
				"for encryption\n");
#endif
		goto out;
	}
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	SetPageUptodate(page);
#else
//	SetPageUptodate(page);
	set_page_writeback(page);
	ecryptfs_encrypt_page_async(page_crypt_req);
#endif
	}
#else
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
#endif
out:
	unlock_page(page);
	return rc;
}

static void strip_xattr_flag(char *page_virt,
			     struct ecryptfs_crypt_stat *crypt_stat)
{
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
		size_t written;

		crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
		ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
						&written);
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
}

/**
 *   Header Extent:
 *     Octets 0-7:        Unencrypted file size (big-endian)
 *     Octets 8-15:       eCryptfs special marker
 *     Octets 16-19:      Flags
 *      Octet 16:         File format version number (between 0 and 255)
 *      Octets 17-18:     Reserved
 *      Octet 19:         Bit 1 (lsb): Reserved
 *                        Bit 2: Encrypted?
 *                        Bits 3-8: Reserved
 *     Octets 20-23:      Header extent size (big-endian)
 *     Octets 24-25:      Number of header extents at front of file
 *                        (big-endian)
 *     Octet  26:         Begin RFC 2440 authentication token packet set
 */

/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file. The actual lower file does not have the metadata in
 *        the header. This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
				       struct ecryptfs_crypt_stat *crypt_stat)
{
	loff_t extent_num_in_page = 0;
	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
				       / crypt_stat->extent_size);
	int rc = 0;

	while (extent_num_in_page < num_extents_per_page) {
		loff_t view_extent_num = ((((loff_t)page->index)
					   * num_extents_per_page)
					  + extent_num_in_page);
		size_t num_header_extents_at_front =
			(crypt_stat->metadata_size / crypt_stat->extent_size);

		if (view_extent_num < num_header_extents_at_front) {
			/* This is a header extent */
			char *page_virt;

			page_virt = kmap_atomic(page);
			memset(page_virt, 0, PAGE_CACHE_SIZE);
			/* TODO: Support more than one header extent */
			if (view_extent_num == 0) {
				size_t written;

				rc = ecryptfs_read_xattr_region(
					page_virt, page->mapping->host);
				strip_xattr_flag(page_virt + 16, crypt_stat);
				ecryptfs_write_header_metadata(page_virt + 20,
							       crypt_stat,
							       &written);
			}
			kunmap_atomic(page_virt);
			flush_dcache_page(page);
			if (rc) {
				printk(KERN_ERR "%s: Error reading xattr "
				       "region; rc = [%d]\n", __func__, rc);
				goto out;
			}
		} else {
			/* This is an encrypted data extent */
			loff_t lower_offset =
				((view_extent_num * crypt_stat->extent_size)
				 - crypt_stat->metadata_size);

			rc = ecryptfs_read_lower_page_segment(
				page, (lower_offset >> PAGE_CACHE_SHIFT),
				(lower_offset & ~PAGE_CACHE_MASK),
				crypt_stat->extent_size, page->mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "extent at offset [%lld] in the lower "
				       "file; rc = [%d]\n", __func__,
				       lower_offset, rc);
				goto out;
			}
		}
		extent_num_in_page++;
	}
out:
	return rc;
}
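As a hedged worked example of the offset arithmetic above (numbers assumed, not taken from the source): with crypt_stat->extent_size = 4096, PAGE_CACHE_SIZE = 4096 and crypt_stat->metadata_size = 8192, there is one extent per page and num_header_extents_at_front is 2, so view extents 0 and 1 (pages 0 and 1) are synthesized header extents, while view extent 2 (page 2) reads its data from lower offset 2 * 4096 - 8192 = 0, i.e. the first encrypted data extent of the lower file.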
Example #12
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_inc_stat(zram, ZRAM_STAT_NUM_READS);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t zlen;
		u32 zoffset;
		struct page *bio_page, *zpage;
		unsigned char *bio_mem, *zmem;

		bio_page = bvec->bv_page;

		if (zram_is_zero_page(zram, index)) {
			handle_zero_page(bio_page);
			continue;
		}

		zram_find_obj(zram, index, &zpage, &zoffset);

		/* Requested page is not present in compressed area */
		if (unlikely(!zpage)) {
			pr_debug("Read before write on swap device: "
				"sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(!zoffset)) {
			handle_uncompressed_page(zram, bio_page, index);
			continue;
		}

		bio_mem = kmap_atomic(bio_page, KM_USER0);
		zlen = PAGE_SIZE;

		zmem = kmap_atomic(zpage, KM_USER1) + zoffset;

		ret = lzo1x_decompress_safe(zmem, xv_get_object_size(zmem),
					bio_mem, &zlen);

		kunmap_atomic(bio_mem, KM_USER0);
		kunmap_atomic(zmem, KM_USER1);

		/* This should NEVER happen - return bio error if it does! */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			goto out;
		}

		flush_dcache_page(bio_page);
		index++;
	}
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
Example #14
/* I have followed the behavior from ecryptfs. write_begin sets up the page
 * for writing. The following changes are made:
 * 1. If encryption is not enabled, then we just grab the page and set it up
 *    for write_begin, much like ecryptfs. When we seek to a position after
 *    EOF and write, the copied bytes are adjusted accordingly and passed on.
 *    For example, if the file contains 2000 bytes and we write 1000 bytes
 *    from the 3000th position (by lseeking), then from contains 3000 and
 *    copied contains 1000, so we can directly copy 1000 bytes to the lower
 *    file.
 * 2. When encryption is enabled, three cases are possible; they are commented
 *    below. We must handle the zero-byte cases explicitly.
 */
int wrapfs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct page *page;
	char *page_data;
	pgoff_t index;
	int err = 0;
	struct inode *cur_inode, *lower_inode;
	unsigned int offset = 0;

#ifdef WRAPFS_CRYPTO
	/* pgoff_t is unsigned long, loff_t is long long */
	loff_t cur_inode_size;
	pgoff_t cur_inode_last_index;
	unsigned int cur_inode_end_offset;
	unsigned int zero_count;
	char *page_data_zeros;
	struct page *page_to_zeros = NULL;
	pgoff_t tempindex;
	pgoff_t tempoffset;
	pgoff_t bytes_to_write;
	struct file *lower_file = wrapfs_lower_file(file);
	char *encrypted_buf;
	mm_segment_t old_fs;
#endif

	wrapfs_debug("");
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"");

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & (PAGE_CACHE_SIZE - 1);
	wrapfs_debug("index : %lu, offset : %d\n", index, offset);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		wrapfs_debug("grab_cache_page_write_begin returned NULL!!");
		err = -ENOMEM;
		goto out;
	}
	page_data = (char *)kmap(page);
	*pagep = page;

	cur_inode = file->f_path.dentry->d_inode;
	if (cur_inode)
		lower_inode = wrapfs_lower_inode(cur_inode);

#ifdef WRAPFS_CRYPTO
	/* cur_inode* refers to the file's existing attributes */
	cur_inode_size = cur_inode->i_size;
	cur_inode_last_index = cur_inode_size >> (PAGE_CACHE_SHIFT);
	cur_inode_end_offset = cur_inode_size & (PAGE_CACHE_SIZE - 1);

	wrapfs_debug(
	"cur_inode->i_size : %lu, i_size_read(page->mapping->host) : %lu\n",
	(unsigned long)cur_inode->i_size,
	(unsigned long)i_size_read(page->mapping->host));

	if (index == cur_inode_last_index) {
		/* The page to write is same as last page in file */
		wrapfs_debug("");
		if (pos > cur_inode_size) {
			/* Need to fill zeroes upto pos,
			 * from cur_inode_size */
			wrapfs_debug("");
			zero_count = pos - cur_inode_size;
			memset(page_data + cur_inode_end_offset, 0x00,
				zero_count);
		} else if (pos == cur_inode_size) {
			wrapfs_debug("");
			/* Fine. Do a normal encryption in write_end */
		} else if (pos < cur_inode_size) {
			/* Fine. Do a normal encryption in write_end */
			wrapfs_debug("");

		}
	} else if (index < cur_inode_last_index) {
		/* The page to write is an intermediate file page.
		 * No special cases need to be handled here.
		 */
		wrapfs_debug("");
	} else if (index > cur_inode_last_index) {
		/* If we skip to a page more than the last page in file.
		 * Need to fill holes between cur_inode_last_index and index.
		 * First filling hole in the new index page upto offset.
		 */
		wrapfs_debug("");
		memset(page_data, 0x00, offset);
		tempoffset = cur_inode_end_offset;
		tempindex = cur_inode_last_index;
		lower_file->f_pos = cur_inode_size;
		encrypted_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (encrypted_buf == NULL) {
			wrapfs_debug("kmalloc failed!!");
			err = -ENOMEM;
			goto out_holes;
		}
		/* Fill zeroes in page cur_inode_last_index from cur off to end
		 * Then fill all pages from (cur_inode_last_index + 1) to index
		 * These must also be encrypted and written to lower file here
		 * itself as they are not reflected in write_end.
		 */
		while (tempindex < index) {
			page_to_zeros =
			grab_cache_page_write_begin(cur_inode->i_mapping,
							tempindex, flags);
			if (page_to_zeros == NULL) {
				wrapfs_debug("grab_cache_page failed!!");
				kfree(encrypted_buf);
				err = -ENOMEM;
				goto out_holes;
			}
			page_data_zeros = (char *)kmap(page_to_zeros);
			bytes_to_write = PAGE_CACHE_SIZE - tempoffset;
			memset(page_data_zeros + tempoffset, 0x00,
				bytes_to_write);
			err = my_encrypt(page_data_zeros, PAGE_CACHE_SIZE,
				encrypted_buf,
				PAGE_CACHE_SIZE,
				WRAPFS_SB(file->f_dentry->d_sb)->key,
				WRAPFS_CRYPTO_KEY_LEN);
			if (err < 0) {
				wrapfs_debug("Encryption failed!!");
				kfree(encrypted_buf);
				err = -EINVAL;
				goto free_pages_holes;
			}
			flush_dcache_page(page_to_zeros);

			old_fs = get_fs();
			set_fs(KERNEL_DS);
			err = vfs_write(lower_file,
					encrypted_buf + tempoffset,
					bytes_to_write,
					&lower_file->f_pos);
			set_fs(old_fs);
free_pages_holes:
			kunmap(page_to_zeros);
			unlock_page(page_to_zeros);
			page_cache_release(page_to_zeros);
			if (err < 0) {
				kfree(encrypted_buf);
				goto out_holes;
			}
			err = 0;
			mark_inode_dirty_sync(cur_inode);
			tempoffset = 0;
			tempindex++;
		} /* while ends */
out_holes:
		if ((err < 0) && (page_to_zeros != NULL))
			ClearPageUptodate(page_to_zeros);
	}
#endif

out:
	if (page)
		kunmap(page);
	if (unlikely(err) && page) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
	}
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"err : %d", err);
	return err;
}
static int
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host	*host = dev_id;
	void __iomem		*base = host->base;
	uint32_t		status;

	status = readl(base + MMCISTATUS);
#if IRQ_DEBUG
	msmsdcc_print_status(host, "irq1-r", status);
#endif

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL)))
			break;

		/* Map the current scatter buffer */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(host->pio.sg),
				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
		buffer += host->pio.sg_off;
		remain = host->pio.sg->length - host->pio.sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain, status);

		/* Unmap the buffer */
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);

		host->pio.sg_off += len;
		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;

		if (remain) /* Done with this page? */
			break; /* Nope */

		if (status & MCI_RXACTIVE && host->curr.user_pages)
			flush_dcache_page(sg_page(host->pio.sg));

		if (!--host->pio.sg_len) {
			memset(&host->pio, 0, sizeof(host->pio));
			break;
		}

		/* Advance to next sg */
		host->pio.sg++;
		host->pio.sg_off = 0;

		status = readl(base + MMCISTATUS);
	} while (1);

	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	if (!host->curr.xfer_remain)
		writel(0, base + MMCIMASK1);

	return IRQ_HANDLED;
}
Example #16
/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file. The actual lower file does not have the metadata in
 *        the header. This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
                                       struct ecryptfs_crypt_stat *crypt_stat)
{
    loff_t extent_num_in_page = 0;
    loff_t num_extents_per_page = (PAGE_CACHE_SIZE
                                   / crypt_stat->extent_size);
    int rc = 0;

    while (extent_num_in_page < num_extents_per_page) {
        loff_t view_extent_num = ((((loff_t)page->index)
                                   * num_extents_per_page)
                                  + extent_num_in_page);
        size_t num_header_extents_at_front =
            (crypt_stat->metadata_size / crypt_stat->extent_size);

        if (view_extent_num < num_header_extents_at_front) {
            /* This is a header extent */
            char *page_virt;

            page_virt = kmap_atomic(page);
            memset(page_virt, 0, PAGE_CACHE_SIZE);
            /* TODO: Support more than one header extent */
            if (view_extent_num == 0) {
                size_t written;

                rc = ecryptfs_read_xattr_region(
                         page_virt, page->mapping->host);
                strip_xattr_flag(page_virt + 16, crypt_stat);
                ecryptfs_write_header_metadata(page_virt + 20,
                                               crypt_stat,
                                               &written);
            }
            kunmap_atomic(page_virt);
            flush_dcache_page(page);
            if (rc) {
                printk(KERN_ERR "%s: Error reading xattr "
                       "region; rc = [%d]\n", __func__, rc);
                goto out;
            }
        } else {
            /* This is an encrypted data extent */
            loff_t lower_offset =
                ((view_extent_num * crypt_stat->extent_size)
                 - crypt_stat->metadata_size);

            rc = ecryptfs_read_lower_page_segment(
                     page, (lower_offset >> PAGE_CACHE_SHIFT),
                     (lower_offset & ~PAGE_CACHE_MASK),
                     crypt_stat->extent_size, page->mapping->host);
            if (rc) {
                printk(KERN_ERR "%s: Error attempting to read "
                       "extent at offset [%lld] in the lower "
                       "file; rc = [%d]\n", __func__,
                       lower_offset, rc);
                goto out;
            }
        }
        extent_num_in_page++;
    }
out:
    return rc;
}
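The offset arithmetic in the data-extent branch is the easy part to get wrong, so here it is in isolation. This helper is illustrative only, not eCryptfs code: with 4096-byte extents and an 8192-byte metadata header, view extents 0 and 1 are header extents and view extent 2 maps to lower offset 0.

#include <linux/types.h>

/* Illustrative only: translate a view extent number to a lower-file offset. */
static loff_t view_extent_to_lower_offset(loff_t view_extent_num,
					  size_t extent_size,
					  size_t metadata_size)
{
	return (view_extent_num * extent_size) - metadata_size;
}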
Example #17
/* Read page, by pass page cache, always read from device. */
static int rawfs_readpage_nolock(struct file *filp, struct page *page)
{
    struct super_block *sb = filp->f_path.dentry->d_sb;
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct rawfs_sb_info *rawfs_sb = RAWFS_SB(sb);
    struct rawfs_inode_info *inode_info = RAWFS_I(inode);
    loff_t pos;
    unsigned int curr_file_pos;
    unsigned int curr_buf_pos = 0;
    int remain_buf_size;
    unsigned size;
    int retval;
	unsigned char *pg_buf;

//	pg_buf = kmap_atomic(page);
	pg_buf = kmap(page);

    // TODO: check pg_buf

    size = i_size_read(inode);
    curr_file_pos = pos = page->index << PAGE_CACHE_SHIFT;
    retval = PAGE_CACHE_SIZE;

    RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s @ folder %X, "
        "page_index %ld, pos %lld, len %d, filesize: %d, pg_buf %X\n",
        inode_info->i_name,
        inode_info->i_parent_folder_id,
        page->index,
        pos, retval, size, (unsigned)pg_buf);

    if ((retval + pos) >= size)
        retval = size - pos;

    if (pos < size) {
        {
            int preceding_pages, rear_pages;
            struct rawfs_page *page_buf = NULL;
            int i;

            // Prepare page buffer
            page_buf = kzalloc(rawfs_sb->page_size, GFP_NOFS);

            if (page_buf == NULL)
            {
                retval = 0;
                goto out;
            }

            preceding_pages = FLOOR((unsigned)pos, rawfs_sb->page_data_size);
            rear_pages = CEILING((unsigned)pos + retval,
                rawfs_sb->page_data_size);
            remain_buf_size = retval;

            RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s, "
                "preceding_pages %d, rear_pages %d, remain_buf_size %d ",
                inode_info->i_name, preceding_pages, rear_pages,
                remain_buf_size);

            /* Step 1: Copy preceding pages, if starting pos is not 0. */
            for (i=preceding_pages;i<rear_pages;i++)
            {
                // Read page
                rawfs_sb->dev.read_page(sb,
                    inode_info->i_location_block,
                    inode_info->i_location_page+i,
                    page_buf);

                /* Copy requried parts */
                {
                    int start_in_buf;
                    int copy_len;

                    start_in_buf = (curr_file_pos % rawfs_sb->page_data_size);
                    copy_len =  ((start_in_buf + remain_buf_size) >
                                  rawfs_sb->page_data_size) ?
                                (rawfs_sb->page_data_size - start_in_buf) :
                                remain_buf_size;

                    memcpy(pg_buf + curr_buf_pos,
                        &page_buf->i_data[0] + start_in_buf, copy_len);

                    RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s, %d, "
                        "curr_buf_pos %d, remain_buf_size %d start_in_buf %d "
                        "copy_len %d starting pattern %X",
                        inode_info->i_name, i, curr_buf_pos, remain_buf_size,
                        start_in_buf, copy_len,
                        *(unsigned int*)(&page_buf->i_data[0] + start_in_buf));

                    curr_buf_pos    += copy_len;
                    remain_buf_size -= copy_len;
                }
            }

            if (page_buf)
                kfree(page_buf);
        }
    }
    else
        retval = 0;

out:

    SetPageUptodate(page);
    ClearPageError(page);
	flush_dcache_page(page);

	//kunmap_atomic(pg_buf);
	kunmap(page);
    unlock_page(page);

    return 0;
}
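Stripped of the rawfs-specific buffering, the readpage contract implemented above is small: fill the page through a kernel mapping, flush the D-cache, set or clear PG_uptodate, and unlock the page. A hedged sketch follows; fill_from_device() is a hypothetical placeholder for the raw device read, not a real rawfs helper.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int fill_from_device(char *buf, loff_t pos, size_t len); /* hypothetical */

static int example_readpage(struct file *filp, struct page *page)
{
	char *kaddr = kmap(page);
	int err;

	err = fill_from_device(kaddr, (loff_t)page->index << PAGE_CACHE_SHIFT,
			       PAGE_CACHE_SIZE);
	kunmap(page);

	if (!err) {
		flush_dcache_page(page);	/* written via the kernel mapping */
		SetPageUptodate(page);
	} else {
		ClearPageUptodate(page);
		SetPageError(page);
	}
	unlock_page(page);
	return err;
}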
Example #18
static int zram_read(struct zram *zram, struct bio *bio)
{

	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
Example #19
File: rd.c  Project: nhanh0/hah
static int rd_blkdev_pagecache_IO(int rw, struct buffer_head * sbh, int minor)
{
	struct address_space * mapping;
	unsigned long index;
	int offset, size, err;

	err = -EIO;
	err = 0;
	mapping = rd_bdev[minor]->bd_inode->i_mapping;

	index = sbh->b_rsector >> (PAGE_CACHE_SHIFT - 9);
	offset = (sbh->b_rsector << 9) & ~PAGE_CACHE_MASK;
	size = sbh->b_size;

	do {
		int count;
		struct page ** hash;
		struct page * page;
		char * src, * dst;
		int unlock = 0;

		count = PAGE_CACHE_SIZE - offset;
		if (count > size)
			count = size;
		size -= count;

		hash = page_hash(mapping, index);
		page = __find_get_page(mapping, index, hash);
		if (!page) {
			page = grab_cache_page(mapping, index);
			err = -ENOMEM;
			if (!page)
				goto out;
			err = 0;

			if (!Page_Uptodate(page)) {
				memset(kmap(page), 0, PAGE_CACHE_SIZE);
				kunmap(page);
				SetPageUptodate(page);
			}

			unlock = 1;
		}

		index++;

		if (rw == READ) {
			src = kmap(page);
			src += offset;
			dst = bh_kmap(sbh);
		} else {
			dst = kmap(page);
			dst += offset;
			src = bh_kmap(sbh);
		}
		offset = 0;

		memcpy(dst, src, count);

		kunmap(page);
		bh_kunmap(sbh);

		if (rw == READ) {
			flush_dcache_page(page);
		} else {
			SetPageDirty(page);
		}
		if (unlock)
			UnlockPage(page);
		__free_page(page);
	} while (size);

 out:
	return err;
}
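When the 2.4-era code above has to create a page that was not yet in the page cache, it zero-fills the page through kmap() and marks it uptodate before use. The sketch below shows that initialization in isolation; the flush_dcache_page() call is an extra precaution for aliasing D-caches that the original snippet omits.

#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Zero-fill a freshly grabbed page-cache page and mark it uptodate. */
static void init_new_cache_page(struct page *page)
{
	void *kaddr = kmap(page);

	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
}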
Example #20
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option that
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->pin(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

find_page:
	page = find_lock_page(mapping, index);
	if (!page) {
		ret = -ENOMEM;
		page = page_cache_alloc_cold(mapping);
		if (unlikely(!page))
			goto out_ret;

		/*
		 * This will also lock the page
		 */
		ret = add_to_page_cache_lru(page, mapping, index,
					    GFP_KERNEL);
		if (unlikely(ret))
			goto out;
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size.  Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret) {
		if (ret == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			goto find_page;
		}
		if (ret < 0)
			goto out;
		/*
		 * A partial write has happened, so 'ret' was already set to
		 * the number of bytes written; there is nothing more to do here.
		 */
	} else
		ret = this_len;
	/*
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	 */
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	page_cache_release(page);
	unlock_page(page);
out_ret:
	return ret;
}
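The subtle part of pipe_to_file() is the AOP_TRUNCATED_PAGE handling: when an address_space operation returns that code it has already unlocked the page, so the caller must drop its reference and look the page up again. The sketch below isolates that retry loop for the old prepare_write()/commit_write() API used above; copy_into_page() is a hypothetical stand-in for the pipe-buffer copy.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static void copy_into_page(struct page *page, unsigned offset, unsigned len); /* hypothetical */

static int write_chunk(struct file *file, struct address_space *mapping,
		       pgoff_t index, unsigned offset, unsigned len)
{
	struct page *page;
	int ret;

find_page:
	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	ret = mapping->a_ops->prepare_write(file, page, offset, offset + len);
	if (ret) {
		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;	/* aop unlocked the page; retry */
		return ret;
	}

	copy_into_page(page, offset, len);
	flush_dcache_page(page);

	ret = mapping->a_ops->commit_write(file, page, offset, offset + len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	}
	if (ret == 0)
		ret = len;	/* full write; partial or error passed through */

	unlock_page(page);
	page_cache_release(page);
	return ret;
}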
Example #21
File: mpage.c  Project: me-oss/me-linux
/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			clear_buffer_mapped(map_bh);
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}
	
		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + (first_hole << blkbits), 0,
				PAGE_CACHE_SIZE - (first_hole << blkbits));
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
			  	min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}
Example #22
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint16_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
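The sg_mapping_iter pattern keeps the highmem handling out of the driver: sg_miter_next() maps one chunk at a time, data is copied to miter->addr, and sg_miter_stop() drops the mapping. A condensed sketch, assuming the iterator was already set up with sg_miter_start() elsewhere (as the jz4740 driver does), that each chunk length is a multiple of four bytes, and with the FIFO-readiness polling omitted:

#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void drain_rx_fifo(struct sg_mapping_iter *miter, void __iomem *fifo)
{
	while (sg_miter_next(miter)) {
		/* read miter->length bytes, one 32-bit FIFO word at a time */
		ioread32_rep(fifo, miter->addr, miter->length / 4);

		/* data landed through the kernel mapping of this page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);
}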
Example #23
int isp_resize_mem_data(struct isp_mem_resize_data *data)
{
	int i;
	int ret = -1;
	struct isp_mem_resize_data *presizer_user = \
		(struct isp_mem_resize_data *)data;
	u32 input_buffer_size, output_buffer_size;
	u32 input_nr_pages, output_nr_pages;
	struct page **input_pages = NULL;
	struct page **output_pages = NULL;
	unsigned long isp_addr_in = 0;
	unsigned long  isp_addr_out = 0;
	struct isp_mem_resize_data resizer_param;
	unsigned long timeout;

	if (presizer_user == NULL) {
		printk(KERN_ERR "ISP_RESZ_ERR : Invalid user data\n");
		return -EINVAL;
	}

	memcpy(&resizer_param, presizer_user, \
		sizeof(struct isp_mem_resize_data));

	DPRINTK_ISPPROC("\nRSZ input(%d-%d) - output(%d-%d)\n",
		resizer_param.input_width,
		resizer_param.input_height,
		resizer_param.output_width,
		resizer_param.output_height);

	DPRINTK_ISPPROC("RSZ start(%d-%d) - end(%d-%d)\n",
		resizer_param.left,
		resizer_param.top,
		resizer_param.crop_width,
		resizer_param.crop_height);

	if (presizer_user->datain == 0 || presizer_user->dataout == 0)
		return -EINVAL;

	ispresizer_enable(0);
	timeout = jiffies + msecs_to_jiffies(200);
	while (ispresizer_busy()) {
		if (time_after(jiffies, timeout))
			return -EINVAL;
		msleep(1);
	}

	ispresizer_save_context();
	ispresizer_free();
	ispresizer_request();

	/* set data path before configuring modules. */
	ispresizer_config_datapath(RSZ_MEM_YUV, 0);

	input_buffer_size = ALIGN_TO(presizer_user->input_width* \
		presizer_user->input_height*2 , 0x100);
	input_pages = map_user_memory_to_kernel(presizer_user->datain,
		input_buffer_size, &input_nr_pages);
	if (input_pages == NULL) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_RESZ_ERR: memory allocation failed\n");
		goto exit_cleanup;
	}

	output_buffer_size = ALIGN_TO(presizer_user->output_width* \
		presizer_user->output_height*2, 0x1000);
	output_pages = map_user_memory_to_kernel(presizer_user->dataout,
		output_buffer_size, &output_nr_pages);
	if (output_pages == NULL) {
		ret = -EINVAL;
		printk(KERN_ERR "ISP_RESZ_ERR: memory allocation failed\n");
		goto exit_cleanup;
	}
	for (i = 0; i < output_nr_pages; ++i)
		flush_dcache_page(output_pages[i]);

	isp_addr_in = ispmmu_vmap_pages(input_pages, input_nr_pages);
	if (IS_ERR((void *)isp_addr_in)) {
		isp_addr_in = 0;
		ret = -EINVAL;
		printk(KERN_ERR "ISP_RESZ_ERR: isp mmu map failed\n");
		goto exit_cleanup;
	}
	isp_addr_out = ispmmu_vmap_pages(output_pages, output_nr_pages);
	if (IS_ERR((void *)isp_addr_out)) {
		isp_addr_out = 0;
		ret = -EINVAL;
		printk(KERN_ERR "ISP_RESZ_ERR:  isp mmu map failed\n");
		goto exit_cleanup;
	}

	if ((resizer_param.left == 0) && (resizer_param.top == 0)) {
		ret = ispresizer_try_size(&resizer_param.input_width,
					&resizer_param.input_height,
					&resizer_param.output_width,
					&resizer_param.output_height);

		ret = ispresizer_config_size(resizer_param.input_width,
					resizer_param.input_height,
					resizer_param.output_width,
					resizer_param.output_height);

		ispresizer_set_inaddr(isp_addr_in);
	} else {
		ispresizer_trycrop(resizer_param.left,
					resizer_param.top,
					resizer_param.crop_width,
					resizer_param.crop_height,
					resizer_param.output_width,
					resizer_param.output_height);

		ispresizer_applycrop();

		/*pixel alignment in 32bit space, vertical must be 0 per TRM */
		isp_reg_writel(((resizer_param.left%16) <<
				ISPRSZ_IN_START_HORZ_ST_SHIFT) |
				(0 <<
				ISPRSZ_IN_START_VERT_ST_SHIFT),
				OMAP3_ISP_IOMEM_RESZ,
				ISPRSZ_IN_START);

		/* Align input address for cropping, per TRM  */
		ispresizer_set_inaddr(isp_addr_in +
			(resizer_param.top*resizer_param.input_width*2)
			+ ((resizer_param.left/16)*32));
	}

	ispresizer_set_inaddr(isp_addr_in);
	ispresizer_set_outaddr(isp_addr_out);
	ispresizer_config_ycpos(0);
	ispresizer_config_inlineoffset(
		ALIGN_TO(presizer_user->input_width*2, 32));

	isp_set_callback(CBK_RESZ_DONE, rsz_isr, (void *) NULL, (void *)NULL);
	isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_wfc.done = 0;
	/* start resizer engine. */
	ispresizer_enable(1);

	ret = wait_for_completion_timeout(&isp_wfc, msecs_to_jiffies(1000));
	if (!ret)
		ispresizer_enable(0);

	timeout = jiffies + msecs_to_jiffies(50);
	while (ispresizer_busy()) {
		msleep(5);
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ISP_RESZ_ERR: Resizer still busy");
			break;
		}
	}

	isp_reg_writel(0xFFFFFFFF, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_unset_callback(CBK_RESZ_DONE);
	ret = 0;

exit_cleanup:
	ispresizer_restore_context();

	if (isp_addr_in != 0)
		ispmmu_vunmap(isp_addr_in);
	if (isp_addr_out != 0)
		ispmmu_vunmap(isp_addr_out);
	if (input_pages != NULL) {
		unmap_user_memory_from_kernel(input_pages, input_nr_pages);
		kfree(input_pages);
	}
	if (output_pages != NULL) {
		unmap_user_memory_from_kernel(output_pages, output_nr_pages);
		kfree(output_pages);
	}

	DPRINTK_ISPPROC("resizer exit.\n");

	return ret;
}
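The reason this function appears in a flush_dcache_page() listing is the loop over the mapped output pages: before the resizer is started, every output page is passed through flush_dcache_page(). The same walk in isolation:

#include <linux/mm.h>
#include <linux/highmem.h>

/* Flush every page of a user buffer that was mapped into the kernel. */
static void flush_mapped_pages(struct page **pages, unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		flush_dcache_page(pages[i]);
}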
Example #24
File: ioctl.c  Project: 513855417/linux
/*
 * reiserfs_unpack
 * Function try to convert tail from direct item into indirect.
 * It set up nopack attribute in the REISERFS_I(inode)->nopack
 */
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
	int retval = 0;
	int index;
	struct page *page;
	struct address_space *mapping;
	unsigned long write_from;
	unsigned long blocksize = inode->i_sb->s_blocksize;

	if (inode->i_size == 0) {
		REISERFS_I(inode)->i_flags |= i_nopack_mask;
		return 0;
	}
	/* ioctl already done */
	if (REISERFS_I(inode)->i_flags & i_nopack_mask) {
		return 0;
	}

	/* we need to make sure nobody is changing the file size beneath us */
	{
		int depth = reiserfs_write_unlock_nested(inode->i_sb);

		inode_lock(inode);
		reiserfs_write_lock_nested(inode->i_sb, depth);
	}

	reiserfs_write_lock(inode->i_sb);

	write_from = inode->i_size & (blocksize - 1);
	/* if we are on a block boundary, we are already unpacked.  */
	if (write_from == 0) {
		REISERFS_I(inode)->i_flags |= i_nopack_mask;
		goto out;
	}

	/*
	 * we unpack by finding the page with the tail, and calling
	 * __reiserfs_write_begin on that page.  This will force a
	 * reiserfs_get_block to unpack the tail for us.
	 */
	index = inode->i_size >> PAGE_SHIFT;
	mapping = inode->i_mapping;
	page = grab_cache_page(mapping, index);
	retval = -ENOMEM;
	if (!page) {
		goto out;
	}
	retval = __reiserfs_write_begin(page, write_from, 0);
	if (retval)
		goto out_unlock;

	/* conversion can change page contents, must flush */
	flush_dcache_page(page);
	retval = reiserfs_commit_write(NULL, page, write_from, write_from);
	REISERFS_I(inode)->i_flags |= i_nopack_mask;

out_unlock:
	unlock_page(page);
	put_page(page);

out:
	inode_unlock(inode);
	reiserfs_write_unlock(inode->i_sb);
	return retval;
}
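The trigger condition for the unpack is the tail-offset computation near the top of the function: the low bits of i_size against the block size decide whether there is a tail to convert at all. An equivalent one-line helper, shown only for clarity:

#include <linux/fs.h>

/* Byte offset of the file tail inside its last block; 0 means no tail. */
static unsigned long tail_offset_in_block(const struct inode *inode)
{
	return (unsigned long)(i_size_read(inode) &
			       (inode->i_sb->s_blocksize - 1));
}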
Example #25
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)

{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
								page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}
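The trailing-bytes handling above is a recurring pattern when a decompressor fills several page-cache pages and the last one comes up short: the unused tail must be zeroed through a temporary mapping before the page is marked uptodate. A stand-alone sketch using the same modern kmap_atomic() API:

#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Zero the unused tail of a page after a short read/decompress result. */
static void zero_page_tail(struct page *page, size_t valid_bytes)
{
	if (valid_bytes < PAGE_SIZE) {
		void *kaddr = kmap_atomic(page);

		memset(kaddr + valid_bytes, 0, PAGE_SIZE - valid_bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
	}
}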
Example #26
static int
HgfsDoReadpage(HgfsHandle handle,  // IN:     Handle to use for reading
               struct page *page,  // IN/OUT: Page to read into
               unsigned pageFrom,  // IN:     Where to start reading to
               unsigned pageTo)    // IN:     Where to stop reading
{
   int result = 0;
   loff_t curOffset = ((loff_t)page->index << PAGE_CACHE_SHIFT) + pageFrom;
   size_t nextCount, remainingCount = pageTo - pageFrom;
   HgfsDataPacket dataPacket[1];

   LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoReadpage: read %Zu bytes from fh %u "
           "at offset %Lu\n", remainingCount, handle, curOffset));

   /*
    * Call HgfsDoRead repeatedly until either
    * - HgfsDoRead returns an error, or
    * - HgfsDoRead returns 0 (end of file), or
    * - We have read the requested number of bytes.
    */
   do {
      nextCount = (remainingCount > HGFS_IO_MAX) ?
         HGFS_IO_MAX : remainingCount;
      dataPacket[0].page = page;
      dataPacket[0].offset = pageFrom;
      dataPacket[0].len = nextCount;
      result = HgfsDoRead(handle, dataPacket, 1, curOffset);
      if (result < 0) {
         LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoReadpage: read error %d\n",
                 result));
         goto out;
      }
      remainingCount -= result;
      curOffset += result;
      pageFrom += result;
   } while ((result > 0) && (remainingCount > 0));

   /*
    * It's possible that despite being asked to read a full page, there is less
    * than a page in the file from this offset, so we should zero the rest of
    * the page's memory.
    */
   if (remainingCount) {
      char *buffer = kmap(page) + pageTo;
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: zeroing last %Zu bytes\n",
              __func__, remainingCount));
      memset(buffer - remainingCount, 0, remainingCount);
      kunmap(page);
   }

   /*
    * We read a full page (or all of the page that actually belongs to the
    * file), so mark it up to date. Also, flush the old page data from the data
    * cache.
    */
   flush_dcache_page(page);
   SetPageUptodate(page);
   result = 0;

  out:
   return result;
}
Example #27
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int index = page->index << PAGE_CACHE_SHIFT;
	u64 block = squashfs_i(inode)->start;
	int offset = squashfs_i(inode)->offset;
	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
	int bytes, copied;
	void *pageaddr;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
			"%llx, offset %x\n", page->index, block, offset);

	/*
	 * Skip index bytes into symlink metadata.
	 */
	if (index) {
		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
								index);
		if (bytes < 0) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			goto error_out;
		}
	}

	/*
	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
	 * is not used here because it can sleep and we want to use
	 * kmap_atomic to map the page.  Instead call the underlying
	 * squashfs_cache_get routine.  As length bytes may overlap metadata
	 * blocks, we may need to call squashfs_cache_get multiple times.
	 */
	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
		if (entry->error) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			squashfs_cache_put(entry);
			goto error_out;
		}

		pageaddr = kmap_atomic(page);
		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
								length - bytes);
		if (copied == length - bytes)
			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
		else
			block = entry->next_index;
		kunmap_atomic(pageaddr);
		squashfs_cache_put(entry);
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error_out:
	SetPageError(page);
	unlock_page(page);
	return 0;
}
Example #28
static void
msmsdcc_dma_complete_tlet(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned long		flags;
	struct mmc_request	*mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(host->dma.result & DMOV_RSLT_VALID)) {
		printk(KERN_ERR "msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (host->dma.result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush  */
		if (host->dma.result & DMOV_RSLT_ERROR)
			printk(KERN_ERR "%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.result & DMOV_RSLT_FLUSH)
			printk(KERN_ERR "%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.err)
			printk(KERN_ERR
			       "Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       host->dma.err->flush[0], host->dma.err->flush[1],
			       host->dma.err->flush[2], host->dma.err->flush[3],
			       host->dma.err->flush[4],
			       host->dma.err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
             || mrq->data->error) {

		if (mrq->data->error
		    && !(host->curr.got_dataend
			 && host->curr.got_datablkend)) {
			printk(KERN_INFO "%s: Worked around bug 1535304\n",
			       mmc_hostname(host->mmc));
		}
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);

#ifdef CONFIG_MMC_MSM_PROG_DONE_SCAN
			if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED)
				&& (mrq->cmd->arg & 0x80000000)) {
				/* set the prog_scan in a cmd53.*/
				host->prog_scan = 1;
				/* Send STOP to let the SDCC know to stop. */
				writel(MCI_CSPM_MCIABORT,
						host->base + MMCICOMMAND);
			}
#endif
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}
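After the DMA completion path unmaps the scatterlist, the code above walks it and flushes every page when the buffer came from user mappings, so the data the controller just wrote is not hidden behind stale cache lines in the user-visible alias. The same walk with the chained-scatterlist iterator, as a sketch:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void flush_sg_pages(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		flush_dcache_page(sg_page(sg));
}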
Example #29
static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host	*host = dev_id;
	void __iomem		*base = host->base;
	uint32_t		status;

	status = readl(base + MMCISTATUS);
#if IRQ_DEBUG
	msmsdcc_print_status(host, "irq1-r", status);
#endif
 
/* SEMC_BEGIN (Crash on irq when data already handled - DMS00718508) */
//	do {
	while (host->pio.sg) {
/* SEMC_END (Crash on irq when data already handled - DMS00718508) */

		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
			if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
				break;

			if (msmsdcc_spin_on_status(host,
						   (MCI_TXFIFOHALFEMPTY |
						   MCI_RXDATAAVLBL),
						   PIO_SPINMAX)) {
				break;
			}
		}

		/* Map the current scatter buffer */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(host->pio.sg),
				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
		buffer += host->pio.sg_off;
		remain = host->pio.sg->length - host->pio.sg_off;
		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain, status);

		/* Unmap the buffer */
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);

		host->pio.sg_off += len;
		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;

		if (remain == 0) {
			/* This sg page is full - do some housekeeping */
			if (status & MCI_RXACTIVE && host->curr.user_pages)
				flush_dcache_page(sg_page(host->pio.sg));

			if (!--host->pio.sg_len) {
				memset(&host->pio, 0, sizeof(host->pio));
				break;
			}

			/* Advance to next sg */
			host->pio.sg++;
			host->pio.sg_off = 0;
		}

		status = readl(base + MMCISTATUS);
/* SEMC_BEGIN (Crash on irq when data already handled - DMS00718508) */
//	} while (1);
	}
/* SEMC_END (Crash on irq when data already handled - DMS00718508) */

	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	if (!host->curr.xfer_remain)
		writel(0, base + MMCIMASK1);

	return IRQ_HANDLED;
}
Example #30
int wrapfs_readpage(struct file *file, struct page *page)
{
	int err;
	struct file *lower_file;
	struct inode *inode;
	mm_segment_t old_fs;
	char *page_data = NULL;
	mode_t orig_mode;
#ifdef WRAPFS_CRYPTO
	char *decrypted_buf = NULL;
#endif

	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"");
	wrapfs_debug("");
	lower_file = wrapfs_lower_file(file);
	BUG_ON(lower_file == NULL);

	inode = file->f_path.dentry->d_inode;
	page_data = (char *) kmap(page);

	/*
	 * Use vfs_read because some lower file systems don't have a
	 * readpage method, and some file systems (esp. distributed ones)
	 * don't like their pages to be accessed directly.  Using vfs_read
	 * may be a little slower, but a lot safer, as the VFS does a lot of
	 * the necessary magic for us.
	 */
	lower_file->f_pos = page_offset(page);
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/*
	 * generic_file_splice_write may call us on a file not opened for
	 * reading, so temporarily allow reading.
	 */
	orig_mode = lower_file->f_mode;
	lower_file->f_mode |= FMODE_READ;
	err = vfs_read(lower_file, page_data, PAGE_CACHE_SIZE,
			&lower_file->f_pos);
	lower_file->f_mode = orig_mode;
	set_fs(old_fs);

#ifdef WRAPFS_CRYPTO
	/* At this point, we have the entire page from lower file system in
	 * page_data. If WRAPFS_CRYPTO is set, we need to decrypt page_data
	 * and store it back in page_data. I have taken the decrypt function
	 * from HW1 and made necessary modifications.
	 */
	decrypted_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
	if (decrypted_buf == NULL) {
		wrapfs_debug("kmalloc failed!!");
		kunmap(page);
		err = -ENOMEM;
		goto out;
	}
	if (my_decrypt(page_data, PAGE_CACHE_SIZE, decrypted_buf,
			PAGE_CACHE_SIZE,
			WRAPFS_SB(file->f_dentry->d_sb)->key,
			WRAPFS_CRYPTO_KEY_LEN) < 0) {
		wrapfs_debug("my_decrypt failed!!");
		kunmap(page);
		kfree(decrypted_buf);
		err = -EINVAL;
		goto out;
	}
	memcpy(page_data, decrypted_buf, PAGE_CACHE_SIZE);
	kfree(decrypted_buf);
#endif

	if (err >= 0 && err < PAGE_CACHE_SIZE)
		memset(page_data + err, 0, PAGE_CACHE_SIZE - err);
	kunmap(page);

	if (err < 0)
		goto out;
	err = 0;

	/* if vfs_read succeeded above, sync up our times */
	fsstack_copy_attr_times(inode, lower_file->f_path.dentry->d_inode);

	flush_dcache_page(page);

out:
	if (err == 0)
		SetPageUptodate(page);
	else
		ClearPageUptodate(page);
	unlock_page(page);
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"err : %d", err);
	return err;
}
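The get_fs()/set_fs() switch in the middle of wrapfs_readpage() is what lets vfs_read() accept a kernel pointer (the kmap'ed page) where it normally expects a __user buffer. A minimal sketch of that legacy idiom follows; on current kernels the address-limit override has been removed and kernel_read() is used instead.

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t read_lower_into_kernel_buf(struct file *lower, char *buf,
					  size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t n;

	set_fs(KERNEL_DS);
	n = vfs_read(lower, (__force char __user *)buf, len, pos);
	set_fs(old_fs);

	return n;
}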