Example #1
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip into the caller's transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
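The two prefetchw() calls above follow a common pattern: warm the destination cache line with write intent immediately before a routine stores into it. A minimal sketch of the same pattern, assuming only the kernel's <linux/prefetch.h> hint (fill_buffer() itself is a hypothetical helper, not part of this driver):

#include <linux/prefetch.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper illustrating prefetch-before-write. */
static void fill_buffer(u8 *dst, const u8 *src, size_t len)
{
	prefetchw(dst);		/* request dst's line in a writable state */
	memcpy(dst, src, len);	/* the stores then hit a warm, exclusive line */
}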
/* Like lcpit_advance_one_byte(), but for buffers larger than
 * MAX_NORMAL_BUFSIZE.  */
static inline u32
lcpit_advance_one_byte_huge(const u32 cur_pos,
			    u32 pos_data[restrict],
			    u64 intervals64[restrict],
			    u32 prefetch_next[restrict],
			    struct lz_match matches[restrict],
			    const bool record_matches)
{
	u32 interval_idx;
	u32 next_interval_idx;
	u64 cur;
	u64 next;
	u32 match_pos;
	struct lz_match *matchptr;

	interval_idx = pos_data[cur_pos];

	prefetchw(&intervals64[pos_data[prefetch_next[0]] & HUGE_POS_MASK]);

	prefetch_next[0] = intervals64[prefetch_next[1]] & HUGE_POS_MASK;
	prefetchw(&pos_data[prefetch_next[0]]);

	prefetch_next[1] = pos_data[cur_pos + 3] & HUGE_POS_MASK;
	prefetchw(&intervals64[prefetch_next[1]]);

	pos_data[cur_pos] = 0;

	while ((next = intervals64[interval_idx]) & HUGE_UNVISITED_TAG) {
		intervals64[interval_idx] = (next & HUGE_LCP_MASK) | cur_pos;
		interval_idx = next & HUGE_POS_MASK;
	}

	matchptr = matches;
	while (next & HUGE_LCP_MASK) {
		cur = next;
		do {
			match_pos = next & HUGE_POS_MASK;
			next_interval_idx = pos_data[match_pos];
			next = intervals64[next_interval_idx];
		} while (next > cur);
		intervals64[interval_idx] = (cur & HUGE_LCP_MASK) | cur_pos;
		pos_data[match_pos] = interval_idx;
		if (record_matches) {
			matchptr->length = cur >> HUGE_LCP_SHIFT;
			matchptr->offset = cur_pos - match_pos;
			matchptr++;
		}
		interval_idx = next_interval_idx;
	}
	return matchptr - matches;
}
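The intervals64[] entries above pack an lcp value, a position or interval index, and an "unvisited" tag into a single 64-bit word. A sketch of matching accessors follows; the bit layout is an assumption chosen to be consistent with the mask names, not taken from the original source:

/* Assumed layout: low 32 bits = position/index, top bit = unvisited
 * tag, remaining high bits = lcp.  Widths are illustrative only. */
#define HUGE_LCP_SHIFT		32
#define HUGE_POS_MASK		(((u64)1 << HUGE_LCP_SHIFT) - 1)
#define HUGE_UNVISITED_TAG	((u64)1 << 63)
#define HUGE_LCP_MASK		(~(HUGE_POS_MASK | HUGE_UNVISITED_TAG))

static inline u32 interval_lcp(u64 entry)
{
	return (u32)((entry & HUGE_LCP_MASK) >> HUGE_LCP_SHIFT);
}

static inline u32 interval_pos(u64 entry)
{
	return (u32)(entry & HUGE_POS_MASK);
}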
Example #3
/** Read into the request buffer from the FIFO (max read == bytes in FIFO)
 *  Return:  0 = still running, 1 = completed, negative = errno
 *  NOTE: INDEX register must be set for EP
 */
static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1);
	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
		DEBUG("%s: Packet NOT ready!\n", __func__);
		return -EINVAL;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = usb_read(USB_OUT_FIFO_WC1);
	req->req.actual += min(count, bufferspace);

	is_short = (count < ep->ep.maxpacket);
	DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) (*fifo & 0xff);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk(KERN_WARNING "%s overflow %d\n",
				       ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);

		if (list_empty(&ep->queue))
			pio_irq_disable(ep_index(ep));
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
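For context, drivers of this style usually call read_fifo() from their RX interrupt path and keep draining while packets complete. A hypothetical caller sketch (the queue walk is an assumption, not code from this driver):

/* Hypothetical RX path: drain queued requests while packets complete. */
static void rx_irq(struct lh7a40x_ep *ep)
{
	struct lh7a40x_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				 struct lh7a40x_request, queue);
		if (read_fifo(ep, req) != 1)
			break;	/* 0 = still running, negative = error */
	}
}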
Example #4
static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	length = req->req.length - req->req.actual;
	dma_cache_maint(buf, length, DMA_FROM_DEVICE);

	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	ctrl =  readl(S3C_UDC_OTG_DOEPCTL(ep_num));

	writel(virt_to_phys(buf), S3C_UDC_OTG_DOEPDMA(ep_num));
	writel((pktcnt<<19)|(length<<0), S3C_UDC_OTG_DOEPTSIZ(ep_num));
	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, S3C_UDC_OTG_DOEPCTL(ep_num));

	DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x, DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
			"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
			__func__, ep_num,
			readl(S3C_UDC_OTG_DOEPDMA(ep_num)),
			readl(S3C_UDC_OTG_DOEPTSIZ(ep_num)),
			readl(S3C_UDC_OTG_DOEPCTL(ep_num)),
			buf, pktcnt, length);
	return 0;

}
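The pktcnt computation above is a round-up division with a floor of one packet for zero-length transfers; with the kernel's DIV_ROUND_UP() helper the same logic could read:

	pktcnt = length ? DIV_ROUND_UP(length, ep->ep.maxpacket) : 1;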
Example #5
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		/* the page-flag bitops below write page->flags, so warm the
		 * next page's flags word one iteration early */
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		if (bio_flagged(bio, BIO_BAIO)) {
			struct ba_iocb *baiocb =
				(struct ba_iocb *)bio->bi_private2;
			BUG_ON(!PageBaio(page));
			ClearPageBaio(page);
			if (!uptodate)
				baiocb->io_error = -EIO;
			baiocb->result += bvec->bv_len;
			baiocb_put(baiocb);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #6
/* completion handler for BIO writes */
static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
    const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
    struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

    if (bio->bi_size)   /* old bi_end_io convention: nonzero bi_size means
                         * the bio has not fully completed yet */
        return 1;

    if (!uptodate)
        err("bi_write_complete: not uptodate\n");

    do {
        struct page *page = bvec->bv_page;
        DEBUG(3, "Cleaning up page %ld\n", page->index);
        if (--bvec >= bio->bi_io_vec)
            prefetchw(&bvec->bv_page->flags);

        if (uptodate) {
            SetPageUptodate(page);
        } else {
            ClearPageUptodate(page);
            SetPageError(page);
        }
        ClearPageDirty(page);
        unlock_page(page);
        page_cache_release(page);
    } while (bvec >= bio->bi_io_vec);

    complete((struct completion*)bio->bi_private);
    return 0;
}
Example #7
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
/*
 * Use the suffix array accompanied with the longest-common-prefix array ---
 * which in combination can be called the "enhanced suffix array" --- to
 * simulate a bottom-up traversal of the corresponding suffix tree, or
 * equivalently the lcp-interval tree.  Do so in suffix rank order, but save the
 * superinterval references needed for later bottom-up traversal of the tree in
 * suffix position order.
 *
 * To enumerate the lcp-intervals, this algorithm scans the suffix array and its
 * corresponding LCP array linearly.  While doing so, it maintains a stack of
 * lcp-intervals that are currently open, meaning that their left boundaries
 * have been seen but their right boundaries have not.  The bottom of the stack
 * is the interval which covers the entire suffix array (this has lcp=0), and
 * the top of the stack is the deepest interval that is currently open (this has
 * the greatest lcp of any interval on the stack).  When this algorithm opens an
 * lcp-interval, it assigns it a unique index in intervals[] and pushes it onto
 * the stack.  When this algorithm closes an interval, it pops it from the stack
 * and sets the intervals[] entry of that interval to the index and lcp of that
 * interval's superinterval, which is the new top of the stack.
 *
 * This algorithm also sets pos_data[pos] for each suffix position 'pos' to the
 * index and lcp of the deepest lcp-interval containing it.  Alternatively, we
 * can interpret each suffix as being associated with a singleton lcp-interval,
 * or leaf of the suffix tree.  With this interpretation, an entry in pos_data[]
 * is the superinterval reference for one of these singleton lcp-intervals and
 * therefore is not fundamentally different from an entry in intervals[].
 *
 * To reduce memory usage, this algorithm re-uses the suffix array's memory to
 * store the generated intervals[] array.  This is possible because SA and LCP
 * are accessed linearly, and no more than one interval is generated per suffix.
 *
 * The techniques used in this algorithm are described in various published
 * papers.  The generation of lcp-intervals from the suffix array (SA) and the
 * longest-common-prefix array (LCP) is given as Algorithm BottomUpTraverse in
 * Kasai et al. (2001) and Algorithm 4.1 ("Computation of lcp-intervals") in
 * Abouelhoda et al. (2004).  Both these papers note the equivalence between
 * lcp-intervals (including the singleton lcp-interval for each suffix) and
 * nodes of the suffix tree.  Abouelhoda et al. (2004) furthermore applies
 * bottom-up traversal of the lcp-interval tree to Lempel-Ziv factorization, as
 * does Chen et al. (2008).  Algorithm CPS1b of Chen et al. (2008) dynamically
 * re-uses the suffix array during bottom-up traversal of the lcp-interval tree.
 *
 * References:
 *
 *	Kasai et al.  2001.  Linear-Time Longest-Common-Prefix Computation
 *	in Suffix Arrays and Its Applications.  CPM '01 Proceedings of the
 *	12th Annual Symposium on Combinatorial Pattern Matching pp. 181-192.
 *
 *	M.I. Abouelhoda, S. Kurtz, E. Ohlebusch.  2004.  Replacing Suffix Trees
 *	With Enhanced Suffix Arrays.  Journal of Discrete Algorithms Volume 2
 *	Issue 1, March 2004, pp. 53-86.
 *
 *	G. Chen, S.J. Puglisi, W.F. Smyth.  2008.  Lempel-Ziv Factorization
 *	Using Less Time & Space.  Mathematics in Computer Science June 2008,
 *	Volume 1, Issue 4, pp. 605-623.
 */
static void
build_LCPIT(u32 intervals[restrict], u32 pos_data[restrict], const u32 n)
{
	u32 * const SA_and_LCP = intervals;
	u32 next_interval_idx;
	u32 open_intervals[LCP_MAX + 1];
	u32 *top = open_intervals;
	u32 prev_pos = SA_and_LCP[0] & POS_MASK;

	*top = 0;
	intervals[0] = 0;
	next_interval_idx = 1;

	for (u32 r = 1; r < n; r++) {
		const u32 next_pos = SA_and_LCP[r] & POS_MASK;
		const u32 next_lcp = SA_and_LCP[r] & LCP_MASK;
		const u32 top_lcp = *top & LCP_MASK;

		prefetchw(&pos_data[SA_and_LCP[r + PREFETCH_SAFETY] & POS_MASK]);

		if (next_lcp == top_lcp) {
			/* Continuing the deepest open interval  */
			pos_data[prev_pos] = *top;
		} else if (next_lcp > top_lcp) {
			/* Opening a new interval  */
			*++top = next_lcp | next_interval_idx++;
			pos_data[prev_pos] = *top;
		} else {
			/* Closing the deepest open interval  */
			pos_data[prev_pos] = *top;
			for (;;) {
				const u32 closed_interval_idx = *top-- & POS_MASK;
				const u32 superinterval_lcp = *top & LCP_MASK;

				if (next_lcp == superinterval_lcp) {
					/* Continuing the superinterval */
					intervals[closed_interval_idx] = *top;
					break;
				} else if (next_lcp > superinterval_lcp) {
					/* Creating a new interval that is a
					 * superinterval of the one being
					 * closed, but still a subinterval of
					 * its superinterval  */
					*++top = next_lcp | next_interval_idx++;
					intervals[closed_interval_idx] = *top;
					break;
				} else {
					/* Also closing the superinterval  */
					intervals[closed_interval_idx] = *top;
				}
			}
		}
		prev_pos = next_pos;
	}

	/* Close any still-open intervals.  */
	pos_data[prev_pos] = *top;
	for (; top > open_intervals; top--)
		intervals[*top & POS_MASK] = *(top - 1);
}
/*
 * Build the LCP (Longest Common Prefix) array in linear time.
 *
 * LCP[r] will be the length of the longest common prefix between the suffixes
 * with positions SA[r - 1] and SA[r].  LCP[0] will be undefined.
 *
 * Algorithm taken from Kasai et al. (2001), but modified slightly:
 *
 *  - With bytes there is no realistic way to reserve a unique symbol for
 *    end-of-buffer, so use explicit checks for end-of-buffer.
 *
 *  - For decreased memory usage and improved memory locality, pack the two
 *    logically distinct SA and LCP arrays into a single array SA_and_LCP.
 *
 *  - Since SA_and_LCP is accessed randomly, improve the cache behavior by
 *    reading several entries ahead in ISA and prefetching the upcoming
 *    SA_and_LCP entry.
 *
 *  - If an LCP value is less than the minimum match length, then store 0.  This
 *    avoids having to do comparisons against the minimum match length later.
 *
 *  - If an LCP value is greater than the "nice match length", then store the
 *    "nice match length".  This caps the number of bits needed to store each
 *    LCP value, and this caps the depth of the LCP-interval tree, without
 *    usually hurting the compression ratio too much.
 *
 * References:
 *
 *	Kasai et al.  2001.  Linear-Time Longest-Common-Prefix Computation in
 *	Suffix Arrays and Its Applications.  CPM '01 Proceedings of the 12th
 *	Annual Symposium on Combinatorial Pattern Matching pp. 181-192.
 */
static void
build_LCP(u32 SA_and_LCP[restrict], const u32 ISA[restrict],
	  const u8 T[restrict], const u32 n,
	  const u32 min_lcp, const u32 max_lcp)
{
	u32 h = 0;
	for (u32 i = 0; i < n; i++) {
		const u32 r = ISA[i];
		prefetchw(&SA_and_LCP[ISA[i + PREFETCH_SAFETY]]);
		if (r > 0) {
			const u32 j = SA_and_LCP[r - 1] & POS_MASK;
			const u32 lim = min(n - i, n - j);
			while (h < lim && T[i + h] == T[j + h])
				h++;
			u32 stored_lcp = h;
			if (stored_lcp < min_lcp)
				stored_lcp = 0;
			else if (stored_lcp > max_lcp)
				stored_lcp = max_lcp;
			SA_and_LCP[r] |= stored_lcp << LCP_SHIFT;
			if (h > 0)
				h--;
		}
	}
}
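As a concrete check of the LCP definition (a standalone sketch, independent of the packed SA_and_LCP representation): for T = "banana" the suffix array is SA = {5, 3, 1, 0, 4, 2}, and the naive pairwise comparison below prints LCP[1..5] = 1 3 0 0 2.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *T = "banana";
	const uint32_t SA[] = { 5, 3, 1, 0, 4, 2 };
	const size_t n = strlen(T);

	for (uint32_t r = 1; r < n; r++) {
		uint32_t i = SA[r - 1], j = SA[r], h = 0;

		/* length of the longest common prefix of suffixes i and j */
		while (i + h < n && j + h < n && T[i + h] == T[j + h])
			h++;
		printf("LCP[%u] = %u\n", r, h);
	}
	return 0;
}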
Example #10
static int fb_counter_netrx(const struct fblock * const fb,
			    struct sk_buff * const skb,
			    enum path_type * const dir)
{
	int drop = 0;
	unsigned int seq;
	struct fb_counter_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	prefetchw(skb->cb);
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
		if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
			drop = 1;
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	u64_stats_update_begin(&fb_priv_cpu->syncp);
	fb_priv_cpu->packets++;
	fb_priv_cpu->bytes += skb->len;
	u64_stats_update_end(&fb_priv_cpu->syncp);

	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
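The retry loop above is the standard seqlock reader idiom: re-run the read section whenever read_seqretry() reports that a writer raced with it. A self-contained sketch of the same pattern (struct stats and its fields are placeholders):

#include <linux/seqlock.h>
#include <linux/types.h>

struct stats {			/* placeholder structure */
	seqlock_t lock;
	u64 packets;
};

static u64 read_packet_count(struct stats *s)
{
	unsigned int seq;
	u64 packets;

	do {
		seq = read_seqbegin(&s->lock);	/* snapshot the sequence count */
		packets = s->packets;		/* read writer-protected state */
	} while (read_seqretry(&s->lock, seq));	/* retry if a writer intervened */

	return packets;
}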
Example #11
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #12
/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		__copy_user_zeroing(to, from, n); /* macro: updates n to the uncopied remainder */
	else
		memset(to, 0, n);
	return n;	/* bytes not copied, matching the copy_from_user contract */
}
Example #14
/** Read into the request buffer from the FIFO (max read == bytes in FIFO)
 *  Return:  0 = still running, 1 = completed, negative = errno
 */
static int read_fifo(struct elfin_ep *ep, struct elfin_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	void* fifo = ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1, ep_index(ep));
	if (!(csr & S3C2410_UDC_OCSR1_PKTRDY)) {
		DPRINTK("%s: Packet NOT ready!\n", __FUNCTION__);
		return -EINVAL;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = (( (usb_read(S3C2410_UDC_OUT_FIFO_CNT2_REG, ep_index(ep)) & 0xff ) << 8) | (usb_read(S3C2410_UDC_OUT_FIFO_CNT1_REG, ep_index(ep)) & 0xff));
	req->req.actual += min(count, bufferspace);

	is_short = (count < ep->ep.maxpacket);
	DPRINTK("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) __raw_readl(fifo);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk("%s overflow %d\n", ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	usb_clear(S3C2410_UDC_OCSR1_PKTRDY, ep->csr1, ep_index(ep));

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);         /* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}
Example #16
static inline void __atomic_add(int i, __atomic_t *v)
{
    unsigned long tmp;
    int result;

    prefetchw(&v->counter);
    __asm__ __volatile__("@ __atomic_add\n"
                         "1:     ldrex   %0, [%3]\n"
                         "       add     %0, %0, %4\n"
                         "       strex   %1, %0, [%3]\n"
                         "       teq     %1, #0\n"
                         "       bne     1b"
                         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
                         : "r" (&v->counter), "Ir" (i)
                         : "cc");
}
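The ldrex/strex pair is ARM's load-linked/store-conditional: the strex succeeds only if nothing disturbed the exclusive monitor since the ldrex, and the loop retries otherwise. For comparison, a compiler-builtin form of the same read-modify-write (a sketch, not the kernel's own atomic_add()):

/* GCC/Clang builtin equivalent of the atomic add above (illustrative). */
static inline void atomic_add_builtin(int i, int *counter)
{
	__atomic_fetch_add(counter, i, __ATOMIC_RELAXED);
}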
Example #17
static inline void __atomic_add_hang(int i, __atomic_t *v)
{
    unsigned long tmp;
    int result;

    prefetchw(&v->counter);
    __asm__ __volatile__("@ __atomic_add_hang\n"
                         "1:     ldrex   %0, [%3]\n"
                         "       ldrex   %0, [%3]\n"
                         "       add     %0, %0, %4\n"
                         "       strex   %1, %0, [%3]\n"
                         "       strex   %1, %0, [%3]\n"
                         "       teq     %1, #0\n"
                         "       bne     1b" /* the first strex clears the exclusive
                                              * monitor, so the second strex always
                                              * fails and the loop never exits */
                         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
                         : "r" (&v->counter), "Ir" (i)
                         : "cc");
}
Example #18
static void mpage_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);
	struct s3c_udc *udc = ep->dev;
	struct device *dev = &udc->dev->dev;

	aligned_map_buf(req, ep_is_in(ep));
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	length = req->req.length - req->req.actual;

	req->req.dma = dma_map_single(dev, buf,
				length, DMA_FROM_DEVICE);
	req->mapped = 1;

	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	ctrl =  __raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	__raw_writel(virt_to_phys(buf),
		udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num));
	__raw_writel((pktcnt<<19) | (length<<0),
		udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num));
	__raw_writel(DEPCTL_EPENA | DEPCTL_CNAK | ctrl,
		udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
			"DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
			"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
			__func__, ep_num,
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num)),
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num)),
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num)),
			buf, pktcnt, length);
	return 0;
}
Example #20
static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++) {
		/* prefetch PREFETCH_STRIDE bytes ahead of the current entry
		 * (the /16 presumably matches sizeof(pmd_t) here) */
		prefetchw(pmd+j+(PREFETCH_STRIDE/16));
		free_one_pmd(pmd+j);
	}
	pmd_free(pmd);
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct compressed_bio *cb = bio->bi_private;
	struct page *page;
	int page_idx, ret = 0;
	
	printk(KERN_INFO "\n==> IN MPAGE_END_IO");
	do {
		struct page *page = bvec->bv_page;		
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
			
	} while (bvec >= bio->bi_io_vec);

	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;
	
	/* Last bio...start decompression */
	ret = decompress_stride(cb);
	
	for (page_idx = 0; page_idx < cb->nr_pages; page_idx++) {
		page = cb->compressed_pages[page_idx];
		page->mapping = NULL;
		page_cache_release(page);
	}
	
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
Example #22
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			/* keep the D-cache coherent on aliasing-cache
			 * architectures before exposing the page */
			flush_dcache_page(page);
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #23
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				int enc_status = tenc_decrypt_page(page);
				if (enc_status == TENC_CAN_UNLOCK) {
					/* Decryption code is not interested. Unlock immediately */
					SetPageUptodate(page);
					unlock_page(page);
				}
				else if (enc_status == TENC_DECR_FAIL) {
					ClearPageUptodate(page);
					SetPageError(page);
					unlock_page(page);
				}
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
				unlock_page(page);
			}
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #24
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
Example #25
static int read_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
{
	u8	*buf;
	int	bytes_ep, bufferspace, count, i;

	bytes_ep = imx_fifo_bcount(imx_ep);
	bufferspace = req->req.length - req->req.actual;

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(imx_ep_empty(imx_ep)))
		count = 0;	/* FIFO empty: zero-length packet */
	else
		count = min(bytes_ep, bufferspace);

	for (i = count; i > 0; i--)
		*buf++ = __raw_readb(imx_ep->imx_usb->base
						+ USB_EP_FDAT0(EP_NO(imx_ep)));
	req->req.actual += count;

	return count;
}
Example #26
static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
/** Read into the request buffer from the FIFO (max read == bytes in FIFO)
 *  Return:  0 = still running, 1 = completed, negative = errno
 */
static int read_fifo(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 csr, gintmsk;
	u32 *buf;
	u32 bufferspace, count, count_bytes, is_short = 0;
	u32 fifo = ep->fifo;

	csr = readl(S3C_UDC_OTG_GRXSTSP);
	count_bytes = (csr & 0x7ff0)>>4;

	gintmsk = readl(S3C_UDC_OTG_GINTMSK);

	if (!count_bytes) {
		DEBUG_OUT_EP("%s: count_bytes %d bytes\n", __FUNCTION__, count_bytes);

		/* Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY */
		writel(gintmsk | INT_RX_FIFO_NOT_EMPTY, S3C_UDC_OTG_GINTMSK);
		return 0;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	count = count_bytes / 4;
	if (count_bytes%4)
		count = count + 1;

	req->req.actual += min(count_bytes, bufferspace);

	is_short = (count_bytes < ep->ep.maxpacket);
	DEBUG_OUT_EP("%s: read %s, %d bytes%s req %p %d/%d GRXSTSP:0x%x\n",
		__FUNCTION__,
		ep->ep.name, count_bytes,
		is_short ? "/S" : "", req, req->req.actual, req->req.length, csr);

	while (likely(count-- != 0)) {
		u32 byte = (u32) readl(fifo);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk("%s overflow %d\n", ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace -= 4;
		}
	}

	/* Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY */
	writel(gintmsk | INT_RX_FIFO_NOT_EMPTY, S3C_UDC_OTG_GINTMSK);

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
Example #28
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = lru_to_page(pages);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
						is_readahead ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}
Example #29
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}
Example #30
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
                                struct list_head *pages, struct page *page,
                                unsigned nr_pages)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct inode *inode = mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t block_nr;
    struct block_device *bdev = inode->i_sb->s_bdev;
    struct f2fs_map_blocks map;

    map.m_pblk = 0;
    map.m_lblk = 0;
    map.m_len = 0;
    map.m_flags = 0;

    for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

        prefetchw(&page->flags);
        if (pages) {
            page = list_entry(pages->prev, struct page, lru);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping,
                                      page->index, GFP_KERNEL))
                goto next_page;
        }

        block_in_file = (sector_t)page->index;
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                             blkbits;
        if (last_block > last_block_in_file)
            last_block = last_block_in_file;

        /*
         * Map blocks using the previous result first.
         */
        if ((map.m_flags & F2FS_MAP_MAPPED) &&
                block_in_file > map.m_lblk &&
                block_in_file < (map.m_lblk + map.m_len))
            goto got_it;

        /*
         * Then do more f2fs_map_blocks() calls until we are
         * done with this page.
         */
        map.m_flags = 0;

        if (block_in_file < last_block) {
            map.m_lblk = block_in_file;
            map.m_len = last_block - block_in_file;

            if (f2fs_map_blocks(inode, &map, 0, false))
                goto set_error_page;
        }
got_it:
        if ((map.m_flags & F2FS_MAP_MAPPED)) {
            block_nr = map.m_pblk + block_in_file - map.m_lblk;
            SetPageMappedToDisk(page);

            if (!PageUptodate(page) && !cleancache_get_page(page)) {
                SetPageUptodate(page);
                goto confused;
            }
        } else {
            zero_user_segment(page, 0, PAGE_CACHE_SIZE);
            SetPageUptodate(page);
            unlock_page(page);
            goto next_page;
        }

        /*
         * This page will go to BIO.  Do we need to send this
         * BIO off first?
         */
        if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
            submit_bio(READ, bio);
            bio = NULL;
        }
        if (bio == NULL) {
            struct f2fs_crypto_ctx *ctx = NULL;

            if (f2fs_encrypted_inode(inode) &&
                    S_ISREG(inode->i_mode)) {
                struct page *cpage;

                ctx = f2fs_get_crypto_ctx(inode);
                if (IS_ERR(ctx))
                    goto set_error_page;

                /* wait the page to be moved by cleaning */
                cpage = find_lock_page(
                            META_MAPPING(F2FS_I_SB(inode)),
                            block_nr);
                if (cpage) {
                    f2fs_wait_on_page_writeback(cpage,
                                                DATA);
                    f2fs_put_page(cpage, 1);
                }
            }

            bio = bio_alloc(GFP_KERNEL,
                            min_t(int, nr_pages, BIO_MAX_PAGES));
            if (!bio) {
                if (ctx)
                    f2fs_release_crypto_ctx(ctx);
                goto set_error_page;
            }
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
            bio->bi_end_io = f2fs_read_end_io;
            bio->bi_private = ctx;
        }

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

        last_block_in_bio = block_nr;
        goto next_page;
set_error_page:
        SetPageError(page);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        unlock_page(page);
        goto next_page;
confused:
        if (bio) {
            submit_bio(READ, bio);
            bio = NULL;
        }
        unlock_page(page);
next_page:
        if (pages)
            page_cache_release(page);
    }
    BUG_ON(pages && !list_empty(pages));
    if (bio)
        submit_bio(READ, bio);
    return 0;
}