Example #1
0
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
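The masking arithmetic above splits the pending bytes into whole hash blocks for the engine and a sub-block tail that stays in the cache for the next step. Below is a minimal user-space sketch of that split, assuming a 64-byte hash block; the constant and helper names are hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE	64u			/* assumed hash block size */
#define BLOCK_SIZE_MSK	(BLOCK_SIZE - 1)	/* plays the role of CESA_HASH_BLOCK_SIZE_MSK */

/* Split len bytes into a whole-block prefix to process now and a tail to cache. */
static void split_blocks(size_t len, size_t *process, size_t *cache)
{
	*cache = len & BLOCK_SIZE_MSK;			/* bytes short of a full block */
	*process = len & ~(size_t)BLOCK_SIZE_MSK;	/* largest whole-block prefix */
}

int main(void)
{
	size_t process, cache;

	split_blocks(200, &process, &cache);
	assert(process == 192 && cache == 8);	/* 200 = 3 * 64 + 8 */
	printf("process %zu now, cache %zu for the next step\n", process, cache);
	return 0;
}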
Example #2
0
static int ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	unsigned frag_to_free, free_count;
	unsigned i, tmp;
	int retry;
	
	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	
	frag_to_free = 0;
	free_count = 0;
	retry = 0;
	
	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;		

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);
	mark_inode_dirty(inode);
	frag_to_free = tmp + frag1;

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		ufs_data_ptr_clear(uspi, p);

		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}
		mark_inode_dirty(inode);
	}
	
	if (free_count > 0)
		ufs_free_blocks (inode, frag_to_free, free_count);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	ufs_data_ptr_clear(uspi, p);

	ufs_free_fragments (inode, tmp, frag4);
	mark_inode_dirty(inode);
 next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
	return retry;
}
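The frag2/frag3 computation above rounds the truncation range inward to whole-block boundaries: frag2 is the first block boundary at or after frag1, frag3 the last boundary at or before frag4. A standalone sketch of that rounding, assuming 8 fragments per block; the FPB names are hypothetical stand-ins for uspi->s_fpb and uspi->s_fpbmask.

#include <assert.h>
#include <stdint.h>

#define FPB	 8u		/* assumed fragments per block */
#define FPB_MASK (FPB - 1)	/* stand-in for uspi->s_fpbmask */

int main(void)
{
	uint64_t frag1 = 13, frag4 = 42;

	/* first whole-block boundary at or after frag1 */
	uint64_t frag2 = (frag1 & FPB_MASK) ? ((frag1 | FPB_MASK) + 1) : frag1;
	/* last whole-block boundary at or before frag4 */
	uint64_t frag3 = frag4 & ~(uint64_t)FPB_MASK;

	assert(frag2 == 16 && frag3 == 40);
	/* fragments 13..15 and 40..41 are freed piecemeal, the blocks between are freed whole */
	return 0;
}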
Example #3
0
#ifdef CONFIG_BLK
static ulong scsi_read(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
#else
static ulong scsi_read(struct blk_desc *block_dev, lbaint_t blknr,
		       lbaint_t blkcnt, void *buffer)
#endif
{
#ifdef CONFIG_BLK
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
	struct udevice *bdev = dev->parent;
#else
	struct udevice *bdev = NULL;
#endif
	lbaint_t start, blks;
	uintptr_t buf_addr;
	unsigned short smallblks = 0;
	struct scsi_cmd *pccb = (struct scsi_cmd *)&tempccb;

	/* Setup device */
	pccb->target = block_dev->target;
	pccb->lun = block_dev->lun;
	buf_addr = (unsigned long)buffer;
	start = blknr;
	blks = blkcnt;
	debug("\nscsi_read: dev %d startblk " LBAF
	      ", blccnt " LBAF " buffer %lx\n",
	      block_dev->devnum, start, blks, (unsigned long)buffer);
	do {
		pccb->pdata = (unsigned char *)buf_addr;
#ifdef CONFIG_SYS_64BIT_LBA
		if (start > SCSI_LBA48_READ) {
			unsigned long blocks;
			blocks = min_t(lbaint_t, blks, SCSI_MAX_READ_BLK);
			pccb->datalen = block_dev->blksz * blocks;
			scsi_setup_read16(pccb, start, blocks);
			start += blocks;
			blks -= blocks;
		} else
#endif
		if (blks > SCSI_MAX_READ_BLK) {
			pccb->datalen = block_dev->blksz *
				SCSI_MAX_READ_BLK;
			smallblks = SCSI_MAX_READ_BLK;
			scsi_setup_read_ext(pccb, start, smallblks);
			start += SCSI_MAX_READ_BLK;
			blks -= SCSI_MAX_READ_BLK;
		} else {
			pccb->datalen = block_dev->blksz * blks;
			smallblks = (unsigned short)blks;
			scsi_setup_read_ext(pccb, start, smallblks);
			start += blks;
			blks = 0;
		}
		debug("scsi_read_ext: startblk " LBAF
		      ", blccnt %x buffer %lX\n",
		      start, smallblks, buf_addr);
		if (scsi_exec(bdev, pccb)) {
			scsi_print_error(pccb);
			blkcnt -= blks;
			break;
		}
		buf_addr += pccb->datalen;
	} while (blks != 0);
	debug("scsi_read_ext: end startblk " LBAF
	      ", blccnt %x buffer %lX\n", start, smallblks, buf_addr);
	return blkcnt;
}
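The loop above issues one READ per iteration, clamping the block count to the controller limit and advancing until the request is drained. The same chunking pattern in isolation, with a hypothetical per-command limit standing in for SCSI_MAX_READ_BLK:

#include <stdio.h>

#define MAX_BLKS_PER_CMD 0xFFFFu	/* hypothetical limit, like SCSI_MAX_READ_BLK */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;	/* what min_t(lbaint_t, ...) reduces to */
}

int main(void)
{
	unsigned long start = 0, blks = 200000;	/* hypothetical request */

	while (blks) {
		unsigned long n = min_ul(blks, MAX_BLKS_PER_CMD);

		/* a real driver would build and issue one READ command here */
		printf("read %lu blocks at LBA %lu\n", n, start);
		start += n;
		blks -= n;
	}
	return 0;
}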
Example #4
0
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKBs. Convert to a
	 * linear SKB for management frames, and for data frames that
	 * require software decryption or software defragmentation. */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			 0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here. It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
 out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
Example #5
0
static int tegra2_idle_lp2_cpu_0(struct cpuidle_device *dev,
			   struct cpuidle_state *state, s64 request)
{
	ktime_t entry_time;
	ktime_t exit_time;
	s64 wake_time;
	bool sleep_completed = false;
	int bin;
	int i;

	while (tegra2_cpu_is_resettable_soon())
		cpu_relax();

	if (tegra2_reset_other_cpus(dev->cpu))
		return 0;

	idle_stats.both_idle_count++;

	if (request < state->target_residency) {
		tegra2_lp3_fall_back(dev);
		return -EBUSY;
	}

	/* LP2 entry time */
	entry_time = ktime_get();

	/* LP2 initial targeted wake time */
	wake_time = ktime_to_us(entry_time) + request;

	/* CPU0 must wake up before CPU1. */
	smp_rmb();
	wake_time = min_t(s64, wake_time, tegra_cpu1_wake_by_time);

	/* LP2 actual targeted wake time */
	request = wake_time - ktime_to_us(entry_time);
	BUG_ON(wake_time < 0LL);

	idle_stats.tear_down_count++;
	entry_time = ktime_get();

	if (request > state->target_residency) {
		s64 sleep_time = request - tegra_lp2_exit_latency;

		bin = time_to_bin((u32)request / 1000);
		idle_stats.lp2_count++;
		idle_stats.lp2_count_bin[bin]++;

		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

		if (tegra_idle_lp2_last(sleep_time, 0) == 0)
			sleep_completed = true;
		else {
			int irq = tegra_gic_pending_interrupt();
			idle_stats.lp2_int_count[irq]++;
		}

		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	}

	for_each_online_cpu(i) {
		if (i != dev->cpu)
			tegra2_wake_reset_cpu(i);
	}

	exit_time = ktime_get();
	if (sleep_completed) {
		/*
		 * Stayed in LP2 for the full time until the next tick,
		 * adjust the exit latency based on measurement
		 */
		s64 actual_time = ktime_to_us(ktime_sub(exit_time, entry_time));
		long offset = (long)(actual_time - request);
		int latency = tegra_lp2_exit_latency + offset / 16;
		latency = clamp(latency, 0, 10000);
		tegra_lp2_exit_latency = latency;
		smp_wmb();

		idle_stats.lp2_completed_count++;
		idle_stats.lp2_completed_count_bin[bin]++;
		idle_stats.in_lp2_time += actual_time;

		pr_debug("%lld %lld %ld %d\n", request, actual_time,
			offset, bin);
	}

	return 0;
}
Example #6
0
/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
				  unsigned long liovcnt,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	ssize_t bytes_copied_loop;
	ssize_t bytes_copied = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	unsigned long iov_l_curr_idx = 0;
	size_t iov_l_curr_offset = 0;
	ssize_t iov_len;

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	task_lock(task);
	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
		task_unlock(task);
		rc = -EPERM;
		goto put_task_struct;
	}
	mm = task->mm;

	if (!mm || (task->flags & PF_KTHREAD)) {
		task_unlock(task);
		rc = -EINVAL;
		goto put_task_struct;
	}

	atomic_inc(&mm->mm_users);
	task_unlock(task);

	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
			process_pages, mm, task, vm_write, &bytes_copied_loop);
		bytes_copied += bytes_copied_loop;
		if (rc != 0) {
			/* If we have managed to copy any data at all then
			   we return the number of bytes copied. Otherwise
			   we return the error code */
			if (bytes_copied)
				rc = bytes_copied;
			goto put_mm;
		}
	}

	rc = bytes_copied;
put_mm:
	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
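This is the core of the process_vm_readv()/process_vm_writev() syscalls. A user-space sketch of the read side; it targets the calling process itself so the demo needs no ptrace permission (a real caller would pass a pid it is allowed to attach to).

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello from the 'remote' side";
	char dst[sizeof(src)] = { 0 };
	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };
	ssize_t n;

	/* Reading our own address space keeps the demo permission-free;
	 * a real caller would pass a pid it may ptrace. */
	n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	assert(n == (ssize_t)sizeof(src) && strcmp(src, dst) == 0);
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}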
Example #7
0
/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_data() or xfs_seek_hole().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page was mapped into the given range.  If we are
		 * searching for holes and this is the first pass through
		 * the loop, the given offset landed in a hole; return it.
		 *
		 * If we have already stepped through some pages but they
		 * all contained data, the last offset already points past
		 * the end of the last mapped page.  If it has not reached
		 * the end of the search range, there must be a hole
		 * between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least one page was found.  If this is the first pass
		 * through the loop and the first page's offset is greater
		 * than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of
			 * range.  If the current offset has not reached the
			 * end of the specified search range, there should be
			 * a hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated(page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found none,
			 * or searching for a hole and found a data buffer.
			 * In either case, the next page probably contains
			 * what we want, so update the last offset to point
			 * at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than requested, so the search
		 * is done.  Nothing was found if we were searching for
		 * data, but there is a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
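This helper backs xfs_seek_data()/xfs_seek_hole(), which user space reaches through lseek(2) with SEEK_DATA/SEEK_HOLE. A sketch that walks the data extents of a sparse file that way; the file name is hypothetical and error handling is kept minimal.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("sparse.bin", O_RDONLY);	/* hypothetical sparse file */
	off_t data = 0, hole;

	if (fd < 0)
		return 1;
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);	/* an implicit hole exists at EOF */
		if (hole < 0)
			break;
		printf("data [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}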
Example #8
0
static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
		unsigned short flags, char read_write, u8 command, int size,
		union i2c_smbus_data * data)
{
	struct amd_smbus *smbus = adap->algo_data;
	unsigned char protocol, len, pec, temp[2];
	int i;

	protocol = (read_write == I2C_SMBUS_READ) ? AMD_SMB_PRTCL_READ
						  : AMD_SMB_PRTCL_WRITE;
	pec = (flags & I2C_CLIENT_PEC) ? AMD_SMB_PRTCL_PEC : 0;

	switch (size) {
		case I2C_SMBUS_QUICK:
			protocol |= AMD_SMB_PRTCL_QUICK;
			read_write = I2C_SMBUS_WRITE;
			break;

		case I2C_SMBUS_BYTE:
			if (read_write == I2C_SMBUS_WRITE)
				amd_ec_write(smbus, AMD_SMB_CMD, command);
			protocol |= AMD_SMB_PRTCL_BYTE;
			break;

		case I2C_SMBUS_BYTE_DATA:
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			if (read_write == I2C_SMBUS_WRITE)
				amd_ec_write(smbus, AMD_SMB_DATA, data->byte);
			protocol |= AMD_SMB_PRTCL_BYTE_DATA;
			break;

		case I2C_SMBUS_WORD_DATA:
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			if (read_write == I2C_SMBUS_WRITE) {
				amd_ec_write(smbus, AMD_SMB_DATA,
					     data->word & 0xff);
				amd_ec_write(smbus, AMD_SMB_DATA + 1,
					     data->word >> 8);
			}
			protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
			break;

		case I2C_SMBUS_BLOCK_DATA:
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			if (read_write == I2C_SMBUS_WRITE) {
				len = min_t(u8, data->block[0],
					    I2C_SMBUS_BLOCK_MAX);
				amd_ec_write(smbus, AMD_SMB_BCNT, len);
				for (i = 0; i < len; i++)
					amd_ec_write(smbus, AMD_SMB_DATA + i,
						     data->block[i + 1]);
			}
			protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
			break;

		case I2C_SMBUS_I2C_BLOCK_DATA:
			len = min_t(u8, data->block[0],
				    I2C_SMBUS_BLOCK_MAX);
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			amd_ec_write(smbus, AMD_SMB_BCNT, len);
			if (read_write == I2C_SMBUS_WRITE)
				for (i = 0; i < len; i++)
					amd_ec_write(smbus, AMD_SMB_DATA + i,
						     data->block[i + 1]);
			protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
			break;

		case I2C_SMBUS_PROC_CALL:
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			amd_ec_write(smbus, AMD_SMB_DATA, data->word & 0xff);
			amd_ec_write(smbus, AMD_SMB_DATA + 1, data->word >> 8);
			protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
			read_write = I2C_SMBUS_READ;
			break;

		case I2C_SMBUS_BLOCK_PROC_CALL:
			len = min_t(u8, data->block[0],
				    I2C_SMBUS_BLOCK_MAX - 1);
			amd_ec_write(smbus, AMD_SMB_CMD, command);
			amd_ec_write(smbus, AMD_SMB_BCNT, len);
			for (i = 0; i < len; i++)
				amd_ec_write(smbus, AMD_SMB_DATA + i,
					     data->block[i + 1]);
			protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
			read_write = I2C_SMBUS_READ;
			break;

		default:
			dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
			return -EOPNOTSUPP;
	}
Example #9
0
/* Return the max entries we should read out of rx fifo */
static u32 pic32_rx_max(struct pic32_spi_priv *priv, int n_bytes)
{
	u32 rx_left = (priv->rx_end - priv->rx) / n_bytes;

	return min_t(u32, rx_left, pic32_spi_rx_fifo_level(priv));
}
Example #10
0
static ssize_t pp_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	struct pp_struct *pp = file->private_data;
	char *kbuffer;
	ssize_t bytes_written = 0;
	ssize_t wrote;
	int mode;
	struct parport *pport;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		pr_debug(CHRDEV "%x: claim the port first\n", minor);
		return -EINVAL;
	}

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer)
		return -ENOMEM;

	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout(pp->pdev,
			    (file->f_flags & O_NONBLOCK) ?
			    PARPORT_INACTIVITY_O_NONBLOCK :
			    pp->default_inactivity);

	while (bytes_written < count) {
		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);

		if (copy_from_user(kbuffer, buf + bytes_written, n)) {
			bytes_written = -EFAULT;
			break;
		}

		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
			/* do a fast EPP write */
			if (pport->ieee1284.mode & IEEE1284_ADDR) {
				wrote = pport->ops->epp_write_addr(pport,
					kbuffer, n, PARPORT_EPP_FAST);
			} else {
				wrote = pport->ops->epp_write_data(pport,
					kbuffer, n, PARPORT_EPP_FAST);
			}
		} else {
			wrote = parport_write(pp->pdev->port, kbuffer, n);
		}

		if (wrote <= 0) {
			if (!bytes_written)
				bytes_written = wrote;
			break;
		}

		bytes_written += wrote;

		if (file->f_flags & O_NONBLOCK) {
			if (!bytes_written)
				bytes_written = -EAGAIN;
			break;
		}

		if (signal_pending(current))
			break;

		cond_resched();
	}

	parport_set_timeout(pp->pdev, pp->default_inactivity);

	kfree(kbuffer);
	pp_enable_irq(pp);
	return bytes_written;
}
Example #11
0
/*
 * Send read data back to initiator.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;
	u32 fh_off = 0;
	u32 frame_off;
	size_t frame_len = 0;
	size_t mem_len;
	u32 mem_off;
	size_t tlen;
	struct page *page;
	int use_sg;
	int error;
	void *to = NULL;
	u8 *from = NULL;
	int loop_limit = 10000;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		 ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence.  Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit.  remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			page = virt_to_page(from + mem_off);
			get_page(page);
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;

		if (frame_len)
			continue;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}
Example #12
0
static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
		       loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	struct pp_struct *pp = file->private_data;
	char *kbuffer;
	ssize_t bytes_read = 0;
	struct parport *pport;
	int mode;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		pr_debug(CHRDEV "%x: claim the port first\n", minor);
		return -EINVAL;
	}

	/* Trivial case. */
	if (count == 0)
		return 0;

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer)
		return -ENOMEM;
	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout(pp->pdev,
			    (file->f_flags & O_NONBLOCK) ?
			    PARPORT_INACTIVITY_O_NONBLOCK :
			    pp->default_inactivity);

	while (bytes_read == 0) {
		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);

		if (mode == IEEE1284_MODE_EPP) {
			/* various specials for EPP mode */
			int flags = 0;
			size_t (*fn)(struct parport *, void *, size_t, int);

			if (pp->flags & PP_W91284PIC)
				flags |= PARPORT_W91284PIC;
			if (pp->flags & PP_FASTREAD)
				flags |= PARPORT_EPP_FAST;
			if (pport->ieee1284.mode & IEEE1284_ADDR)
				fn = pport->ops->epp_read_addr;
			else
				fn = pport->ops->epp_read_data;
			bytes_read = (*fn)(pport, kbuffer, need, flags);
		} else {
			bytes_read = parport_read(pport, kbuffer, need);
		}

		if (bytes_read != 0)
			break;

		if (file->f_flags & O_NONBLOCK) {
			bytes_read = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			bytes_read = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	parport_set_timeout(pp->pdev, pp->default_inactivity);

	if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
		bytes_read = -EFAULT;

	kfree(kbuffer);
	pp_enable_irq(pp);
	return bytes_read;
}
Example #13
0
/*
 * AFS read page from file, directory or symlink
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct afs_vnode *vnode;
	struct inode *inode;
	struct key *key;
	size_t len;
	off_t offset;
	int ret;

	inode = page->mapping->host;

	ASSERT(file != NULL);
	key = file->private_data;
	ASSERT(key != NULL);

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

#ifdef AFS_CACHING_SUPPORT
	/* is it cached? */
	ret = cachefs_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* read BIO submitted and wb-journal entry found */
	case 1:
		BUG(); // TODO - handle wb-journal match

		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* no page available in cache */
	case -ENOBUFS:
	case -ENODATA:
	default:
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
#ifdef AFS_CACHING_SUPPORT
			cachefs_uncache_page(vnode->cache, page);
#endif
			goto error;
		}

		SetPageUptodate(page);

#ifdef AFS_CACHING_SUPPORT
		if (cachefs_write_page(vnode->cache,
				       page,
				       afs_file_readpage_write_complete,
				       NULL,
				       GFP_KERNEL) != 0
		    ) {
			cachefs_uncache_page(vnode->cache, page);
			unlock_page(page);
		}
#else
		unlock_page(page);
#endif
	}

	_leave(" = 0");
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
Example #14
0
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}


	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				  BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}
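Per the comment above, a command larger than one 16-byte cmdqe is copied into consecutive slots, with the last slot zero-padded. The same splitting logic as a standalone sketch; the slot size is assumed to match BNXT_QPLIB_CMDQE_UNITS, and the names here are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define SLOT_SIZE 16u	/* one cmdqe unit, per the comment above */

/* Copy size bytes of a command into consecutive fixed-size slots,
 * zero-padding the tail slot, the same shape as the do/while loop above. */
static unsigned int fill_slots(uint8_t slots[][SLOT_SIZE], unsigned int nslots,
			       const uint8_t *req, uint32_t size)
{
	unsigned int used = 0;

	while (size > 0 && used < nslots) {
		uint32_t n = size < SLOT_SIZE ? size : SLOT_SIZE;

		memset(slots[used], 0, SLOT_SIZE);
		memcpy(slots[used], req, n);
		req += n;
		size -= n;
		used++;
	}
	return used;
}

int main(void)
{
	uint8_t slots[4][SLOT_SIZE];
	uint8_t cmd[40] = { 1, 2, 3 };

	assert(fill_slots(slots, 4, cmd, sizeof(cmd)) == 3);	/* 40 bytes -> 3 slots */
	return 0;
}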
Example #15
0
int __generic_block_fiemap(struct inode *inode,
                           struct fiemap_extent_info *fieinfo, u64 start,
                           u64 len, get_block_t *get_block)
{
    struct buffer_head tmp;
    unsigned long long start_blk;
    long long length = 0, map_len = 0;
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = FIEMAP_EXTENT_MERGED;
    int ret = 0, past_eof = 0, whole_file = 0;

    if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
        return ret;

    start_blk = logical_to_blk(inode, start);

    length = (long long)min_t(u64, len, i_size_read(inode));
    if (length < len)
        whole_file = 1;

    map_len = length;

    do {
        /*
         * we set b_size to the total size we want so it will map as
         * many contiguous blocks as possible at once
         */
        memset(&tmp, 0, sizeof(struct buffer_head));
        tmp.b_size = map_len;

        ret = get_block(inode, start_blk, &tmp, 0);
        if (ret)
            break;

        /* HOLE */
        if (!buffer_mapped(&tmp)) {
            length -= blk_to_logical(inode, 1);
            start_blk++;

            /*
             * we want to handle the case where there is an
             * allocated block at the front of the file, and then
             * nothing but holes up to the end of the file properly,
             * to make sure that extent at the front gets properly
             * marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    blk_to_logical(inode, start_blk) >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;

            /*
             * first hole after going past the EOF, this is our
             * last extent
             */
            if (past_eof && size) {
                flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /* if we have holes up to/past EOF then we're done */
            if (length <= 0 || past_eof)
                break;
        } else {
            /*
             * we have gone over the length of what we wanted to
             * map, and it wasn't the entire file, so add the extent
             * we got last time and exit.
             *
             * This is for the case where say we want to map all the
             * way up to the second to the last block in a file, but
             * the last block is a hole, making the second to last
             * block FIEMAP_EXTENT_LAST.  In this case we want to
             * see if there is a hole after the second to last block
             * so we can mark it properly.  If we found data after
             * we exceeded the length we were requesting, then we
             * are good to go, just add the extent to the fieinfo
             * and break
             */
            if (length <= 0 && !whole_file) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /*
             * if size != 0 then we know we already have an extent
             * to add, so add it.
             */
            if (size) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                if (ret)
                    break;
            }

            logical = blk_to_logical(inode, start_blk);
            phys = blk_to_logical(inode, tmp.b_blocknr);
            size = tmp.b_size;
            flags = FIEMAP_EXTENT_MERGED;

            length -= tmp.b_size;
            start_blk += logical_to_blk(inode, size);

            /*
             * If we are past the EOF, then we need to make sure as
             * soon as we find a hole that the last extent we found
             * is marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    logical+size >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;
        }
        cond_resched();
    } while (1);

    /* if ret is 1 then we just hit the end of the extent array */
    if (ret == 1)
        ret = 0;

    return ret;
}
Example #16
0
static int ext4_destroy_inline_data_nolock(handle_t *handle,
					   struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = 0, },
	};
	struct ext4_xattr_info i = {
		.name_index = EXT4_XATTR_INDEX_SYSTEM,
		.name = EXT4_XATTR_SYSTEM_DATA,
		.value = NULL,
		.value_len = 0,
	};
	int error;

	if (!ei->i_inline_off)
		return 0;

	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		return error;

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto out;

	BUFFER_TRACE(is.iloc.bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto out;

	error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
	if (error)
		goto out;

	memset((void *)ext4_raw_inode(&is.iloc)->i_block,
		0, EXT4_MIN_INLINE_DATA_SIZE);

	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				      EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		if (S_ISDIR(inode->i_mode) ||
		    S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}
	ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);

	get_bh(is.iloc.bh);
	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

	EXT4_I(inode)->i_inline_off = 0;
	EXT4_I(inode)->i_inline_size = 0;
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
out:
	brelse(is.iloc.bh);
	if (error == -ENODATA)
		error = 0;
	return error;
}

static int ext4_read_inline_page(struct inode *inode, struct page *page)
{
	void *kaddr;
	int ret = 0;
	size_t len;
	struct ext4_iloc iloc;

	BUG_ON(!PageLocked(page));
	BUG_ON(!ext4_has_inline_data(inode));
	BUG_ON(page->index);

	if (!EXT4_I(inode)->i_inline_off) {
		ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
			     inode->i_ino);
		goto out;
	}

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		goto out;

	len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
	kaddr = kmap_atomic(page);
	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	zero_user_segment(page, len, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	brelse(iloc.bh);

out:
	return ret;
}
Example #17
0
static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
				  int pad)
{
	bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
	struct fimc_dev *fimc = ctx->fimc_dev;
	struct samsung_fimc_variant *var = fimc->variant;
	struct fimc_pix_limit *pl = var->pix_limit;
	struct fimc_frame *sink = &ctx->s_frame;
	u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
	u32 align_sz = 0, align_h = 4;
	u32 max_sc_h, max_sc_v;

	/* In JPEG transparent transfer mode cropping is not supported */
	if (fimc_fmt_is_jpeg(ctx->d_frame.fmt->color)) {
		r->width  = sink->f_width;
		r->height = sink->f_height;
		r->left   = r->top = 0;
		return;
	}
	if (pad == FIMC_SD_PAD_SOURCE) {
		if (ctx->rotation != 90 && ctx->rotation != 270)
			align_h = 1;
		max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
		max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1));
		min_sz = var->min_out_pixsize;
	} else {
		u32 depth = fimc_get_format_depth(sink->fmt);
		align_sz = 64/ALIGN(depth, 8);
		min_sz = var->min_inp_pixsize;
		min_w = min_h = min_sz;
		max_sc_h = max_sc_v = 1;
	}
	/*
	 * For the crop rectangle at source pad the following constraints
	 * must be met:
	 * - it must fit in the sink pad format rectangle (f_width/f_height);
	 * - maximum downscaling ratio is 64;
	 * - maximum crop size depends if the rotator is used or not;
	 * - the sink pad format width/height must be 4 multiple of the
	 *   prescaler ratios determined by sink pad size and source pad crop,
	 *   the prescaler ratio is returned by fimc_get_scaler_factor().
	 */
	max_w = min_t(u32,
		      rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
		      rotate ? sink->f_height : sink->f_width);
	max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
	if (pad == FIMC_SD_PAD_SOURCE) {
		min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
		min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
		if (rotate) {
			swap(max_sc_h, max_sc_v);
			swap(min_w, min_h);
		}
	}
	v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
			      &r->height, min_h, max_h, align_h,
			      align_sz);
	/* Adjust left/top if cropping rectangle is out of bounds */
	r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
	r->top  = clamp_t(u32, r->top, 0, sink->f_height - r->height);
	r->left = round_down(r->left, var->hor_offs_align);

	dbg("pad%d: (%d,%d)/%dx%d, sink fmt: %dx%d",
	    pad, r->left, r->top, r->width, r->height,
	    sink->f_width, sink->f_height);
}
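The clamp_t() calls at the end keep the crop rectangle inside the sink frame once width/height have been bounded. A minimal sketch of that adjustment with hypothetical frame and crop values:

#include <assert.h>
#include <stdint.h>

/* same behaviour as the kernel's clamp_t(u32, v, lo, hi) */
static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* hypothetical 1920x1080 sink frame and an out-of-bounds crop */
	uint32_t f_width = 1920, f_height = 1080;
	uint32_t width = 640, height = 480, left = 1800, top = 900;

	left = clamp_u32(left, 0, f_width - width);	/* 1800 -> 1280 */
	top = clamp_u32(top, 0, f_height - height);	/* 900 -> 600 */
	assert(left == 1280 && top == 600);
	return 0;
}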
Example #18
0
/**
 *      atmel_lcdfb_check_var - Validates a var passed in.
 *      @var: frame buffer variable screen structure
 *      @info: frame buffer structure that represents a single frame buffer
 *
 *	Checks to see if the hardware supports the state requested by
 *	var passed in. This function does not alter the hardware
 *	state!!!  This means the data stored in struct fb_info and
 *	struct atmel_lcdfb_info do not change. This includes the var
 *	inside of struct fb_info.  Do NOT change these. This function
 *	can be called on its own if we intend to only test a mode and
 *	not actually set it. The stuff in modedb.c is an example of
 *	this. If the var passed in is slightly off from what the
 *	hardware can support, then we alter the var PASSED in to what
 *	we can do. If the hardware doesn't support mode change, a
 *	-EINVAL will be returned by the upper layers. You don't need
 *	to implement this function then. If your hardware doesn't
 *	support changing the resolution then this function is not
 *	needed. In this case the driver would just provide a var that
 *	represents the static state the screen is in.
 *
 *	Returns negative errno on error, or zero on success.
 */
static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct device *dev = info->device;
	struct atmel_lcdfb_info *sinfo = info->par;
	unsigned long clk_value_khz;

	clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;

	dev_dbg(dev, "%s:\n", __func__);

	if (!(var->pixclock && var->bits_per_pixel)) {
		/* choose a suitable mode if possible */
		if (!atmel_lcdfb_choose_mode(var, info)) {
			dev_err(dev, "needed value not specified\n");
			return -EINVAL;
		}
	}

	dev_dbg(dev, "  resolution: %ux%u\n", var->xres, var->yres);
	dev_dbg(dev, "  pixclk:     %lu KHz\n", PICOS2KHZ(var->pixclock));
	dev_dbg(dev, "  bpp:        %u\n", var->bits_per_pixel);
	dev_dbg(dev, "  clk:        %lu KHz\n", clk_value_khz);

	if (PICOS2KHZ(var->pixclock) > clk_value_khz) {
		dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock));
		return -EINVAL;
	}

	/* Do not allow the real resolution to be larger than the virtual one */
	if (var->xres > var->xres_virtual)
		var->xres_virtual = var->xres;

	if (var->yres > var->yres_virtual)
		var->yres_virtual = var->yres;

	/* Force same alignment for each line */
	var->xres = (var->xres + 3) & ~3UL;
	var->xres_virtual = (var->xres_virtual + 3) & ~3UL;

	var->red.msb_right = var->green.msb_right = var->blue.msb_right = 0;
	var->transp.msb_right = 0;
	var->transp.offset = var->transp.length = 0;
	var->xoffset = var->yoffset = 0;

	if (info->fix.smem_len) {
		unsigned int smem_len = (var->xres_virtual * var->yres_virtual
					 * ((var->bits_per_pixel + 7) / 8));
		if (smem_len > info->fix.smem_len)
			return -EINVAL;
	}

	/* Saturate vertical and horizontal timings at maximum values */
	var->vsync_len = min_t(u32, var->vsync_len,
			(ATMEL_LCDC_VPW >> ATMEL_LCDC_VPW_OFFSET) + 1);
	var->upper_margin = min_t(u32, var->upper_margin,
			ATMEL_LCDC_VBP >> ATMEL_LCDC_VBP_OFFSET);
	var->lower_margin = min_t(u32, var->lower_margin,
			ATMEL_LCDC_VFP);
	var->right_margin = min_t(u32, var->right_margin,
			(ATMEL_LCDC_HFP >> ATMEL_LCDC_HFP_OFFSET) + 1);
	var->hsync_len = min_t(u32, var->hsync_len,
			(ATMEL_LCDC_HPW >> ATMEL_LCDC_HPW_OFFSET) + 1);
	var->left_margin = min_t(u32, var->left_margin,
			ATMEL_LCDC_HBP + 1);

	/* Some parameters can't be zero */
	var->vsync_len = max_t(u32, var->vsync_len, 1);
	var->right_margin = max_t(u32, var->right_margin, 1);
	var->hsync_len = max_t(u32, var->hsync_len, 1);
	var->left_margin = max_t(u32, var->left_margin, 1);

	switch (var->bits_per_pixel) {
	case 1:
	case 2:
	case 4:
	case 8:
		var->red.offset = var->green.offset = var->blue.offset = 0;
		var->red.length = var->green.length = var->blue.length
			= var->bits_per_pixel;
		break;
	case 15:
	case 16:
		if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
			/* RGB:565 mode */
			var->red.offset = 11;
			var->blue.offset = 0;
			var->green.length = 6;
		} else if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB555) {
			var->red.offset = 10;
			var->blue.offset = 0;
			var->green.length = 5;
		} else {
			/* BGR:555 mode */
			var->red.offset = 0;
			var->blue.offset = 10;
			var->green.length = 5;
		}
		var->green.offset = 5;
		var->red.length = var->blue.length = 5;
		break;
	case 32:
		var->transp.offset = 24;
		var->transp.length = 8;
		/* fall through */
	case 24:
		if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
			/* RGB:888 mode */
			var->red.offset = 16;
			var->blue.offset = 0;
		} else {
			/* BGR:888 mode */
			var->red.offset = 0;
			var->blue.offset = 16;
		}
		var->green.offset = 8;
		var->red.length = var->green.length = var->blue.length = 8;
		break;
	default:
		dev_err(dev, "color depth %d not supported\n",
					var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}
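The saturation block above is a min_t() that clips each timing at its hardware maximum, followed by a max_t() that keeps it non-zero. The pair in isolation, with an assumed maximum of 64:

#include <assert.h>
#include <stdint.h>

/* Saturate a timing value into [1, max]: the min_t()/max_t() pair above. */
static uint32_t sat_timing(uint32_t v, uint32_t max)
{
	v = v < max ? v : max;	/* min_t(u32, v, max) */
	return v > 1 ? v : 1;	/* max_t(u32, v, 1)  */
}

int main(void)
{
	assert(sat_timing(0, 64) == 1);		/* too small -> forced to 1 */
	assert(sat_timing(200, 64) == 64);	/* too large -> clipped     */
	assert(sat_timing(10, 64) == 10);	/* in range  -> unchanged   */
	return 0;
}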
Example #19
0
/**
 * process_vm_rw_pages - read/write pages from task specified
 * @task: task to read/write from
 * @mm: mm for task
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @pa: address of page in task to start copying from/to
 * @start_offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @lvec: iovec array specifying where to copy to/from
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @vm_write: 0 means copy from, 1 means copy to
 * @nr_pages_to_copy: number of pages to copy
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct task_struct *task,
			       struct mm_struct *mm,
			       struct page **process_pages,
			       unsigned long pa,
			       unsigned long start_offset,
			       unsigned long len,
			       const struct iovec *lvec,
			       unsigned long lvec_cnt,
			       unsigned long *lvec_current,
			       size_t *lvec_offset,
			       int vm_write,
			       unsigned int nr_pages_to_copy,
			       ssize_t *bytes_copied)
{
	int pages_pinned;
	void *target_kaddr;
	int pgs_copied = 0;
	int j;
	int ret;
	ssize_t bytes_to_copy;
	ssize_t rc = 0;

	*bytes_copied = 0;

	/* Get the pages we're interested in */
	down_read(&mm->mmap_sem);
	pages_pinned = get_user_pages(task, mm, pa,
				      nr_pages_to_copy,
				      vm_write, 0, process_pages, NULL);
	up_read(&mm->mmap_sem);

	if (pages_pinned != nr_pages_to_copy) {
		rc = -EFAULT;
		goto end;
	}

	/* Do the copy for each page */
	for (pgs_copied = 0;
	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
	     pgs_copied++) {
		/* Make sure we have a non zero length iovec */
		while (*lvec_current < lvec_cnt
		       && lvec[*lvec_current].iov_len == 0)
			(*lvec_current)++;
		if (*lvec_current == lvec_cnt)
			break;

		/*
		 * Will copy smallest of:
		 * - bytes remaining in page
		 * - bytes remaining in destination iovec
		 */
		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
				      len - *bytes_copied);
		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
				      lvec[*lvec_current].iov_len
				      - *lvec_offset);

		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

		if (vm_write)
			ret = copy_from_user(target_kaddr,
					     lvec[*lvec_current].iov_base
					     + *lvec_offset,
					     bytes_to_copy);
		else
			ret = copy_to_user(lvec[*lvec_current].iov_base
					   + *lvec_offset,
					   target_kaddr, bytes_to_copy);
		kunmap(process_pages[pgs_copied]);
		if (ret) {
			*bytes_copied += bytes_to_copy - ret;
			pgs_copied++;
			rc = -EFAULT;
			goto end;
		}
		*bytes_copied += bytes_to_copy;
		*lvec_offset += bytes_to_copy;
		if (*lvec_offset == lvec[*lvec_current].iov_len) {
			/*
			 * Need to copy remaining part of page into the
			 * next iovec if there are any bytes left in page
			 */
			(*lvec_current)++;
			*lvec_offset = 0;
			start_offset = (start_offset + bytes_to_copy)
				% PAGE_SIZE;
			if (start_offset)
				pgs_copied--;
		} else {
			start_offset = 0;
		}
	}

end:
	if (vm_write) {
		for (j = 0; j < pages_pinned; j++) {
			if (j < pgs_copied)
				set_page_dirty_lock(process_pages[j]);
			put_page(process_pages[j]);
		}
	} else {
		for (j = 0; j < pages_pinned; j++)
			put_page(process_pages[j]);
	}

	return rc;
}
Example #20
0
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = mtd_writev(subdev, &vecs_copy[entry_low],
					 entry_high - entry_low + 1, to,
					 &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
Example #21
0
STATIC loff_t
xfs_seek_hole(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a hole */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/*
			 * Landed in an unwritten extent, try to search hole
			 * from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							HOLE_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] contains data, or it is unwritten but contains
		 * data in the page cache; that probably means we are
		 * reading after EOF.  We should fix the offset to point
		 * to the end of the file (i.e., there is an implicit
		 * hole at the end of any file).
		 */
		if (nmap == 1) {
			offset = isize;
			break;
		}

		ASSERT(i > 1);

		/*
		 * Both mappings contain data; proceed to the next round of
		 * search if the current reading offset has not gone beyond
		 * or hit EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			offset = isize;
			break;
		}
	}

out:
	/*
	 * At this point, we must have found a hole.  However, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents, we need to deal with this
	 * situation in particular.
	 */
	offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}
Example #22
0
static void agp_v3_parse_one(u32_t *requested_mode,
                             u32_t *bridge_agpstat,
                             u32_t *vga_agpstat)
{
	u32_t origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32_t tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		dbgprintf("reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check that the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		dbgprintf("Setting to AGP3 x4 mode.\n");
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		dbgprintf("Setting to AGP3 x8 mode.\n");
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32_t, (*bridge_agpstat & AGPSTAT_ARQSZ),
		      (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32_t, (*bridge_agpstat & AGPSTAT_CAL_MASK),
		      (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set the speed.
	 * Check for invalid speeds: these can happen when applications
	 * written before the AGP 3.0 standard pass AGP 2.x modes to
	 * AGP 3.0 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller hasn't a clue what it is doing. The bridge
		 * is in 3.0 mode and we have been passed a 3.0 mode, but
		 * with 2.x speed bits set. Convert AGP 2.x 4x -> AGP 3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			dbgprintf("broken AGP3 flags (%x). Fixed.\n",
				*requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what it is doing. We are in
		 * 3.0 mode, but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		dbgprintf("broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			*requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			dbgprintf("requested AGPx8 but bridge not capable.\n");
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			dbgprintf("requested AGPx8 but graphics card not capable.\n");
			return;
		}
		/* All set; both bridge and device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;
	} else {
		/*
		 * If we didn't specify AGPx8, we can only do x4.
		 * If the hardware can't do x4, we're up shit creek, and
		 * never should have got this far.
		 */
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		if ((*bridge_agpstat & AGPSTAT3_4X) &&
		    (*vga_agpstat & AGPSTAT3_4X))
			*bridge_agpstat |= AGPSTAT3_4X;
		else {
			dbgprintf("Badness. Don't know which AGP mode to set. "
				"[bridge_agpstat:%x vga_agpstat:%x fell back to:- "
				"bridge_agpstat:%x vga_agpstat:%x]\n",
				origbridge, origvga,
				*bridge_agpstat, *vga_agpstat);
			if (!(*bridge_agpstat & AGPSTAT3_4X))
				dbgprintf("Bridge couldn't do AGP x4.\n");
			if (!(*vga_agpstat & AGPSTAT3_4X))
				dbgprintf("Graphics card couldn't do AGP x4.\n");
			return;
		}
	}

done:
	/* Apply any errata. */
	if (bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
Exemple #23
0
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
Exemple #24
0
/**
 * service_done_flag - handle completed buffers
 * @dev: private data
 * @ch_idx: channel index
 *
 * Return back the completed buffers to mostcore, using completion callback
 */
static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
{
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	struct dim_ch_state_t st;
	struct list_head *head;
	struct mbo *mbo;
	int done_buffers;
	unsigned long flags;
	u8 *data;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

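	/*
	 * Sample how many buffers the hardware has completed and detach
	 * them from the channel while holding the spinlock.
	 */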
	spin_lock_irqsave(&dim_lock, flags);

	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
	if (!done_buffers) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}

	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dim_lock, flags);

	head = &hdm_ch->started_list;

	while (done_buffers) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		data = mbo->virt_address;

		if (hdm_ch->data_type == MOST_CH_ASYNC &&
		    hdm_ch->direction == MOST_CH_RX &&
		    PACKET_IS_NET_INFO(data)) {
			retrieve_netinfo(dev, mbo);

			spin_lock_irqsave(&dim_lock, flags);
			list_add_tail(&mbo->list, &hdm_ch->pending_list);
			spin_unlock_irqrestore(&dim_lock, flags);
		} else {
			if (hdm_ch->data_type == MOST_CH_CONTROL ||
			    hdm_ch->data_type == MOST_CH_ASYNC) {
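				/*
				 * The first two bytes carry a 16-bit
				 * big-endian payload length; add 2 to
				 * account for those header bytes.
				 */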
				u32 const data_size =
					(u32)data[0] * 256 + data[1] + 2;

				mbo->processed_length =
					min_t(u32, data_size,
					      mbo->buffer_length);
			} else {
				mbo->processed_length = mbo->buffer_length;
			}
			mbo->status = MBO_SUCCESS;
			mbo->complete(mbo);
		}

		done_buffers--;
	}
}
Exemple #25
0
static struct sdw_intel_ctx
*sdw_intel_add_controller(struct sdw_intel_res *res)
{
	struct platform_device_info pdevinfo;
	struct platform_device *pdev;
	struct sdw_link_data *link;
	struct sdw_intel_ctx *ctx;
	struct acpi_device *adev;
	int ret, i;
	u8 count;
	u32 caps;

	if (acpi_bus_get_device(res->handle, &adev))
		return NULL;

	/* Found the controller; now find the supported links */
	count = 0;
	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
				  "mipi-sdw-master-count", &count, 1);

	/* Don't fail on error, continue and use hw value */
	if (ret) {
		dev_err(&adev->dev,
			"Failed to read mipi-sdw-master-count: %d\n", ret);
		count = SDW_MAX_LINKS;
	}

	/* Check SNDWLCAP.LCOUNT */
	caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);

	/* Compare the HW-supported count with the property value and use the smaller */
	count = min_t(u8, caps, count);

	/* Check count is within bounds */
	if (count > SDW_MAX_LINKS) {
		dev_err(&adev->dev, "Link count %d exceeds max %d\n",
						count, SDW_MAX_LINKS);
		return NULL;
	}

	dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->count = count;
	ctx->links = kcalloc(ctx->count, sizeof(*ctx->links), GFP_KERNEL);
	if (!ctx->links)
		goto link_err;

	link = ctx->links;

	/* Create SDW Master devices */
	for (i = 0; i < count; i++) {

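		/* Per-link register window plus the shared SHIM/ALH blocks. */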
		link->res.irq = res->irq;
		link->res.registers = res->mmio_base + SDW_LINK_BASE
					+ (SDW_LINK_SIZE * i);
		link->res.shim = res->mmio_base + SDW_SHIM_BASE;
		link->res.alh = res->mmio_base + SDW_ALH_BASE;

		link->res.ops = res->ops;
		link->res.arg = res->arg;

		memset(&pdevinfo, 0, sizeof(pdevinfo));

		pdevinfo.parent = res->parent;
		pdevinfo.name = "int-sdw";
		pdevinfo.id = i;
		pdevinfo.fwnode = acpi_fwnode_handle(adev);
		pdevinfo.data = &link->res;
		pdevinfo.size_data = sizeof(link->res);

		pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(pdev)) {
			dev_err(&adev->dev,
				"platform device creation failed: %ld\n",
				PTR_ERR(pdev));
			goto pdev_err;
		}

		link->pdev = pdev;
		link++;
	}

	return ctx;

pdev_err:
	sdw_intel_cleanup_pdev(ctx);
link_err:
	kfree(ctx);
	return NULL;
}
Exemple #26
0
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * A call to tick_nohz_start_idle stops last_update_time from being
	 * updated. Thus, it must not be made when we were invoked from
	 * irq_exit() and the prior state was not idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

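	/*
	 * If this cpu still has work pending (RCU callbacks, printk,
	 * arch-specific needs), treat the next timer event as one jiffy
	 * away so the tick is not stopped below.
	 */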
	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one jiffy off
	 * or if the cpu is required for RCU.
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogramming the event if it hasn't changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, simply stop the
		 * tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are already past the event, so we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
Exemple #27
0
int enic_get_vnic_config(struct enic *enic)
{
	struct vnic_enet_config *c = &enic->config;
	int err;

	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
		return err;
	}

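/*
 * Fetch a single field of struct vnic_enet_config from the device,
 * returning from the enclosing function on failure.
 */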
#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(enic->vdev, \
			offsetof(struct vnic_enet_config, m), \
			sizeof(c->m), &c->m); \
		if (err) { \
			printk(KERN_ERR PFX \
				"Error getting %s, %d\n", #m, err); \
			return err; \
		} \
	} while (0)

	GET_CONFIG(flags);
	GET_CONFIG(wq_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(mtu);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(intr_mode);

	c->wq_desc_count =
		min_t(u32, ENIC_MAX_WQ_DESCS,
		max_t(u32, ENIC_MIN_WQ_DESCS,
		c->wq_desc_count));
	c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	c->rq_desc_count =
		min_t(u32, ENIC_MAX_RQ_DESCS,
		max_t(u32, ENIC_MIN_RQ_DESCS,
		c->rq_desc_count));
	c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	if (c->mtu == 0)
		c->mtu = 1500;
	c->mtu = min_t(u16, ENIC_MAX_MTU,
		max_t(u16, ENIC_MIN_MTU,
		c->mtu));

	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);

	printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
		"wq/rq %d/%d\n",
		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
		c->wq_desc_count, c->rq_desc_count);
	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
		"intr timer %d\n",
		c->mtu, ENIC_SETTING(enic, TXCSUM),
		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
		ENIC_SETTING(enic, LRO), c->intr_timer);

	return 0;
}
Exemple #28
0
static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
    ide_hwif_t *hwif	= drive->hwif;
    struct pci_dev *dev	= to_pci_dev(hwif->dev);
    u8 maslave		= hwif->channel ? 0x42 : 0x40;
    int a_speed		= 3 << (drive->dn * 4);
    int u_flag		= 1 << drive->dn;
    int v_flag		= 0x01 << drive->dn;
    int w_flag		= 0x10 << drive->dn;
    int u_speed		= 0;
    int			sitre;
    u16			reg4042, reg4a;
    u8			reg48, reg54, reg55;

    pci_read_config_word(dev, maslave, &reg4042);
    sitre = (reg4042 & 0x4000) ? 1 : 0;
    pci_read_config_byte(dev, 0x48, &reg48);
    pci_read_config_word(dev, 0x4a, &reg4a);
    pci_read_config_byte(dev, 0x54, &reg54);
    pci_read_config_byte(dev, 0x55, &reg55);

    if (speed >= XFER_UDMA_0) {
        u8 udma = speed - XFER_UDMA_0;

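        /*
         * Derive the 2-bit cycle-time code for this drive's nibble of
         * register 0x4a: UDMA0->0, UDMA1->1, UDMA2->2, UDMA3->1,
         * UDMA4->2, UDMA5->1; the faster modes additionally enable the
         * 66/100 MHz clocks via registers 0x54/0x55 below.
         */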
        u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);

        if (!(reg48 & u_flag))
            pci_write_config_byte(dev, 0x48, reg48 | u_flag);
        if (speed == XFER_UDMA_5) {
            pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
        } else {
            pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
        }
        if ((reg4a & a_speed) != u_speed)
            pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
        if (speed > XFER_UDMA_2) {
            if (!(reg54 & v_flag))
                pci_write_config_byte(dev, 0x54, reg54 | v_flag);
        } else
            pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
    } else {
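        /*
         * Not a UDMA mode: clear this drive's UDMA enable and clock
         * bits, then program an equivalent PIO timing instead.
         */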
        const u8 mwdma_to_pio[] = { 0, 3, 4 };
        u8 pio;

        if (reg48 & u_flag)
            pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
        if (reg4a & a_speed)
            pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
        if (reg54 & v_flag)
            pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
        if (reg55 & w_flag)
            pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);

        if (speed >= XFER_MW_DMA_0)
            pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
        else
            pio = 2; /* only SWDMA2 is allowed */

        piix_set_pio_mode(drive, pio);
    }
}
Exemple #29
0
static int load_segment(const struct elf32_phdr *phdr, unsigned num,
		struct pil_device *pil)
{
	int ret = 0, count, paddr;
	char fw_name[30];
	const struct firmware *fw = NULL;
	const u8 *data;

	if (memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		dev_err(&pil->dev, "%s: kernel memory would be overwritten "
			"[%#08lx, %#08lx)\n", pil->desc->name,
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return -EPERM;
	}

	if (phdr->p_filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
				pil->desc->name, num);
		ret = request_firmware(&fw, fw_name, &pil->dev);
		if (ret) {
			dev_err(&pil->dev, "%s: Failed to locate blob %s\n",
					pil->desc->name, fw_name);
			return ret;
		}

		if (fw->size != phdr->p_filesz) {
			dev_err(&pil->dev, "%s: Blob size %u doesn't match "
					"%u\n", pil->desc->name, fw->size,
					phdr->p_filesz);
			ret = -EPERM;
			goto release_fw;
		}
	}

	/* Load the segment contents into memory in IOMAP_SIZE chunks. */
	count = phdr->p_filesz;
	paddr = phdr->p_paddr;
	data = fw ? fw->data : NULL;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			dev_err(&pil->dev, "%s: Failed to map memory\n",
					pil->desc->name);
			ret = -ENOMEM;
			goto release_fw;
		}
		memcpy(buf, data, size);
		iounmap(buf);

		count -= size;
		paddr += size;
		data += size;
	}

	/* Zero any remaining memory beyond the file contents. */
	count = phdr->p_memsz - phdr->p_filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			dev_err(&pil->dev, "%s: Failed to map memory\n",
					pil->desc->name);
			ret = -ENOMEM;
			goto release_fw;
		}
		memset(buf, 0, size);
		iounmap(buf);

		count -= size;
		paddr += size;
	}

	if (pil->desc->ops->verify_blob) {
		ret = pil->desc->ops->verify_blob(pil->desc, phdr->p_paddr,
					  phdr->p_memsz);
		if (ret)
			dev_err(&pil->dev, "%s: Blob%u failed verification\n",
				pil->desc->name, num);
	}

release_fw:
	release_firmware(fw);
	return ret;
}
Exemple #30
0
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

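			/*
			 * Compare case-insensitively over the shorter of
			 * the two names, then break ties by length.
			 */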
			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time
				 * holding the lock properly.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}