static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
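
The loop above charges one indirect block for every sd_inptrs data blocks until the remainder fits under the direct pointers. Below is a minimal stand-alone sketch of that estimate; the pointer counts are made-up values typical of a 4 KiB block, not read from any superblock.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-alone version of the estimation loop above. */
static unsigned int usable_data_blocks(unsigned int max_data,
				       unsigned int diptrs,
				       unsigned int inptrs)
{
	unsigned int tmp;

	for (tmp = max_data; tmp > diptrs;) {
		tmp = DIV_ROUND_UP(tmp, inptrs);
		max_data -= tmp;	/* charge the indirect blocks */
	}
	return max_data;
}

int main(void)
{
	printf("%u data blocks usable\n",
	       usable_data_blocks(100000, 503, 509));
	return 0;
}
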

static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_qadata *qa;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		qa = gfs2_qadata_get(ip);
		if (!qa) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_qadata_put(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_qadata_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
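
On -ENOSPC the function above halves the requested chunk, re-aligns it to the block size, and retries until the request is down to a single block. A compilable sketch of that back-off follows; try_reserve() is a hypothetical stand-in for gfs2_inplace_reserve().

#include <errno.h>
#include <stdio.h>

typedef long long llbytes_t;

/* stand-in for gfs2_inplace_reserve(): pretend only requests of at
 * most 64 KiB can be satisfied */
static llbytes_t capacity = 64 * 1024;

static int try_reserve(llbytes_t bytes)
{
	return bytes <= capacity ? 0 : -ENOSPC;
}

static int reserve_with_backoff(llbytes_t *bytes, llbytes_t bsize)
{
	llbytes_t mask = ~(bsize - 1);
	int error;

	while ((error = try_reserve(*bytes)) == -ENOSPC && *bytes > bsize) {
		*bytes >>= 1;		/* halve the request... */
		*bytes &= mask;		/* ...but keep it block-aligned */
		if (*bytes == 0)
			*bytes = bsize;
	}
	return error;
}

int main(void)
{
	llbytes_t bytes = 1024 * 1024, bsize = 4096;

	if (reserve_with_backoff(&bytes, bsize) == 0)
		printf("reserved %lld bytes\n", bytes);
	return 0;
}
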
Example No. 2
static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
	if (error)
		return error;

	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			        sizeof(struct gfs2_quota_change);

	/* Compute maximum reservation required to add an entry to a directory */

	hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
			     sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	sdp->sd_heightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
				 sizeof(struct gfs2_dinode);
	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_jheightsize[x - 1] || m)
			break;
		sdp->sd_jheightsize[x] = space;
	}
	sdp->sd_max_jheight = x;
	sdp->sd_jheightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

	return 0;
}
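
The sd_max_dirres figure above adds the hash-table blocks (rounded up to whole journaled blocks), the indirect blocks needed to address them, and the leaf blocks. A stand-alone sketch; GFS2_DIR_MAX_DEPTH and the per-block pointer counts are assumed values, not taken from a real superblock.

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const uint32_t dir_max_depth = 17;	/* GFS2_DIR_MAX_DEPTH, assumed */
	const uint32_t jbsize = 4096 - 24;	/* block minus header, example */
	const uint32_t diptrs = 503, inptrs = 509;
	uint32_t hash_blocks, ind_blocks, tmp;

	hash_blocks = DIV_ROUND_UP(sizeof(uint64_t) * (1u << dir_max_depth),
				   jbsize);
	ind_blocks = 0;
	for (tmp = hash_blocks; tmp > diptrs;) {
		tmp = DIV_ROUND_UP(tmp, inptrs);
		ind_blocks += tmp;
	}
	printf("max dir reservation: %u blocks\n",
	       hash_blocks + ind_blocks + 2 + dir_max_depth);
	return 0;
}
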
Example No. 3
static int __atcspi200_spi_xfer(struct nds_spi_slave *ns,
		unsigned int bitlen,  const void *data_out, void *data_in,
		unsigned long flags)
{
		unsigned int event, rx_bytes;
		const void *dout = NULL;
		void *din = NULL;
		int num_blks, num_chunks, max_tran_len, tran_len;
		int num_bytes;
		u8 *cmd_buf = ns->cmd_buf;
		size_t cmd_len = ns->cmd_len;
		unsigned long data_len = bitlen / 8;
		int rf_cnt;
		int ret = 0;

		max_tran_len = ns->max_transfer_length;
		switch (flags) {
		case SPI_XFER_BEGIN:
			cmd_len = ns->cmd_len = data_len;
			memcpy(cmd_buf, data_out, cmd_len);
			return 0;

		case 0:
		case SPI_XFER_END:
			if (bitlen == 0) {
				return 0;
			}
			ns->data_len = data_len;
			ns->din = (u8 *)data_in;
			ns->dout = (u8 *)data_out;
			break;

		case SPI_XFER_BEGIN | SPI_XFER_END:
			ns->data_len = 0;
			ns->din = 0;
			ns->dout = 0;
			cmd_len = ns->cmd_len = data_len;
			memcpy(cmd_buf, data_out, cmd_len);
			data_out = 0;
			data_len = 0;
			__atcspi200_spi_start(ns);
			break;
		}
		if (data_out)
			debug("spi_xfer: data_out %08X(%p) data_in %08X(%p) data_len %lu\n",
			      *(uint *)data_out, data_out, *(uint *)data_in,
			      data_in, data_len);
		num_chunks = DIV_ROUND_UP(data_len, max_tran_len);
		din = data_in;
		dout = data_out;
		while (num_chunks--) {
			tran_len = min((size_t)data_len, (size_t)max_tran_len);
			ns->tran_len = tran_len;
			num_blks = DIV_ROUND_UP(tran_len , CHUNK_SIZE);
			num_bytes = (tran_len) % CHUNK_SIZE;
			if(num_bytes == 0)
				num_bytes = CHUNK_SIZE;
			__atcspi200_spi_start(ns);

			while (num_blks) {
				event = in_le32(&ns->regs->status);
				if ((event & TXEPTY) && (data_out)) {
					__nspi_espi_tx(ns, dout);
					num_blks -= CHUNK_SIZE;
					dout += CHUNK_SIZE;
				}

				if ((event & RXFVE_MASK) && (data_in)) {
					rf_cnt = ((event & RXFVE_MASK)>> RXFVE_OFFSET);
					if (rf_cnt >= CHUNK_SIZE)
						rx_bytes = CHUNK_SIZE;
					else if (num_blks == 1 && rf_cnt == num_bytes)
						rx_bytes = num_bytes;
					else
						continue;

					if (__nspi_espi_rx(ns, din, rx_bytes) == rx_bytes) {
						num_blks -= CHUNK_SIZE;
						din = (unsigned char *)din + rx_bytes;
					}
				}
			}

			data_len -= tran_len;
			if(data_len)
			{
				ns->cmd_buf[1] += ((tran_len>>16)&0xff);
				ns->cmd_buf[2] += ((tran_len>>8)&0xff);
				ns->cmd_buf[3] += ((tran_len)&0xff);
				ns->data_len = data_len;
			}
			ret = __atcspi200_spi_stop(ns);
		}

		return ret;
}
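
The driver above splits a transfer into max_tran_len chunks and, within each chunk, works out how many bytes the final FIFO access moves. A minimal sketch of that arithmetic, with an assumed 4-byte CHUNK_SIZE:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define CHUNK_SIZE 4	/* bytes moved per FIFO access; value assumed */

int main(void)
{
	unsigned long data_len = 1030, max_tran_len = 512;
	int num_chunks = DIV_ROUND_UP(data_len, max_tran_len);

	while (num_chunks--) {
		unsigned long tran_len = data_len < max_tran_len ?
					 data_len : max_tran_len;
		int num_bytes = tran_len % CHUNK_SIZE;

		if (num_bytes == 0)
			num_bytes = CHUNK_SIZE;	/* last access is a full chunk */
		printf("chunk of %lu bytes, last access %d bytes\n",
		       tran_len, num_bytes);
		data_len -= tran_len;
	}
	return 0;
}
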
Example No. 4
PJ_DEF(pj_status_t) pjmedia_aud_test( const pjmedia_aud_param *param,
				      pjmedia_aud_test_results *result)
{
    pj_status_t status = PJ_SUCCESS;
    pjmedia_aud_stream *strm;
    struct test_data test_data;
    unsigned ptime, tmp;
    
    /*
     * Init test parameters
     */
    pj_bzero(&test_data, sizeof(test_data));
    test_data.param = param;
    test_data.result = result;

    test_data.pool = pj_pool_create(pjmedia_aud_subsys_get_pool_factory(),
				    "audtest", 1000, 1000, NULL);
    pj_mutex_create_simple(test_data.pool, "sndtest", &test_data.mutex); 

    /*
     * Open device.
     */
    status = pjmedia_aud_stream_create(test_data.param, &rec_cb, &play_cb, 
				       &test_data, &strm);
    if (status != PJ_SUCCESS) {
        app_perror("Unable to open device", status);
	pj_pool_release(test_data.pool);
        return status;
    }


    /* Sleep for a while to let the sound device "settle" */
    pj_thread_sleep(200);

    /*
     * Start the stream.
     */
    status = pjmedia_aud_stream_start(strm);
    if (status != PJ_SUCCESS) {
        app_perror("Unable to start capture stream", status);
	pjmedia_aud_stream_destroy(strm);
	pj_pool_release(test_data.pool);
        return status;
    }

    PJ_LOG(3,(THIS_FILE,
	      " Please wait while test is in progress (~%d secs)..",
	      (DURATION+SKIP_DURATION)/1000));

    /* Let the stream run for a few msec/sec to get a stable result.
     * (capture normally begins with frames available simultaneously).
     */
    pj_thread_sleep(SKIP_DURATION);


    /* Begin gather data */
    test_data.running = 1;

    /* 
     * Let the test run for a while.
     */
    pj_thread_sleep(DURATION);


    /*
     * Close stream.
     */
    test_data.running = 0;
    pjmedia_aud_stream_destroy(strm);
    pj_pool_release(test_data.pool);


    /* 
     * Gather results
     */
    ptime = param->samples_per_frame * 1000 / param->clock_rate;

    tmp = pj_math_stat_get_stddev(&test_data.capture_data.delay);
    result->rec.frame_cnt = test_data.capture_data.delay.n;
    result->rec.min_interval = DIV_ROUND(test_data.capture_data.delay.min, 1000);
    result->rec.max_interval = DIV_ROUND(test_data.capture_data.delay.max, 1000);
    result->rec.avg_interval = DIV_ROUND(test_data.capture_data.delay.mean, 1000);
    result->rec.dev_interval = DIV_ROUND(tmp, 1000);
    result->rec.max_burst    = DIV_ROUND_UP(result->rec.max_interval, ptime);

    tmp = pj_math_stat_get_stddev(&test_data.playback_data.delay);
    result->play.frame_cnt = test_data.playback_data.delay.n;
    result->play.min_interval = DIV_ROUND(test_data.playback_data.delay.min, 1000);
    result->play.max_interval = DIV_ROUND(test_data.playback_data.delay.max, 1000);
    result->play.avg_interval = DIV_ROUND(test_data.playback_data.delay.mean, 1000);
    result->play.dev_interval = DIV_ROUND(tmp, 1000);
    result->play.max_burst    = DIV_ROUND_UP(result->play.max_interval, ptime);

    /* Check drifting */
    if (param->dir == PJMEDIA_DIR_CAPTURE_PLAYBACK) {
	int play_diff, cap_diff, drift;

	play_diff = test_data.playback_data.last_timestamp -
		    test_data.playback_data.first_timestamp;
	cap_diff  = test_data.capture_data.last_timestamp -
		    test_data.capture_data.first_timestamp;
	drift = play_diff > cap_diff? play_diff - cap_diff :
		cap_diff - play_diff;

	/* Allow one frame tolerance for clock drift detection */
	if (drift < (int)param->samples_per_frame) {
	    result->rec_drift_per_sec = 0;
	} else {
	    unsigned msec_dur;

	    msec_dur = (test_data.capture_data.last_timestamp - 
		       test_data.capture_data.first_timestamp) * 1000 /
		       test_data.param->clock_rate;

	    result->rec_drift_per_sec = drift * 1000 / msec_dur;

	}
    }

    return test_data.has_error? PJ_EUNKNOWN : PJ_SUCCESS;
}
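
The burst metric above expresses the worst observed capture gap in frame times: ptime is derived from the clock rate, the gap is rounded to milliseconds, and the ratio is rounded up. A sketch with made-up numbers; the DIV_ROUND/DIV_ROUND_UP definitions are the usual conventions, assumed here.

#include <stdio.h>

#define DIV_ROUND(n, d)    (((n) + (d) / 2) / (d))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned clock_rate = 16000, samples_per_frame = 320;
	unsigned max_interval_us = 61000;		/* worst gap, microseconds */
	unsigned ptime = samples_per_frame * 1000 / clock_rate;	/* 20 ms */
	unsigned max_interval = DIV_ROUND(max_interval_us, 1000);
	unsigned max_burst = DIV_ROUND_UP(max_interval, ptime);

	printf("ptime %u ms, max burst %u frames\n", ptime, max_burst);
	return 0;
}
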
Example No. 5
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family	      = tw->tw_family;
	r->idiag_retrans      = 0;
	r->id.idiag_if	      = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport     = tw->tw_sport;
	r->id.idiag_dport     = tw->tw_dport;
	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
	r->id.idiag_dst[0]    = tw->tw_daddr;
	r->idiag_state	      = tw->tw_substate;
	r->idiag_timer	      = 3;
	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue	      = 0;
	r->idiag_wqueue	      = 0;
	r->idiag_uid	      = 0;
	r->idiag_inode	      = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}
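
Both EXPIRES_IN_MS() and the time-wait path above convert a jiffies timeout to milliseconds with DIV_ROUND_UP, so a timer with any time left never rounds down to zero. A sketch assuming HZ=250:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define HZ 250	/* ticks per second; configuration-dependent */

/* Report a jiffies timeout in milliseconds, rounded up, clamping
 * already-expired timers to zero as the time-wait path does. */
static unsigned long jiffies_to_msecs_up(long tmo)
{
	if (tmo < 0)
		tmo = 0;
	return DIV_ROUND_UP((unsigned long)tmo * 1000, HZ);
}

int main(void)
{
	printf("%lu ms\n", jiffies_to_msecs_up(1));	/* 4 ms at HZ=250 */
	return 0;
}
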
Example No. 6
int
gk104_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8  tpcnr[GPC_MAX];
	int gpc, tpc, rop;
	int i;

	gr->func->init_gpc_mmu(gr);

	gf100_gr_mmio(gr, gr->func->mmio);

	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);

	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
							 gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));

	gr->func->init_rop_active_fbps(gr);

	nvkm_wr32(device, 0x400500, 0x00010001);

	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	nvkm_wr32(device, 0x409ffc, 0x00000000);
	nvkm_wr32(device, 0x409c14, 0x00003e3e);
	nvkm_wr32(device, 0x409c24, 0x000f0001);
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);
	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);
	nvkm_wr32(device, 0x407020, 0x40000000);
	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);
	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);

	gr->func->init_ppc_exceptions(gr);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	nvkm_wr32(device, 0x400054, 0x34ce3464);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}
Example No. 7
/* Sub-loop of the main loop that "asynchronously" queries for the input
 * performing the following tasks while waiting for input:
 *  - checks for new IPC messages;
 *  - checks whether contents of displayed directories changed;
 *  - redraws UI if requested.
 * Returns KEY_CODE_YES for functional keys (preprocesses *c in this case), OK
 * for wide character and ERR otherwise (e.g. after timeout). */
static int
get_char_async_loop(WINDOW *win, wint_t *c, int timeout)
{
	const int IPC_F = ipc_enabled() ? 10 : 1;

	do
	{
		int i;

		int delay_slice = DIV_ROUND_UP(MIN(cfg.min_timeout_len, timeout), IPC_F);
#ifdef __PDCURSES__
		/* pdcurses performs delays in 50 ms intervals (1/20 of a second). */
		delay_slice = MAX(50, delay_slice);
#endif

		if(should_check_views_for_changes())
		{
			check_view_for_changes(curr_view);
			check_view_for_changes(other_view);
		}

		process_scheduled_updates();

		for(i = 0; i < IPC_F && timeout > 0; ++i)
		{
			int result;

			ipc_check(curr_stats.ipc);
			wtimeout(win, delay_slice);
			timeout -= delay_slice;

			if(suggestions_are_visible)
			{
				/* Redraw suggestion box as it might have been hidden due to other
				 * redraws. */
				display_suggestion_box(curr_input_buf);
			}

			/* Update cursor before waiting for input.  Modes set cursor correctly
			 * within corresponding windows, but we need to call refresh on one of
			 * them to make it active. */
			update_hardware_cursor();

			result = compat_wget_wch(win, c);
			if(result != ERR)
			{
				if(result == KEY_CODE_YES)
				{
					*c = K(*c);
				}
				else if(*c == L'\0')
				{
					*c = WC_C_SPACE;
				}
				return result;
			}

			process_scheduled_updates();
		}
	}
	while(timeout > 0);

	return ERR;
}
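
The loop above divides the wait budget into IPC_F slices, rounding up so the slices always cover the full min_timeout_len. A stand-alone sketch of the slicing, with the curses and IPC calls elided:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int ipc_f = 10, min_timeout_len = 500, timeout = 730;
	int slice = DIV_ROUND_UP(MIN(min_timeout_len, timeout), ipc_f);

	for (int i = 0; i < ipc_f && timeout > 0; ++i) {
		/* ipc_check() + wtimeout(win, slice) would go here */
		timeout -= slice;
	}
	printf("slice %d ms, %d ms left for the next round\n",
	       slice, timeout > 0 ? timeout : 0);
	return 0;
}
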
Example No. 8
File: xenfb.c Project: CTU-IIG/qemu
static int xenfb_map_fb(struct XenFB *xenfb)
{
    struct xenfb_page *page = xenfb->c.page;
    char *protocol = xenfb->c.xendev.protocol;
    int n_fbdirs;
    xen_pfn_t *pgmfns = NULL;
    xen_pfn_t *fbmfns = NULL;
    void *map, *pd;
    int mode, ret = -1;

    /* default to native */
    pd = page->pd;
    mode = sizeof(unsigned long) * 8;

    if (!protocol) {
	/*
	 * Undefined protocol, some guesswork needed.
	 *
	 * Old frontends which don't set the protocol use
	 * one page directory only, thus pd[1] must be zero.
	 * pd[1] of the 32bit struct layout and the lower
	 * 32 bits of pd[0] of the 64bit struct layout have
	 * the same location, so we can check that ...
	 */
	uint32_t *ptr32 = NULL;
	uint32_t *ptr64 = NULL;
#if defined(__i386__)
	ptr32 = (void*)page->pd;
	ptr64 = ((void*)page->pd) + 4;
#elif defined(__x86_64__)
	ptr32 = ((void*)page->pd) - 4;
	ptr64 = (void*)page->pd;
#endif
	if (ptr32) {
	    if (ptr32[1] == 0) {
		mode = 32;
		pd   = ptr32;
	    } else {
		mode = 64;
		pd   = ptr64;
	    }
	}
#if defined(__x86_64__)
    } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
	/* 64bit dom0, 32bit domU */
	mode = 32;
	pd   = ((void*)page->pd) - 4;
#elif defined(__i386__)
    } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
	/* 32bit dom0, 64bit domU */
	mode = 64;
	pd   = ((void*)page->pd) + 4;
#endif
    }

    if (xenfb->pixels) {
        munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
        xenfb->pixels = NULL;
    }

    xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
    n_fbdirs = xenfb->fbpages * mode / 8;
    n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);

    pgmfns = g_malloc0(sizeof(xen_pfn_t) * n_fbdirs);
    fbmfns = g_malloc0(sizeof(xen_pfn_t) * xenfb->fbpages);

    xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd);
    map = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom,
                               PROT_READ, n_fbdirs, pgmfns, NULL);
    if (map == NULL)
	goto out;
    xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map);
    xenforeignmemory_unmap(xen_fmem, map, n_fbdirs);

    xenfb->pixels = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom,
            PROT_READ, xenfb->fbpages, fbmfns, NULL);
    if (xenfb->pixels == NULL)
	goto out;

    ret = 0; /* all is fine */

out:
    g_free(pgmfns);
    g_free(fbmfns);
    return ret;
}
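
The mapping above is two-level: DIV_ROUND_UP first sizes the framebuffer in pages, then sizes the page directory that stores one PFN per framebuffer page at the guest's pointer width. A sketch with an 8 MiB framebuffer:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define XC_PAGE_SIZE 4096

int main(void)
{
	unsigned long fb_len = 8 * 1024 * 1024;	/* 8 MiB framebuffer */
	int mode = 64;				/* guest pointer width, bits */
	int fbpages = DIV_ROUND_UP(fb_len, XC_PAGE_SIZE);
	int n_fbdirs = DIV_ROUND_UP(fbpages * mode / 8, XC_PAGE_SIZE);

	printf("%d fb pages, %d directory pages\n", fbpages, n_fbdirs);
	return 0;
}
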
Example No. 9
/*
 * This routine finds the Nth virtqueue described in the configuration of
 * this device and sets it up.
 *
 * This is kind of an ugly duckling.  It'd be nicer to have a standard
 * representation of a virtqueue in the configuration space, but it seems that
 * everyone wants to do it differently.  The KVM coders want the Guest to
 * allocate its own pages and tell the Host where they are, but for lguest it's
 * simpler for the Host to simply tell us where the pages are.
 */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
				    unsigned index,
				    void (*callback)(struct virtqueue *vq),
				    const char *name)
{
	struct lguest_device *ldev = to_lgdev(vdev);
	struct lguest_vq_info *lvq;
	struct virtqueue *vq;
	int err;

	/* We must have this many virtqueues. */
	if (index >= ldev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
	if (!lvq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
	 * the descriptor.  We need a copy because the config space might not
	 * be aligned correctly.
	 */
	memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));

	printk("Mapping virtqueue %i addr %lx\n", index,
	       (unsigned long)lvq->config.pfn << PAGE_SHIFT);
	/* Figure out how many pages the ring will take, and map that memory */
	lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
				DIV_ROUND_UP(vring_size(lvq->config.num,
							LGUEST_VRING_ALIGN),
					     PAGE_SIZE));
	if (!lvq->pages) {
		err = -ENOMEM;
		goto free_lvq;
	}

	/*
	 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
	 * and we've got a pointer to its pages.
	 */
	vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
				 vdev, lvq->pages, lg_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/* Make sure the interrupt is allocated. */
	lguest_setup_irq(lvq->config.irq);

	/*
	 * Tell the interrupt for this virtqueue to go to the virtio_ring
	 * interrupt handler.
	 *
	 * FIXME: We used to have a flag for the Host to tell us we could use
	 * the interrupt as a source of randomness: it'd be nice to have that
	 * back.
	 */
	err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vq);
	if (err)
		goto destroy_vring;

	/*
	 * Last of all we hook up our 'struct lguest_vq_info" to the
	 * virtqueue's priv pointer.
	 */
	vq->priv = lvq;
	return vq;

destroy_vring:
	vring_del_virtqueue(vq);
unmap:
	lguest_unmap(lvq->pages);
free_lvq:
	kfree(lvq);
	return ERR_PTR(err);
}
Example No. 10
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
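
A usage sketch for the helper above: shared-buffer accounting is per cell, so any frame occupies a whole number of cells. The 96-byte cell size is only an example value.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned cell_size = 96;	/* example, read from hw in the driver */
	unsigned frame_bytes = 1514;

	printf("%u cells\n", DIV_ROUND_UP(frame_bytes, cell_size));
	return 0;
}
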
Example No. 11
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	int rsd = 0;

	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
		| (CR0_EM_BIG << CR0_EM_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* div doesn't support odd number */
	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
	div = (div + 1) & 0xfffe;

	/* Rx sample delay is expressed in parent clock cycles (max 3) */
	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
				1000000000 >> 8);
	if (!rsd && rs->rsd_nsecs) {
		pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
			     rs->max_freq, rs->rsd_nsecs);
	} else if (rsd > 3) {
		rsd = 3;
		pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
			     rs->max_freq, rs->rsd_nsecs,
			     rsd * 1000000000U / rs->max_freq);
	}
	cr0 |= rsd << CR0_RSD_OFFSET;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}
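
The divisor above is first rounded up so the output clock never exceeds the requested speed, then bumped to the next even value because the register cannot hold odd divisors. A sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned max_freq = 99000000, speed = 20000000;
	unsigned div = DIV_ROUND_UP(max_freq, speed);	/* 5 */

	div = (div + 1) & 0xfffe;			/* -> 6, next even value */
	printf("div %u, actual %u Hz\n", div, max_freq / div);
	return 0;
}
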
Example No. 12
/* Returns the number of sectors to allocate for an inode SIZE
   bytes long. */
inline size_t
bytes_to_sectors (off_t size)
{
  return DIV_ROUND_UP (size, DISK_SECTOR_SIZE);
}
Example No. 13
/* Returns the number of elements required for BIT_CNT bits. */
inline size_t
elem_cnt (size_t bit_cnt)
{
  return DIV_ROUND_UP (bit_cnt, ELEM_BITS);
}
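
Both Pintos helpers above reduce to the same integer ceiling idiom. This is the usual definition of the macro for non-negative operands, as found in, e.g., Pintos' lib/round.h and the Linux kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d %d %d\n",
	       DIV_ROUND_UP(0, 512),	/* 0 */
	       DIV_ROUND_UP(1, 512),	/* 1 */
	       DIV_ROUND_UP(512, 512));	/* 1 */
	return 0;
}
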
Example No. 14
static int __init ram_console_init(struct ram_console_buffer *buffer,
				   size_t buffer_size, char *old_buf)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	int numerr;
	uint8_t *par;
#endif
	ram_console_buffer = buffer;
	ram_console_buffer_size =
		buffer_size - sizeof(struct ram_console_buffer);

	if (ram_console_buffer_size > buffer_size) {
		pr_err("ram_console: buffer %p, invalid size %zu, "
		       "datasize %zu\n", buffer, buffer_size,
		       ram_console_buffer_size);
		return 0;
	}

#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
						ECC_BLOCK_SIZE) + 1) * ECC_SIZE;

	if (ram_console_buffer_size > buffer_size) {
		pr_err("ram_console: buffer %p, invalid size %zu, "
		       "non-ecc datasize %zu\n",
		       buffer, buffer_size, ram_console_buffer_size);
		return 0;
	}

	ram_console_par_buffer = buffer->data + ram_console_buffer_size;


	/* first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
	if (ram_console_rs_decoder == NULL) {
		printk(KERN_INFO "ram_console: init_rs failed\n");
		return 0;
	}

	ram_console_corrected_bytes = 0;
	ram_console_bad_blocks = 0;

	par = ram_console_par_buffer +
	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;

	numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
	if (numerr > 0) {
		printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
		ram_console_corrected_bytes += numerr;
	} else if (numerr < 0) {
		printk(KERN_INFO
		       "ram_console: uncorrectable error in header\n");
		ram_console_bad_blocks++;
	}
#endif

	if (buffer->sig == RAM_CONSOLE_SIG) {
		if (buffer->size > ram_console_buffer_size
		    || buffer->start > buffer->size)
			printk(KERN_INFO "ram_console: found existing invalid "
			       "buffer, size %d, start %d\n",
			       buffer->size, buffer->start);
		else {
			printk(KERN_INFO "ram_console: found existing buffer, "
			       "size %d, start %d\n",
			       buffer->size, buffer->start);
			ram_console_save_old(buffer, old_buf);
		}
	} else {
		printk(KERN_INFO "ram_console: no valid data in buffer "
		       "(sig = 0x%08x)\n", buffer->sig);
	}

	buffer->sig = RAM_CONSOLE_SIG;
	buffer->start = 0;
	buffer->size = 0;

	register_console(&ram_console);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
	console_verbose();
#endif
	return 0;
}
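
The ECC layout above reserves one parity record per ECC_BLOCK_SIZE bytes of data plus one extra record for the buffer header, carved out of the tail of the buffer. A sketch; the block and parity sizes here are assumed.

#include <stddef.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ECC_BLOCK_SIZE 128
#define ECC_SIZE 16	/* parity bytes per block; values assumed */

int main(void)
{
	size_t data = 64 * 1024;
	size_t parity = (DIV_ROUND_UP(data, ECC_BLOCK_SIZE) + 1) * ECC_SIZE;

	printf("data %zu, parity %zu, usable %zu\n",
	       data, parity, data - parity);
	return 0;
}
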
Example No. 15
/* from mac80211/util.c, modified */
int
ieee80211_frame_duration(int phymode, size_t len, int rate, int short_preamble,
			 int shortslot, int type, char qos_class, int retries)
{
	int dur;
	bool erp;
	int sifs, slottime;
	static int last_was_cts;

	erp = ieee80211_is_erp_rate(phymode, rate);

	/* calculate duration (in microseconds, rounded up to next higher
	 * integer if it includes a fractional microsecond) to send frame of
	 * len bytes (does not include FCS) at the given rate. Duration will
	 * also include SIFS.
	 *
	 * rate is in 100 kbps, so the dividend is multiplied by 10 in the
	 * DIV_ROUND_UP() operations.
	 */

	DEBUG("DUR mode %d, len %d, rate %d, shortpre %d shortslot %d type %x UP %d\n", phymode, (int)len, rate, short_preamble, shortslot, type, qos_class);

	if (phymode == PHY_FLAG_A || erp) {
		DEBUG("OFDM\n");
		/*
		 * OFDM:
		 *
		 * N_DBPS = DATARATE x 4
		 * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS)
		 *	(16 = SIGNAL time, 6 = tail bits)
		 * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
		 *
		 * T_SYM = 4 usec
		 * 802.11a - 17.5.2: aSIFSTime = 16 usec
		 * 802.11g - 19.8.4: aSIFSTime = 10 usec +
		 *	signal ext = 6 usec
		 */
		sifs = 16;  /* SIFS + signal ext */
		slottime = 9;
		dur = 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
		dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
		dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
					4 * rate); /* T_SYM x N_SYM */
	} else {
		DEBUG("CCK\n");
		/*
		 * 802.11b or 802.11g with 802.11b compatibility:
		 * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime +
		 * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0.
		 *
		 * 802.11 (DS): 15.3.3, 802.11b: 18.3.4
		 * aSIFSTime = 10 usec
		 * aPreambleLength = 144 usec or 72 usec with short preamble
		 * aPLCPHeaderLength = 48 usec or 24 usec with short preamble
		 */
		sifs = 10; /* aSIFSTime = 10 usec */
		slottime = shortslot ? 9 : 20;
		dur = short_preamble ? (72 + 24) : (144 + 48);
		dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate);
	}

	if (type == WLAN_FRAME_CTS ||
	    type == WLAN_FRAME_ACK) {
		//TODO: also fragments
		DEBUG("DUR SIFS\n");
		dur += sifs;
	}
	else if (type == WLAN_FRAME_BEACON) {
		/* TODO: which AIFS and CW should be used for beacons? */
		dur += sifs + (2 * slottime); /* AIFS */
		dur += (slottime * 1) / 2; /* contention */
	}
	else if (WLAN_FRAME_IS_DATA(type) && last_was_cts) {
		DEBUG("DUR LAST CTS\n");
		dur += sifs;
	}
	else if (type == WLAN_FRAME_QDATA) {
		unsigned char ac = ieee802_1d_to_ac[(unsigned char)qos_class];
		dur += sifs + (ac_to_aifs[ac] * slottime); /* AIFS */
		dur += get_cw_time(ac_to_cwmin[ac], ac_to_cwmax[ac], retries, slottime);
		DEBUG("DUR AIFS %d CWMIN %d AC %d, UP %d\n", ac_to_aifs[ac], ac_to_cwmin[ac], ac, qos_class);
	}
	else {
		DEBUG("DUR DIFS\n");
		dur += sifs + (2 * slottime); /* DIFS */
		dur += get_cw_time(4, 10, retries, slottime);
	}

	if (type == WLAN_FRAME_CTS) {
		DEBUG("SET CTS\n");
		last_was_cts = 1;
	}
	else
		last_was_cts = 0;

	/* TODO: Add EIFS (SIFS + ACKTXTIME) to frames with CRC errors, if we can get them */

	DEBUG("DUR %d\n", dur);
	return dur;
}
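
In the OFDM branch above the symbol count is ceil((16 + 8*bytes + 6) / N_DBPS) with N_DBPS = 4*rate; since rate is in 100 kbps units, the dividend carries an extra factor of 10. A stand-alone sketch of just that airtime term:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* len excludes the 4-byte FCS, which is why (len + 4) appears. */
static int ofdm_airtime_us(int len, int rate_100kbps)
{
	int dur = 16 + 4;	/* T_PREAMBLE + T_SIGNAL */

	/* 4 us per symbol, ceil((16 + 8*bytes + 6 tail bits) / N_DBPS) */
	dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
				4 * rate_100kbps);
	return dur;
}

int main(void)
{
	/* 1500-byte frame at 54 Mbit/s (rate = 540) */
	printf("%d us\n", ofdm_airtime_us(1500, 540));
	return 0;
}
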
Example No. 16
	void (*set_ioforce)(bool enable);
	spinlock_t lock;
	bool sleepmode;
	/* Keep track of configured edges */
	u32 edge_rising;
	u32 edge_falling;
	u32 real_wake;
	u32 rwimsc;
	u32 fwimsc;
	u32 slpm;
	u32 enabled;
	u32 pull_up;
};

static struct nmk_gpio_chip *
nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];

static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);

#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)

static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
				unsigned offset, int gpio_mode)
{
	u32 bit = 1 << offset;
	u32 afunc, bfunc;

	afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
	bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
	if (gpio_mode & NMK_GPIO_ALT_A)
		afunc |= bit;
Example No. 17
static int ath79_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ath79_spi *sp;
	struct ath79_spi_platform_data *pdata;
	struct resource	*r;
	unsigned long rate;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (master == NULL) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	sp = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, sp);

	pdata = dev_get_platdata(&pdev->dev);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->setup = ath79_spi_setup;
	master->cleanup = ath79_spi_cleanup;
	if (pdata) {
		master->bus_num = pdata->bus_num;
		master->num_chipselect = pdata->num_chipselect;
	}

	sp->bitbang.master = master;
	sp->bitbang.chipselect = ath79_spi_chipselect;
	sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
	sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
	sp->bitbang.flags = SPI_CS_HIGH;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto err_put_master;
	}

	sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!sp->base) {
		ret = -ENXIO;
		goto err_put_master;
	}

	sp->clk = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sp->clk)) {
		ret = PTR_ERR(sp->clk);
		goto err_put_master;
	}

	ret = clk_enable(sp->clk);
	if (ret)
		goto err_put_master;

	rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
	if (!rate) {
		ret = -EINVAL;
		goto err_clk_disable;
	}

	sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
	dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
		sp->rrw_delay);

	ath79_spi_enable(sp);
	ret = spi_bitbang_start(&sp->bitbang);
	if (ret)
		goto err_disable;

	return 0;

err_disable:
	ath79_spi_disable(sp);
err_clk_disable:
	clk_disable(sp->clk);
err_put_master:
	spi_master_put(sp->bitbang.master);

	return ret;
}
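
The AHB rate above is converted to MHz with a round-up so a clock just below a whole megahertz still yields a nonzero rate before it divides the delay factor. A sketch; the factor value below is an assumption, not the driver's constant.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MHZ (1000 * 1000)

int main(void)
{
	unsigned long ahb_hz = 199999999;
	unsigned long rate = DIV_ROUND_UP(ahb_hz, MHZ);	/* 200 */
	unsigned long delay_factor = 12000;		/* assumed */

	printf("register read/write delay ~ %lu ns\n", delay_factor / rate);
	return 0;
}
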
Example No. 18
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (user) {
		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
			goto err5;
	} else
		if (alloc_host_sq(rdev, &wq->sq))
			goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
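
Firmware work requests on this hardware carry their length in 16-byte units, hence the DIV_ROUND_UP(wr_len, 16) stored in len16_pkd above. A trivial sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int wr_len = 100;	/* work request byte length, example only */

	printf("len16 = %d\n", DIV_ROUND_UP(wr_len, 16));	/* 7 */
	return 0;
}
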
Example No. 19
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(&dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	*srq->db.db = 0;

	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;

	return 0;

err_in:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(&dev->mdev, &srq->db);
	return err;
}
Example No. 20
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
Example No. 21
struct pm_irq_chip *  __devinit pm8xxx_irq_init(struct device *dev,
				const struct pm8xxx_irq_platform_data *pdata)
{
	struct pm_irq_chip  *chip;
	int devirq, rc;
	unsigned int pmirq;

	if (!pdata) {
		pr_err("No platform data\n");
		return ERR_PTR(-EINVAL);
	}

	devirq = pdata->devirq;
	if (devirq < 0) {
		pr_err("missing devirq\n");
		rc = devirq;
		return ERR_PTR(-EINVAL);
	}

	chip = kzalloc(sizeof(struct pm_irq_chip)
			+ sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
	if (!chip) {
		pr_err("Cannot alloc pm_irq_chip struct\n");
		return ERR_PTR(-EINVAL);
	}

	memset((void *)&pm8xxx_wake_state.wake_enable[0], 0,
	       sizeof(u8) * MAX_PM_IRQ);
	pm8xxx_wake_state.count_wakeable = 0;

	chip->dev = dev;
	chip->devirq = devirq;
	chip->irq_base = pdata->irq_base;
	chip->num_irqs = pdata->irq_cdata.nirqs;
	chip->base_addr = pdata->irq_cdata.base_addr;
	chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
	chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
	spin_lock_init(&chip->pm_irq_lock);

	for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
		irq_set_chip_and_handler(chip->irq_base + pmirq,
				&pm8xxx_irq_chip,
				handle_level_irq);
		irq_set_chip_data(chip->irq_base + pmirq, chip);
#ifdef CONFIG_ARM
		set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
#else
		irq_set_noprobe(chip->irq_base + pmirq);
#endif
	}

	if (devirq != 0) {
		rc = request_irq(devirq, pm8xxx_irq_handler,
				pdata->irq_trigger_flag,
				"pm8xxx_usr_irq", chip);
		if (rc) {
			pr_err("failed to request_irq for %d rc=%d\n",
								devirq, rc);
		} else {
			irq_set_irq_wake(devirq, 1);
		}
	}

	return chip;
}
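
The PMIC above arranges 8 IRQs per block and 8 blocks per master, so both levels of the hierarchy are sized with a round-up. A sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_irqs = 98;
	unsigned num_blocks = DIV_ROUND_UP(num_irqs, 8);	/* 13 */
	unsigned num_masters = DIV_ROUND_UP(num_blocks, 8);	/* 2 */

	printf("%u blocks, %u masters\n", num_blocks, num_masters);
	return 0;
}
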
Example No. 22
/****************
 * Use BUFFER to update MPI.
 */
int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
{
	const uint8_t *buffer = xbuffer, *p;
	mpi_limb_t alimb;
	int nlimbs;
	int i;

	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
		return -ENOMEM;
	a->sign = sign;

	for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
		alimb |= (mpi_limb_t) *p-- << 32;
		alimb |= (mpi_limb_t) *p-- << 40;
		alimb |= (mpi_limb_t) *p-- << 48;
		alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	if (p >= buffer) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 32;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 40;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 48;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	a->nlimbs = i;

	if (i != nlimbs) {
		pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
		       nlimbs);
		BUG();
	}
	return 0;
}
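
Above, nlimbs is the byte count rounded up to whole machine words; the topmost limb is then only partially filled, which is what the trailing "if (p >= buffer)" chain handles. A sizing sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BYTES_PER_MPI_LIMB sizeof(unsigned long)

int main(void)
{
	unsigned nbytes = 257;	/* e.g. a 2056-bit value */
	unsigned nlimbs = DIV_ROUND_UP(nbytes,
				       (unsigned)BYTES_PER_MPI_LIMB);

	printf("%u limbs of %zu bytes\n", nlimbs, BYTES_PER_MPI_LIMB);
	return 0;
}
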
Example No. 23
}
#else
#define gic_check_cpu_features()	do { } while(0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	struct irq_chip chip;
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);
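
The saved_spi_* arrays above are sized by packing density: one interrupt per bit for enable/active state, two bits for config, one byte for target, over the up to 1020 interrupt lines a GIC can have. A sketch printing the resulting word counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("enable words: %d\n", DIV_ROUND_UP(1020, 32));	/* 32 */
	printf("config words: %d\n", DIV_ROUND_UP(1020, 16));	/* 64 */
	printf("target words: %d\n", DIV_ROUND_UP(1020, 4));	/* 255 */
	return 0;
}
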
Example No. 24
/*
 * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
 *			     data from the sgl
 *
 * This function works in the same way as the mpi_read_raw_data, but it
 * takes an sgl instead of void * buffer. i.e. it allocates
 * a new MPI and reads the content of the sgl to the MPI.
 *
 * @sgl:	scatterlist to read from
 * @nbytes:	number of bytes to read
 *
 * Return:	Pointer to a new MPI or NULL on error
 */
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
{
	struct scatterlist *sg;
	int x, i, j, z, lzeros, ents;
	unsigned int nbits, nlimbs;
	mpi_limb_t a;
	MPI val = NULL;

	lzeros = 0;
	ents = sg_nents(sgl);

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buff = sg_virt(sg);
		int len = sg->length;

		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		ents--;
		nbytes -= lzeros;
		lzeros = 0;
	}

	sgl = sg;
	nbytes -= lzeros;
	nbits = nbytes * 8;
	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		return NULL;
	}

	if (nbytes > 0)
		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
			(BITS_PER_LONG - 8);

	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
	val = mpi_alloc(nlimbs);
	if (!val)
		return NULL;

	val->nbits = nbits;
	val->sign = 0;
	val->nlimbs = nlimbs;

	if (nbytes == 0)
		return val;

	j = nlimbs - 1;
	a = 0;
	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	z %= BYTES_PER_MPI_LIMB;

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buffer = sg_virt(sg) + lzeros;
		int len = sg->length - lzeros;

		for (x = 0; x < len; x++) {
			a <<= 8;
			a |= *buffer++;
			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
				val->d[j--] = a;
				a = 0;
			}
		}
		z += x;
		lzeros = 0;
	}
	return val;
}
Example No. 25
/* bit order: least significant first */
static int usbp5_tdi_seq(const uint8_t *txbits, uint8_t *rxbits, int nb_bits, int tap_shift)
{
	uint8_t rxbuf, txbuf = 0; //OR: swd_en ? 0 : 0xff;
	int i, ofs, n_bytes_rx, n_bytes_tx;
	/* special handling for dummy data (tx/rxbits == NULL) during RUNTEST, etc. */
	int txstride = txbits ? 1 : 0;
	int rxstride = rxbits ? 1 : 0;
	uint8_t *rxptr = rxstride ? rxbits : &rxbuf;
	const uint8_t *txptr = txstride ? txbits : &txbuf;
	uint32_t status;
		
	if(!nb_bits)
		return ERROR_OK;
	
	/* set TMS low */
	iomem[IOCFG_GPIO + IO_MODE0_RESET] = (1<<GPIO_TMS_BIT);
	
	n_bytes_rx = n_bytes_tx = DIV_ROUND_UP(nb_bits, 8);
	
	/* we can use the SPI peripheral only to transmit full bytes and
	 * only as long as we do not need a TMS transition
	 * 
	 * we cannot transmit the last byte if less than 8 bits are used
	 * or if we need a TMS transition */
	if( (tap_shift) || (nb_bits&7) ) {
		n_bytes_rx--;
		n_bytes_tx--;
	}
	
	/* remainder - handled w/ bitbanging */
	nb_bits -= n_bytes_rx<<3;
	
	for( ; n_bytes_tx ; n_bytes_tx-- , txptr+=txstride) {
		
		/* busywait
		 * - receive one byte if possible to avoid RX overflows
		 * - wait until TX fifo has at least one free entry
		 */
		do {
			status = spimem[SPI_STATUS];
			if(!(status&(1<<SPI_STATUS_RXEMPTY))) {
				*rxptr = bitrev[spimem[SPI_FIFO_DATA]];
				rxptr+=rxstride;
				n_bytes_rx--;
			}
		} while(status&(1<<SPI_STATUS_TXFULL));
		
		/* send one byte */
		spimem[SPI_FIFO_DATA] = bitrev[*txptr];
	}
	
	/* wait until we received everything */
	do {
		status = spimem[SPI_STATUS];
		if(!(status&(1<<SPI_STATUS_RXEMPTY))) {
			*rxptr = bitrev[spimem[SPI_FIFO_DATA]];
			rxptr+=rxstride;
			n_bytes_rx--;
		}
	} while(n_bytes_rx);
	
	if(!nb_bits)
		return ERROR_OK;
	
	/* handle remainder */
	
	/* switch MOSI/TDI to general purpose output */
	iomem[IOCFG_SPI + IO_MODE1_SET] = (1<<SPI_MOSI_BIT);
	
	/* special handling for last bit if TMS action needed */
	nb_bits -= tap_shift;
	
	/* process all remaining bits if no TMS action needed
	 * otherwise process all bits but the last one */
	txbuf = *txptr;
	rxbuf = 0;
	for(i=0; i<nb_bits;	i++) {
		/* (re)set TDI */
		ofs = IOCFG_SPI + IO_MODE0_SET + ((txbuf&1)^1);
		iomem[ofs] = (1<<SPI_MOSI_BIT);
		udelay(tck_halfcycle_delay);
		
		/* sample TDO */
		rxbuf |= ((iomem[IOCFG_SPI + IO_PINS]>>SPI_MISO_BIT)&1)<<i;
		
		// set TCK hi
		iomem[IOCFG_GPIO + IO_MODE0_SET] = (1<<GPIO_TCK_BIT);
		udelay(tck_halfcycle_delay);
			
		// set TCK low
		iomem[IOCFG_GPIO + IO_MODE0_RESET] = (1<<GPIO_TCK_BIT);

		txbuf >>= 1;
	}
	
	if(!tap_shift)
		goto done;
	
	/* handle final bit with TMS transition */
	
	/* set TMS hi */
	iomem[IOCFG_GPIO + IO_MODE0_SET] = (1<<GPIO_TMS_BIT);
	
	/* (re)set TDI */
	ofs = IOCFG_SPI + IO_MODE0_SET + ((txbuf&1)^1);
	iomem[ofs] = (1<<SPI_MOSI_BIT);
	udelay(tck_halfcycle_delay);
		
	/* sample TDO */
	rxbuf |= ((iomem[IOCFG_SPI + IO_PINS]>>SPI_MISO_BIT)&1)<<i;
		
	/* set TCK high */
	iomem[IOCFG_GPIO + IO_MODE0_SET] = (1<<GPIO_TCK_BIT);
	udelay(tck_halfcycle_delay);

	/* set TCK low */
	iomem[IOCFG_GPIO + IO_MODE0_RESET] = (1<<GPIO_TCK_BIT);
	
done:
	/* reset GPIO TDI so we can use the SPI peripheral again,
	 * otherwise the OR gate would keep TDI high
	 * TODO: enable this once a dedicated TDI GPIO exists */
	/* iomem[IOCFG_GPIO + IO_MODE0_RESET] = (1<<GPIO_TDI_BIT); */
	
	/* switch MOSI/TDI back to MOSI */
	iomem[IOCFG_SPI + IO_MODE1_RESET] = (1<<SPI_MOSI_BIT);
	iomem[IOCFG_SPI + IO_MODE0_SET] = (1<<SPI_MOSI_BIT);
	
	/* store final rxbyte */
	*rxptr = rxbuf;
	
	return ERROR_OK;	
}
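
A hedged usage sketch: thanks to the NULL handling at the top of the function, idle TCK cycles (e.g. during RUNTEST) can be clocked without any buffers (the cycle count here is illustrative):

/* clock 64 idle TCK cycles, discard TDO, no final TMS transition */
usbp5_tdi_seq(NULL, NULL, 64, 0);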
Example 26
/* Appends a description of 'learn' to 's', in the format that ovs-ofctl(8)
 * describes. */
void
learn_format(const struct ofpact_learn *learn, struct ds *s)
{
    const struct ofpact_learn_spec *spec;
    struct match match;

    match_init_catchall(&match);

    ds_put_format(s, "%slearn(%s%stable=%s%"PRIu8,
                  colors.learn, colors.end, colors.special, colors.end,
                  learn->table_id);
    if (learn->idle_timeout != OFP_FLOW_PERMANENT) {
        ds_put_format(s, ",%sidle_timeout=%s%"PRIu16,
                      colors.param, colors.end, learn->idle_timeout);
    }
    if (learn->hard_timeout != OFP_FLOW_PERMANENT) {
        ds_put_format(s, ",%shard_timeout=%s%"PRIu16,
                      colors.param, colors.end, learn->hard_timeout);
    }
    if (learn->fin_idle_timeout) {
        ds_put_format(s, ",%sfin_idle_timeout=%s%"PRIu16,
                      colors.param, colors.end, learn->fin_idle_timeout);
    }
    if (learn->fin_hard_timeout) {
        ds_put_format(s, "%s,fin_hard_timeout=%s%"PRIu16,
                      colors.param, colors.end, learn->fin_hard_timeout);
    }
    if (learn->priority != OFP_DEFAULT_PRIORITY) {
        ds_put_format(s, "%s,priority=%s%"PRIu16,
                      colors.special, colors.end, learn->priority);
    }
    if (learn->flags & NX_LEARN_F_SEND_FLOW_REM) {
        ds_put_format(s, ",%ssend_flow_rem%s", colors.value, colors.end);
    }
    if (learn->flags & NX_LEARN_F_DELETE_LEARNED) {
        ds_put_format(s, ",%sdelete_learned%s", colors.value, colors.end);
    }
    if (learn->cookie != 0) {
        ds_put_format(s, ",%scookie=%s%#"PRIx64,
                      colors.param, colors.end, ntohll(learn->cookie));
    }

    OFPACT_LEARN_SPEC_FOR_EACH (spec, learn) {
        unsigned int n_bytes = DIV_ROUND_UP(spec->n_bits, 8);
        ds_put_char(s, ',');

        switch (spec->src_type | spec->dst_type) {
        case NX_LEARN_SRC_IMMEDIATE | NX_LEARN_DST_MATCH: {
            if (spec->dst.ofs == 0
                && spec->dst.n_bits == spec->dst.field->n_bits) {
                union mf_value value;

                memset(&value, 0, sizeof value);
                memcpy(&value.b[spec->dst.field->n_bytes - n_bytes],
                       ofpact_learn_spec_imm(spec), n_bytes);
                ds_put_format(s, "%s%s=%s", colors.param,
                              spec->dst.field->name, colors.end);
                mf_format(spec->dst.field, &value, NULL, s);
            } else {
                ds_put_format(s, "%s", colors.param);
                mf_format_subfield(&spec->dst, s);
                ds_put_format(s, "=%s", colors.end);
                ds_put_hex(s, ofpact_learn_spec_imm(spec), n_bytes);
            }
            break;
        }
        case NX_LEARN_SRC_FIELD | NX_LEARN_DST_MATCH:
            ds_put_format(s, "%s", colors.param);
            mf_format_subfield(&spec->dst, s);
            ds_put_format(s, "%s", colors.end);
            if (spec->src.field != spec->dst.field ||
                spec->src.ofs != spec->dst.ofs) {
                ds_put_format(s, "%s=%s", colors.param, colors.end);
                mf_format_subfield(&spec->src, s);
            }
            break;

        case NX_LEARN_SRC_IMMEDIATE | NX_LEARN_DST_LOAD:
            ds_put_format(s, "%sload:%s", colors.special, colors.end);
            ds_put_hex(s, ofpact_learn_spec_imm(spec), n_bytes);
            ds_put_format(s, "%s->%s", colors.special, colors.end);
            mf_format_subfield(&spec->dst, s);
            break;

        case NX_LEARN_SRC_FIELD | NX_LEARN_DST_LOAD:
            ds_put_format(s, "%sload:%s", colors.special, colors.end);
            mf_format_subfield(&spec->src, s);
            ds_put_format(s, "%s->%s", colors.special, colors.end);
            mf_format_subfield(&spec->dst, s);
            break;

        case NX_LEARN_SRC_FIELD | NX_LEARN_DST_OUTPUT:
            ds_put_format(s, "%soutput:%s", colors.special, colors.end);
            mf_format_subfield(&spec->src, s);
            break;
        }
    }

    ds_put_format(s, "%s)%s", colors.paren, colors.end);
}
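
A brief usage sketch with OVS's dynamic-string helpers; the learn action itself is assumed to have been built elsewhere (e.g. by the flow parser):

    struct ds s = DS_EMPTY_INITIALIZER;

    learn_format(learn, &s);
    /* e.g. "learn(table=1,idle_timeout=10,NXM_OF_VLAN_TCI[0..11],
     * output:NXM_OF_IN_PORT[])" (illustrative output) */
    printf("%s\n", ds_cstr(&s));
    ds_destroy(&s);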
Example 27
static int rpc_spi_xfer(struct udevice *dev, unsigned int bitlen,
			const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct rpc_spi_priv *priv = dev_get_priv(bus);
	u32 wlen = dout ? (bitlen / 8) : 0;
	u32 rlen = din ? (bitlen / 8) : 0;
	u32 wloop = DIV_ROUND_UP(wlen, 4);
	u32 smenr, smcr, offset;
	int ret = 0;

	if (!priv->cmdstarted) {
		if (!wlen || rlen)
			BUG();

		memcpy(priv->cmdcopy, dout, wlen);
		priv->cmdlen = wlen;

		/* Command transfer start */
		priv->cmdstarted = true;
		if (!(flags & SPI_XFER_END))
			return 0;
	}

	offset = (priv->cmdcopy[1] << 16) | (priv->cmdcopy[2] << 8) |
		 (priv->cmdcopy[3] << 0);

	smenr = 0;

	if (wlen || (!rlen && !wlen) || flags == SPI_XFER_ONCE) {
		if (wlen && flags == SPI_XFER_END)
			smenr = RPC_SMENR_SPIDE(0xf);

		rpc_spi_claim_bus(dev, true);

		writel(0, priv->regs + RPC_SMCR);

		if (priv->cmdlen >= 1) {	/* Command(1) */
			writel(RPC_SMCMR_CMD(priv->cmdcopy[0]),
			       priv->regs + RPC_SMCMR);
			smenr |= RPC_SMENR_CDE;
		} else {
			writel(0, priv->regs + RPC_SMCMR);
		}

		if (priv->cmdlen >= 4) {	/* Address(3) */
			writel(offset, priv->regs + RPC_SMADR);
			smenr |= RPC_SMENR_ADE(7);
		} else {
			writel(0, priv->regs + RPC_SMADR);
		}

		if (priv->cmdlen >= 5) {	/* Dummy(n): 8 cycles per byte, reg takes n-1 */
			writel(8 * (priv->cmdlen - 4) - 1,
			       priv->regs + RPC_SMDMCR);
			smenr |= RPC_SMENR_DME;
		} else {
			writel(0, priv->regs + RPC_SMDMCR);
		}

		writel(0, priv->regs + RPC_SMOPR);

		writel(0, priv->regs + RPC_SMDRENR);

		if (wlen && flags == SPI_XFER_END) {
			u32 *datout = (u32 *)dout;

			while (wloop--) {
				smcr = RPC_SMCR_SPIWE | RPC_SMCR_SPIE;
				if (wloop >= 1)
					smcr |= RPC_SMCR_SSLKP;
				writel(smenr, priv->regs + RPC_SMENR);
				writel(*datout, priv->regs + RPC_SMWDR0);
				writel(smcr, priv->regs + RPC_SMCR);
				ret = rpc_spi_wait_tend(dev);
				if (ret)
					goto err;
				datout++;
				smenr = RPC_SMENR_SPIDE(0xf);
			}

			ret = rpc_spi_wait_sslf(dev);

		} else {
			writel(smenr, priv->regs + RPC_SMENR);
			writel(RPC_SMCR_SPIE, priv->regs + RPC_SMCR);
			ret = rpc_spi_wait_tend(dev);
		}
	} else {	/* Read data only, using DRx ext access */
		rpc_spi_claim_bus(dev, false);

		if (priv->cmdlen >= 1) {	/* Command(1) */
			writel(RPC_DRCMR_CMD(priv->cmdcopy[0]),
			       priv->regs + RPC_DRCMR);
			smenr |= RPC_DRENR_CDE;
		} else {
			writel(0, priv->regs + RPC_DRCMR);
		}

		if (priv->cmdlen >= 4)		/* Address(3) */
			smenr |= RPC_DRENR_ADE(7);

		if (priv->cmdlen >= 5) {	/* Dummy(n): 8 cycles per byte, reg takes n-1 */
			writel(8 * (priv->cmdlen - 4) - 1,
			       priv->regs + RPC_DRDMCR);
			smenr |= RPC_DRENR_DME;
		} else {
			writel(0, priv->regs + RPC_DRDMCR);
		}

		writel(0, priv->regs + RPC_DROPR);

		writel(smenr, priv->regs + RPC_DRENR);

		if (rlen)
			memcpy_fromio(din, (void *)(priv->extr + offset), rlen);
		else
			readl(priv->extr);	/* Dummy read */
	}

err:
	priv->cmdstarted = false;

	rpc_spi_release_bus(dev);

	return ret;
}
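
A hedged sketch of the two-phase protocol implied by the cmdstarted logic above: the first transfer only latches the command and address bytes, and the SPI_XFER_END transfer fires the actual bus cycle (opcode, address and lengths are illustrative; real callers go through spi_xfer() rather than calling the static function directly):

	u8 cmd[4] = { 0x03, 0x00, 0x10, 0x00 };	/* READ at 0x001000 */
	u8 data[16];

	rpc_spi_xfer(dev, sizeof(cmd) * 8, cmd, NULL, SPI_XFER_BEGIN);
	rpc_spi_xfer(dev, sizeof(data) * 8, NULL, data, SPI_XFER_END);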
Example 28
static int
wlcore_scan_get_channels(struct wl1271 *wl,
			 struct ieee80211_channel *req_channels[],
			 u32 n_channels,
			 u32 n_ssids,
			 struct conn_scan_ch_params *channels,
			 u32 band, bool radar, bool passive,
			 int start, int max_channels,
			 u8 *n_pactive_ch,
			 int scan_type)
{
	int i, j;
	u32 flags;
	bool force_passive = !n_ssids;
	u32 min_dwell_time_active, max_dwell_time_active;
	u32 dwell_time_passive, dwell_time_dfs;

	/* configure dwell times according to scan type */
	/* TODO: consider req->min/max dwell time */
	if (scan_type == SCAN_TYPE_SEARCH) {
		struct conf_scan_settings *c = &wl->conf.scan;
		bool active_vif_exists = !!wlcore_count_started_vifs(wl);

		min_dwell_time_active = active_vif_exists ?
			c->min_dwell_time_active :
			c->min_dwell_time_active_long;
		max_dwell_time_active = active_vif_exists ?
			c->max_dwell_time_active :
			c->max_dwell_time_active_long;
		dwell_time_passive = c->dwell_time_passive;
		dwell_time_dfs = c->dwell_time_dfs;
	} else {
		struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
		u32 delta_per_probe;

		if (band == IEEE80211_BAND_5GHZ)
			delta_per_probe = c->dwell_time_delta_per_probe_5;
		else
			delta_per_probe = c->dwell_time_delta_per_probe;

		min_dwell_time_active = c->base_dwell_time +
			 n_ssids * c->num_probe_reqs * delta_per_probe;

		max_dwell_time_active = min_dwell_time_active +
					c->max_dwell_time_delta;
		dwell_time_passive = c->dwell_time_passive;
		dwell_time_dfs = c->dwell_time_dfs;
	}
	min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
	max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
	dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000);
	dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000);
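	/* illustrative numbers, not driver defaults: base_dwell_time 30000
	 * with 2 SSIDs, 2 probe requests and delta_per_probe 1000 gives
	 * 30000 + 2 * 2 * 1000 = 34000, i.e. 34 after the rounding above */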

	for (i = 0, j = start;
	     i < n_channels && j < max_channels;
	     i++) {
		flags = req_channels[i]->flags;

		if (force_passive)
			flags |= IEEE80211_CHAN_PASSIVE_SCAN;

		if ((req_channels[i]->band == band) &&
		    !(flags & IEEE80211_CHAN_DISABLED) &&
		    (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
		    /* if radar is set, we ignore the passive flag */
		    (radar ||
		     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
			if (flags & IEEE80211_CHAN_RADAR) {
				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;

				channels[j].passive_duration =
					cpu_to_le16(dwell_time_dfs);
			} else {
				channels[j].passive_duration =
					cpu_to_le16(dwell_time_passive);
			}

			channels[j].min_duration =
				cpu_to_le16(min_dwell_time_active);
			channels[j].max_duration =
				cpu_to_le16(max_dwell_time_active);

			channels[j].tx_power_att = req_channels[i]->max_power;
			channels[j].channel = req_channels[i]->hw_value;

			if (n_pactive_ch &&
			    (band == IEEE80211_BAND_2GHZ) &&
			    (channels[j].channel >= 12) &&
			    (channels[j].channel <= 14) &&
			    (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
			    !force_passive) {
				/* pactive channels treated as DFS */
				channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;

				/*
				 * n_pactive_ch is counted down from the end of
				 * the passive channel list
				 */
				(*n_pactive_ch)++;
				wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
					     *n_pactive_ch);
			}

			wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
				     req_channels[i]->center_freq,
				     req_channels[i]->hw_value,
				     req_channels[i]->flags,
				     req_channels[i]->max_power,
				     min_dwell_time_active,
				     max_dwell_time_active,
				     flags & IEEE80211_CHAN_RADAR ?
					", DFS" : "",
				     flags & IEEE80211_CHAN_PASSIVE_SCAN ?
					", PASSIVE" : "");
			j++;
		}
	}

	return j - start;
}
Example 29
static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
			   uint32_t context)
{
	struct msm_rpm_iv_pair iv = {
		.id = r->rpm_clk_id,
		.value = value,
	};
	return msm_rpmrs_set(context, &iv, 1);
}

static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
	int rc;
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	rc = msm_rpm_get_status(&iv, 1);
	return (rc < 0) ? rc : iv.value * 1000;
}

static int clk_rpmrs_handoff(struct rpm_clk *r)
{
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	int rc = msm_rpm_get_status(&iv, 1);

	if (rc < 0)
		return rc;

	if (!r->branch)
		r->c.rate = iv.value * 1000;

	return 0;
}

static int clk_rpmrs_is_enabled(struct rpm_clk *r)
{
	return !!clk_rpmrs_get_rate(r);
}

static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
				uint32_t context)
{
	struct msm_rpm_kvp kvp = {
		.key = r->rpm_key,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	return msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
			&kvp, 1);
}

static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
{
	if (!r->branch)
		r->c.rate = INT_MAX;

	return 0;
}

static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
{
	return !!r->c.prepare_count;
}

struct clk_rpmrs_data {
	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
	int (*get_rate_fn)(struct rpm_clk *r);
	int (*handoff_fn)(struct rpm_clk *r);
	int (*is_enabled)(struct rpm_clk *r);
	int ctx_active_id;
	int ctx_sleep_id;
};

struct clk_rpmrs_data clk_rpmrs_data = {
	.set_rate_fn = clk_rpmrs_set_rate,
	.get_rate_fn = clk_rpmrs_get_rate,
	.handoff_fn = clk_rpmrs_handoff,
	.is_enabled = clk_rpmrs_is_enabled,
	.ctx_active_id = MSM_RPM_CTX_SET_0,
	.ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

struct clk_rpmrs_data clk_rpmrs_data_smd = {
	.set_rate_fn = clk_rpmrs_set_rate_smd,
	.handoff_fn = clk_rpmrs_handoff_smd,
	.is_enabled = clk_rpmrs_is_enabled_smd,
	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

static DEFINE_MUTEX(rpm_clock_lock);

static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
			unsigned long *active_khz, unsigned long *sleep_khz)
{
	/* Convert the rate (hz) to khz */
	*active_khz = DIV_ROUND_UP(rate, 1000);

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep_khz = 0;
	else
		*sleep_khz = *active_khz;
}
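
A quick illustrative check of the conversion above (the rate is made up; struct fields beyond active_only are left defaulted):

	unsigned long active_khz, sleep_khz;
	struct rpm_clk r = { .active_only = true };

	to_active_sleep_khz(&r, 19200000, &active_khz, &sleep_khz);
	/* active_khz == 19200, sleep_khz == 0: active-only clocks vote
	 * zero for the sleep set */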

static int rpm_clk_prepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	mutex_lock(&rpm_clock_lock);

	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

	value = max(this_khz, peer_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	mutex_unlock(&rpm_clock_lock);

	return rc;
}

static void rpm_clk_unprepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);

	mutex_lock(&rpm_clock_lock);

	if (r->c.rate) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}
	r->enabled = false;
out:
	mutex_unlock(&rpm_clock_lock);

	return;
}

static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	mutex_lock(&rpm_clock_lock);

	if (r->enabled) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
					&peer_khz, &peer_sleep_khz);

		value = max(this_khz, peer_khz);
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = max(this_sleep_khz, peer_sleep_khz);
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}

out:
	mutex_unlock(&rpm_clock_lock);

	return rc;
}

static int rpm_branch_clk_set_rate(struct clk *clk, unsigned long rate)
{
	if (rate == clk->rate)
		return 0;

	return -EPERM;
}

static unsigned long rpm_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	if (r->rpmrs_data->get_rate_fn)
		return r->rpmrs_data->get_rate_fn(r);
	else
		return clk->rate;
}

static int rpm_clk_is_enabled(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	return r->rpmrs_data->is_enabled(r);
}

static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
	/* Not supported. */
	return rate;
}

static bool rpm_clk_is_local(struct clk *clk)
{
	return false;
}

static enum handoff rpm_clk_handoff(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	int rc;

	/*
	 * Querying an RPM clock's status will return 0 unless the clock's
	 * rate has previously been set through the RPM. When handing off,
	 * assume these clocks are enabled (unless the RPM call fails) so
	 * child clocks of these RPM clocks can still be handed off.
	 */
	rc  = r->rpmrs_data->handoff_fn(r);
	if (rc < 0)
		return HANDOFF_DISABLED_CLK;

	/*
	 * Since RPM handoff code may update the software rate of the clock by
	 * querying the RPM, we need to make sure our request to RPM now
	 * matches the software rate of the clock. When we send the request
	 * to RPM, we also need to update any other state info we would
	 * normally update. So, call the appropriate clock function instead
	 * of directly using the RPM driver APIs.
	 */
	rc = rpm_clk_prepare(clk);
	if (rc < 0)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

#define RPM_MISC_CLK_TYPE	0x306b6c63
#define RPM_SCALING_ENABLE_ID	0x2

void enable_rpm_scaling(void)
{
	int rc, value = 0x1;
	struct msm_rpm_kvp kvp = {
		.key = RPM_SMD_KEY_ENABLE,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
	WARN(rc < 0, "RPM clock scaling (sleep set) did not enable!\n");

	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
	WARN(rc < 0, "RPM clock scaling (active set) did not enable!\n");
}

struct clk_ops clk_ops_rpm = {
	.prepare = rpm_clk_prepare,
	.unprepare = rpm_clk_unprepare,
	.set_rate = rpm_clk_set_rate,
	.get_rate = rpm_clk_get_rate,
	.is_enabled = rpm_clk_is_enabled,
	.round_rate = rpm_clk_round_rate,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};

struct clk_ops clk_ops_rpm_branch = {
	.prepare = rpm_clk_prepare,
	.unprepare = rpm_clk_unprepare,
	.set_rate = rpm_branch_clk_set_rate,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};
Example 30
/* read CONFIG_NAND_SUNXI_ECC_STEP bytes from src to dst */
void
nand_read_block(struct sunxi_nand *nand, phys_addr_t src, dma_addr_t dst,
                int syndrome)
{
    struct sunxi_dma * const dma = (struct sunxi_dma *)SUNXI_DMA_BASE;
    struct sunxi_dma_cfg * const dma_cfg = &dma->ddma[0];

    uint32_t shift;
    uint32_t page;
    uint32_t addr;
    uint32_t oob_offset;
    uint32_t ecc_bytes;
    u32 val;
    u32 cmd;

    page = src / CONFIG_NAND_SUNXI_PAGE_SIZE;
    if (page > 0xFFFF) {
        /* TODO: currently this is not supported */
        printf("Reading from address >= %08X is not allowed.\n",
               0xFFFF * CONFIG_NAND_SUNXI_PAGE_SIZE);
        return;
    }

    shift = src % CONFIG_NAND_SUNXI_PAGE_SIZE;
    writel(0, &nand->ecc_st);

    /* ECC_CTL, randomization */
    ecc_bytes = CONFIG_NAND_SUNXI_ECC_STRENGTH *
                fls(CONFIG_NAND_SUNXI_ECC_STEP * 8);
    ecc_bytes = DIV_ROUND_UP(ecc_bytes, 8);
    ecc_bytes += (ecc_bytes & 1); /* Align to 2-bytes */
    ecc_bytes += 4;
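    /* worked instance with hypothetical values (not a board config):
     * strength 40, 1024-byte step: fls(1024 * 8) = 14, 40 * 14 = 560
     * bits -> DIV_ROUND_UP(560, 8) = 70 bytes, already even, +4 -> 74 */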

    nand_config_ecc(nand, page, syndrome);
    if (syndrome) {
        /* shift every 1kB in syndrome */
        shift += (shift / CONFIG_NAND_SUNXI_ECC_STEP) * ecc_bytes;
        oob_offset = CONFIG_NAND_SUNXI_ECC_STEP + shift;
    } else {
        oob_offset = CONFIG_NAND_SUNXI_PAGE_SIZE  +
                     (shift / CONFIG_NAND_SUNXI_ECC_STEP) * ecc_bytes;
    }

    addr = (page << 16) | shift;

    /* DMA */
    val = readl(&nand->ctl);
    writel(val | SUNXI_NAND_CTL_RAM_METHOD_DMA, &nand->ctl);

    writel(oob_offset, &nand->spare_area);

    /* DMAC
     * \todo Separate this into a tidy driver */
    writel(0x0, &dma->irq_en); /* clear dma interrupts */
    writel((uint32_t) &nand->io_data , &dma_cfg->src_addr);
    writel(dst            , &dma_cfg->dst_addr);
    writel(0x00007F0F     , &dma_cfg->ddma_para);
    writel(CONFIG_NAND_SUNXI_ECC_STEP, &dma_cfg->bc);

    val = SUNXI_DMA_CTL_SRC_DRQ(DDMA_SRC_DRQ_NAND) |
            SUNXI_DMA_CTL_MODE_IO |
            SUNXI_DMA_CTL_SRC_DATA_WIDTH_32 |
            SUNXI_DMA_CTL_DST_DRQ(DDMA_DST_DRQ_SDRAM) |
            SUNXI_DMA_CTL_DST_DATA_WIDTH_32 |
            SUNXI_DMA_CTL_TRIGGER;
    writel(val, &dma_cfg->ctl);

    writel(0x00E00530, &nand->rcmd_set);
    nand_wait_timeout(&nand->st, SUNXI_NAND_ST_FIFO_FULL, 0);

    writel(1   , &nand->block_num);
    writel(addr, &nand->addr_low);
    writel(0   , &nand->addr_high);

    /* CMD (PAGE READ) */
    cmd = 0x85E80000;
    cmd |= SUNXI_NAND_CMD_ADDR_CYCLES(CONFIG_NAND_SUNXI_ADDR_CYCLES);
    cmd |= (syndrome ? SUNXI_NAND_CMD_ORDER_SEQ :
            SUNXI_NAND_CMD_ORDER_INTERLEAVE);
    writel(cmd, &nand->cmd);

    if(nand_wait_timeout(&nand->st, SUNXI_NAND_ST_DMA_INT,
                         SUNXI_NAND_ST_DMA_INT)) {
        printf("NAND timeout reading data\n");
        return;
    }

    if(nand_wait_timeout(&dma_cfg->ctl, SUNXI_DMA_CTL_TRIGGER, 0)) {
        printf("NAND timeout reading data\n");
        return;
    }

    if (readl(&nand->ecc_st))
        ecc_errors++;
}