Example #1
/*
 * Pump the xsync packet state machine from console input, giving up
 * after 3 seconds of inactivity.
 */
static int do_xsync(int argc, char ** argv)
{
	struct xsync_ctx_t ctx;
	ktime_t timeout = ktime_add_ms(ktime_get(), 3000);
	int c;

	ctx.state = PACKET_STATE_HEADER0;
	ctx.index = 0;
	ctx.fd = -1;
	ctx.quit = 0;

	while(ctx.quit == 0)
	{
		if((c = getchar()) < 0)
		{
			if(ktime_after(ktime_get(), timeout))
			{
				ctx.quit = 1;
				if(ctx.fd > 0)
				{
					vfs_close(ctx.fd);
					ctx.fd = -1;
				}
			}
			continue;
		}

		if(xsync_get(&ctx, c) < 0)
			continue;

		xsync_handle(&ctx);
		timeout = ktime_add_ms(ktime_get(), 3000);
	}
	return 0;
}
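
The loop above implements a rolling inactivity timeout: every handled packet pushes the deadline another 3000ms into the future, so only a full 3 seconds of silence ends the session. A minimal sketch of the same shape, where read_byte() and handle_byte() are illustrative stand-ins for the getchar()/xsync machinery, not real APIs:

/* read_byte() (< 0 when no data) and handle_byte() are hypothetical */
extern int read_byte(void);
extern void handle_byte(int c);

static void run_with_inactivity_timeout(int quiet_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), quiet_ms);
	int c;

	for(;;)
	{
		if((c = read_byte()) < 0)
		{
			/* no input: give up only once the quiet period has elapsed */
			if(ktime_after(ktime_get(), deadline))
				break;
			continue;
		}
		handle_byte(c);
		/* activity: push the deadline out again */
		deadline = ktime_add_ms(ktime_get(), quiet_ms);
	}
}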
Example #2
/*
 * Poll until the controller reports idle (SPI_SR bit 0 clear),
 * allowing it at most 1ms to get there.
 */
static bool_t rk3128_spi_wait_till_not_busy(struct spi_rk3128_pdata_t * pdat)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), 1);

	do {
		if(!(read32(pdat->virt + SPI_SR) & (1 << 0)))
			return TRUE;
	} while(ktime_before(ktime_get(), timeout));

	return FALSE;
}
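
Example #2 is the classic bounded busy-poll: spin on a condition, but never for longer than the budget fixed up front with ktime_add_ms(). Generalized over the condition it looks like the sketch below; wait_for() and the cond callback are illustrative, only the ktime calls are the real API:

static bool_t wait_for(bool_t (*cond)(void * arg), void * arg, int budget_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), budget_ms);

	do {
		if(cond(arg))
			return TRUE;
	} while(ktime_before(ktime_get(), deadline));

	return FALSE;
}

Because the condition is sampled before the deadline, the loop always makes at least one attempt, even with a zero budget.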
Example #3
/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	ktime_t now = ktime_get_real(), expire_at;

	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
	call->expire_at = expire_at;
	call->ack_at = expire_at;
	call->ping_at = expire_at;
	call->resend_at = expire_at;
	call->timer.expires = jiffies + LONG_MAX / 2;	/* park far out; rxrpc_set_timer() pulls it in */
	rxrpc_set_timer(call, rxrpc_timer_begin, now);
}
Example #4
/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
			       bool immediate, bool background)
{
	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
	} else {
		ktime_t now = ktime_get_real();
		ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);

		if (ktime_before(ping_at, call->ping_at)) {
			call->ping_at = ping_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
		}
	}
}
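
The else branch shows the "earlier wins" rule that recurs throughout rxrpc's timer code: a proposed deadline replaces the stored one only if it would fire sooner, so a pending earlier event is never pushed back. Distilled, with a hypothetical struct and a commented-out reschedule hook standing in for the call and rxrpc_set_timer():

struct timed_obj {
	ktime_t fire_at;		/* illustrative, not an rxrpc type */
};

static void propose_deadline(struct timed_obj *obj, u32 delay_ms)
{
	ktime_t now = ktime_get_real();
	ktime_t candidate = ktime_add_ms(now, delay_ms);

	if (ktime_before(candidate, obj->fire_at)) {
		obj->fire_at = candidate;
		/* reschedule(obj, now);  -- kick the underlying timer */
	}
}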
Example #5
static int decompress(struct nx842_crypto_ctx *ctx,
		      struct nx842_crypto_param *p,
		      struct nx842_crypto_header_group *g,
		      struct nx842_constraints *c,
		      u16 ignore)
{
	unsigned int slen = be32_to_cpu(g->compressed_length);
	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
	unsigned int dlen = p->oremain, tmplen;
	unsigned int adj_slen = slen;
	u8 *src = p->in, *dst = p->out;
	u16 padding = be16_to_cpu(g->padding);
	int ret, spadding = 0;
	ktime_t timeout;

	if (!slen || !required_len)
		return -EINVAL;

	if (p->iremain <= 0 || padding + slen > p->iremain)
		return -EOVERFLOW;

	if (p->oremain <= 0 || required_len - ignore > p->oremain)
		return -ENOSPC;

	src += padding;

	if (slen % c->multiple)
		adj_slen = round_up(slen, c->multiple);
	if (slen < c->minimum)
		adj_slen = c->minimum;
	if (slen > c->maximum)
		goto usesw;
	if (slen < adj_slen || (u64)src % c->alignment) {
		/* we can append padding bytes because the 842 format defines
		 * an "end" template (see lib/842/842_decompress.c) and will
		 * ignore any bytes following it.
		 */
		if (slen < adj_slen)
			memset(ctx->sbounce + slen, 0, adj_slen - slen);
		memcpy(ctx->sbounce, src, slen);
		src = ctx->sbounce;
		spadding = adj_slen - slen;
		slen = adj_slen;
		pr_debug("using decomp sbounce buffer, len %x\n", slen);
	}

	if (dlen % c->multiple)
		dlen = round_down(dlen, c->multiple);
	if (dlen < required_len || (u64)dst % c->alignment) {
		dst = ctx->dbounce;
		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
	}
	if (dlen < c->minimum)
		goto usesw;
	if (dlen > c->maximum)
		dlen = c->maximum;

	tmplen = dlen;
	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
	do {
		dlen = tmplen; /* reset dlen, if we're retrying */
		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
	if (ret) {
usesw:
		/* reset everything, sw doesn't have constraints */
		src = p->in + padding;
		slen = be32_to_cpu(g->compressed_length);
		spadding = 0;
		dst = p->out;
		dlen = p->oremain;
		if (dlen < required_len) { /* have ignore bytes */
			dst = ctx->dbounce;
			dlen = BOUNCE_BUFFER_SIZE;
		}
		pr_info_ratelimited("using software 842 decompression\n");
		ret = sw842_decompress(src, slen, dst, &dlen);
	}
	if (ret)
		return ret;

	slen -= spadding;

	dlen -= ignore;
	if (ignore)
		pr_debug("ignoring last %x bytes\n", ignore);

	if (dst == ctx->dbounce)
		memcpy(p->out, dst, dlen);

	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
		 slen, padding, dlen, ignore);

	return update_param(p, slen + padding, dlen);
}
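
decompress() and compress() below drive the hardware with the same retry skeleton: keep calling while the driver reports -EBUSY, stop once the wall-clock budget set with ktime_add_ms() runs out, and restore the output length before every attempt because the callee overwrites it. Isolated, with hw_op() as a hypothetical stand-in for the ctx->driver callback:

/* hw_op() is hypothetical; like the nx842 driver callbacks, it
 * rewrites *dlen on every call */
extern int hw_op(u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen);

static int retry_while_busy(unsigned int budget_ms, u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), budget_ms);
	unsigned int tmplen = *dlen;
	int ret;

	do {
		*dlen = tmplen;		/* reset, in case we're retrying */
		ret = hw_op(src, slen, dst, dlen);
	} while (ret == -EBUSY && ktime_before(ktime_get(), deadline));

	return ret;
}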
Example #6
static int compress(struct nx842_crypto_ctx *ctx,
		    struct nx842_crypto_param *p,
		    struct nx842_crypto_header_group *g,
		    struct nx842_constraints *c,
		    u16 *ignore,
		    unsigned int hdrsize)
{
	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
	unsigned int adj_slen = slen;
	u8 *src = p->in, *dst = p->out;
	int ret, dskip = 0;
	ktime_t timeout;

	if (p->iremain == 0)
		return -EOVERFLOW;

	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
		return -ENOSPC;

	if (slen % c->multiple)
		adj_slen = round_up(slen, c->multiple);
	if (slen < c->minimum)
		adj_slen = c->minimum;
	if (slen > c->maximum)
		adj_slen = slen = c->maximum;
	if (adj_slen > slen || (u64)src % c->alignment) {
		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
		slen = min(slen, BOUNCE_BUFFER_SIZE);
		if (adj_slen > slen)
			memset(ctx->sbounce + slen, 0, adj_slen - slen);
		memcpy(ctx->sbounce, src, slen);
		src = ctx->sbounce;
		slen = adj_slen;
		pr_debug("using comp sbounce buffer, len %x\n", slen);
	}

	dst += hdrsize;
	dlen -= hdrsize;

	if ((u64)dst % c->alignment) {
		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
		dst += dskip;
		dlen -= dskip;
	}
	if (dlen % c->multiple)
		dlen = round_down(dlen, c->multiple);
	if (dlen < c->minimum) {
nospc:
		dst = ctx->dbounce;
		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
		dlen = round_down(dlen, c->multiple);
		dskip = 0;
		pr_debug("using comp dbounce buffer, len %x\n", dlen);
	}
	if (dlen > c->maximum)
		dlen = c->maximum;

	tmplen = dlen;
	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
	do {
		dlen = tmplen; /* reset dlen, if we're retrying */
		ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
		/* possibly we should reduce the slen here, instead of
		 * retrying with the dbounce buffer?
		 */
		if (ret == -ENOSPC && dst != ctx->dbounce)
			goto nospc;
	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
	if (ret)
		return ret;

	dskip += hdrsize;

	if (dst == ctx->dbounce)
		memcpy(p->out + dskip, dst, dlen);

	g->padding = cpu_to_be16(dskip);
	g->compressed_length = cpu_to_be32(dlen);
	g->uncompressed_length = cpu_to_be32(slen);

	if (p->iremain < slen) {
		*ignore = slen - p->iremain;
		slen = p->iremain;
	}

	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
		 slen, *ignore, dlen, dskip);

	return update_param(p, slen, dskip + dlen);
}
Example #7
/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t cursor, seq, top;
	ktime_t max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
		sp = rxrpc_skb(skb);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}
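
Note the symmetry in the deadline arithmetic: a packet is due for retransmission once its timestamp falls behind max_age = now - rxrpc_resend_timeout, and the oldest packet that survives the scan crosses that boundary exactly at oldest + rxrpc_resend_timeout, which is what resend_at is set to. The two sides of the invariant, written out as sketches rather than rxrpc code:

static bool tx_expired(ktime_t tstamp, ktime_t now, u32 timeout_ms)
{
	/* expired  <=>  tstamp <= now - timeout_ms */
	return !ktime_after(tstamp, ktime_sub_ms(now, timeout_ms));
}

static ktime_t next_resend_scan(ktime_t oldest_live_tstamp, u32 timeout_ms)
{
	/* the oldest surviving packet expires timeout_ms after it was sent */
	return ktime_add_ms(oldest_live_tstamp, timeout_ms);
}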
Example #8
/*
 * propose an ACK be sent
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u16 skew, u32 serial, bool immediate,
				bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned int expiry = rxrpc_soft_ack_delay;
	ktime_t now, ack_at;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
			call->ackr_skew = skew;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
		call->ackr_skew = skew;
	} else {
		outcome = rxrpc_propose_ack_subsume;
	}

	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		now = ktime_get_real();
		ack_at = ktime_add_ms(now, expiry);
		if (ktime_before(ack_at, call->ack_at)) {
			call->ack_at = ack_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}
Example #9
/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
			   bool retrans)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	size_t len;
	bool lost = false;
	int ret, opt;

	_enter(",{%d}", skb->len);

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);

	whdr.epoch	= htonl(conn->proto.epoch);
	whdr.cid	= htonl(call->cid);
	whdr.callNumber	= htonl(call->call_id);
	whdr.seq	= htonl(sp->hdr.seq);
	whdr.serial	= htonl(serial);
	whdr.type	= RXRPC_PACKET_TYPE_DATA;
	whdr.flags	= sp->hdr.flags;
	whdr.userStatus	= 0;
	whdr.securityIndex = call->security_ix;
	whdr._rsvd	= htons(sp->hdr._rsvd);
	whdr.serviceId	= htons(call->service_id);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = skb->head;
	iov[1].iov_len = skb->len;
	len = iov[0].iov_len + iov[1].iov_len;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 */
	if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
	    (retrans ||
	     call->cong_mode == RXRPC_CALL_SLOW_START ||
	     (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
			  ktime_get_real())))
		whdr.flags |= RXRPC_REQUEST_ACK;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			ret = 0;
			lost = true;
			goto done;
		}
	}

	_proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (iov[1].iov_len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);
	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and updated the peer record
	 */
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);

	up_read(&conn->params.local->defrag_sem);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
			    retrans, lost);
	if (ret >= 0) {
		ktime_t now = ktime_get_real();
		skb->tstamp = now;
		smp_wmb();
		sp->hdr.serial = serial;
		if (whdr.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = now;
			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
		}
	}
	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	switch (conn->params.local->srx.transport.family) {
	case AF_INET:
		opt = IP_PMTUDISC_DONT;
		ret = kernel_setsockopt(conn->params.local->socket,
					SOL_IP, IP_MTU_DISCOVER,
					(char *)&opt, sizeof(opt));
		if (ret == 0) {
			ret = kernel_sendmsg(conn->params.local->socket, &msg,
					     iov, 2, len);

			opt = IP_PMTUDISC_DO;
			kernel_setsockopt(conn->params.local->socket, SOL_IP,
					  IP_MTU_DISCOVER,
					  (char *)&opt, sizeof(opt));
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		opt = IPV6_PMTUDISC_DONT;
		ret = kernel_setsockopt(conn->params.local->socket,
					SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *)&opt, sizeof(opt));
		if (ret == 0) {
			ret = kernel_sendmsg(conn->params.local->socket, &msg,
					     iov, 1, iov[0].iov_len);

			opt = IPV6_PMTUDISC_DO;
			kernel_setsockopt(conn->params.local->socket,
					  SOL_IPV6, IPV6_MTU_DISCOVER,
					  (char *)&opt, sizeof(opt));
		}
		break;
#endif
	}

	up_write(&conn->params.local->defrag_sem);
	goto done;
}
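
The RTT clause in the REQUEST_ACK test above is a rate limiter built from the same primitives: request another RTT-sampling ACK only if at least one second has passed since rtt_last_req. As a standalone predicate (the name and signature here are illustrative):

static bool interval_elapsed(ktime_t last, u32 interval_ms)
{
	/* mirrors ktime_before(ktime_add_ms(last, 1000), ktime_get_real()) */
	return ktime_before(ktime_add_ms(last, interval_ms), ktime_get_real());
}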
Example #10
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	ktime_t timeout;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: it must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	timeout = ktime_add_ms(ktime_get(), 1000);

	/* uninterruptible wait */
	msm_wait_fence(dev, c->fence, &timeout, false);

	complete_commit(c);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
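
In the synchronous path, ktime_add_ms() converts the relative 1-second budget into the absolute deadline that msm_wait_fence() expects. The conversion belongs at the call boundary; a sketch, with wait_until() as a hypothetical stand-in for any primitive taking an absolute ktime_t deadline:

/* wait_until() is hypothetical; msm_wait_fence() plays this role above */
extern int wait_until(ktime_t *deadline);

static int wait_for_budget_ms(unsigned int budget_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), budget_ms);

	return wait_until(&deadline);
}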