Example #1
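AF_RXRPC's call timer: it picks the nearest of the expire, resend, ack and ping deadlines with ktime_after()/ktime_before(), converts the remaining time to jiffies for mod_timer() (rounding up by one jiffy to avoid firing early), and queues the call directly when an event is already due.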
/*
 * Set the timer
 */
void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
		       ktime_t now)
{
	unsigned long t_j, now_j = jiffies;
	ktime_t t;
	bool queue = false;

	if (call->state < RXRPC_CALL_COMPLETE) {
		t = call->expire_at;
		if (!ktime_after(t, now)) {
			trace_rxrpc_timer(call, why, now, now_j);
			queue = true;
			goto out;
		}

		if (!ktime_after(call->resend_at, now)) {
			call->resend_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
				queue = true;
		} else if (ktime_before(call->resend_at, t)) {
			t = call->resend_at;
		}

		if (!ktime_after(call->ack_at, now)) {
			call->ack_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
				queue = true;
		} else if (ktime_before(call->ack_at, t)) {
			t = call->ack_at;
		}

		if (!ktime_after(call->ping_at, now)) {
			call->ping_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
				queue = true;
		} else if (ktime_before(call->ping_at, t)) {
			t = call->ping_at;
		}

		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
		t_j += jiffies;

		/* We have to make sure that the calculated jiffies value falls
		 * at or after the nsec value, or we may loop ceaselessly
		 * because the timer times out, but we haven't reached the nsec
		 * timeout yet.
		 */
		t_j++;

		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
			mod_timer(&call->timer, t_j);
			trace_rxrpc_timer(call, why, now, now_j);
		}
	}

out:
	if (queue)
		rxrpc_queue_call(call);
}
Example #2
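A packet-receive loop, apparently from a bare-metal environment that mirrors the kernel's ktime API: each completed packet pushes the 3-second deadline forward with ktime_add_ms(), and ktime_after() ends the loop once input has stalled past the deadline.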
static int do_xsync(int argc, char ** argv)
{
	struct xsync_ctx_t ctx;
	ktime_t timeout = ktime_add_ms(ktime_get(), 3000);
	int c;

	ctx.state = PACKET_STATE_HEADER0;
	ctx.index = 0;
	ctx.fd = -1;
	ctx.quit = 0;

	while(ctx.quit == 0)
	{
		if((c = getchar()) < 0)
		{
			if(ktime_after(ktime_get(), timeout))
			{
				ctx.quit = 1;
				if(ctx.fd > 0)
				{
					vfs_close(ctx.fd);
					ctx.fd = -1;
				}
			}
			continue;
		}

		if(xsync_get(&ctx, c) < 0)
			continue;

		xsync_handle(&ctx);
		timeout = ktime_add_ms(ktime_get(), 3000);
	}
	return 0;
}
Example #3
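The watchdog core's ping path: if ktime_after() shows that last_hw_keepalive plus min_hw_heartbeat_ms is still in the future, the hardware is left alone and an hrtimer is started for the remainder; otherwise the device is pinged (or restarted) now.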
static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);  /* ping the watchdog */
	else
		err = wdd->ops->start(wdd); /* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}
Example #4
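ACPI PCC channel check: polls the shared-memory status word for the command-complete bit, using ktime_after() against a precomputed deadline as the loop bound and udelay(3) as a back-off between reads.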
static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized
		 * by the platform, which should have set the command
		 * completion bit by the time PCC can be used by OSPM.
		 */
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reduce bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	return ret;
}
Example #5
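Device-mapper request merging: returns true while ktime_get() is not yet after last_rq_start_time plus the configured merge deadline, i.e. while it is still worth waiting for sequential requests to merge.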
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}
Example #6
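An sdhci-arasan PHY poll: the 100 microsecond timeout is sampled with ktime_after() before the register read, so one more read always happens after the deadline and a last-moment success is not misreported as -EBUSY.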
static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 100);
	bool failed;
	u8 val = 0;

	while (1) {
		failed = ktime_after(ktime_get(), timeout);
		val = sdhci_readw(host, PHY_ADDR_REG);
		if (!(val & mask))
			return 0;
		if (failed)
			return -EBUSY;
	}
}
Example #7
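The same timeout idiom as Example #6, except that the register access goes through arasan_phy_read() and a read failure aborts the poll immediately.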
static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
{
	int ret;
	ktime_t timeout = ktime_add_us(ktime_get(), 100);
	bool failed;
	u8 val = 0;

	while (1) {
		failed = ktime_after(ktime_get(), timeout);
		ret = arasan_phy_read(host, offset, &val);
		if (ret)
			return -EBUSY;
		else if (val & mask)
			return 0;
		if (failed)
			return -EBUSY;
	}
}
Example #8
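The CPPC variant of Example #4: the deadline comes from per-subspace data, channel ownership is tracked in platform_owns_pcc, and an error bit can optionally be checked once the command-complete bit is seen.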
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(),
					  pcc_ss_data->deadline);

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized
		 * by the platform, which should have set the command
		 * completion bit by the time PCC can be used by OSPM.
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reduce bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	if (likely(!ret))
		pcc_ss_data->platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed. Status=%x\n", status);

	return ret;
}
Example #9
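The ACPI button driver: ktime_after() against last_time plus lid_report_interval detects firmware that keeps reporting an unchanged lid state, in which case a complement switch event may be injected so userspace still sees the reliable "close" event.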
static int acpi_lid_notify_state(struct acpi_device *device, int state)
{
	struct acpi_button *button = acpi_driver_data(device);
	int ret;
	ktime_t next_report;
	bool do_update;

	/*
	 * In lid_init_state=ignore mode, if the user opens/closes the lid
	 * frequently with the "open" events missing, and "last_time" is
	 * also updated frequently, "close" can never be delivered to the
	 * userspace. So "last_time" is only updated after a timeout or an
	 * actual switch.
	 */
	if (lid_init_state != ACPI_BUTTON_LID_INIT_IGNORE ||
	    button->last_state != !!state)
		do_update = true;
	else
		do_update = false;

	next_report = ktime_add(button->last_time,
				ms_to_ktime(lid_report_interval));
	if (button->last_state == !!state &&
	    ktime_after(ktime_get(), next_report)) {
		/* Complain about the buggy firmware */
		pr_warn_once("The lid device is not compliant to SW_LID.\n");

		/*
		 * Send the unreliable complement switch event:
		 *
		 * On most platforms, the lid device is reliable. However
		 * there are exceptions:
		 * 1. Platforms returning initial lid state as "close" by
		 *    default after booting/resuming:
		 *     https://bugzilla.kernel.org/show_bug.cgi?id=89211
		 *     https://bugzilla.kernel.org/show_bug.cgi?id=106151
		 * 2. Platforms never reporting "open" events:
		 *     https://bugzilla.kernel.org/show_bug.cgi?id=106941
		 * On these buggy platforms, the usage model of the ACPI
		 * lid device actually is:
		 * 1. The initial returning value of _LID may not be
		 *    reliable.
		 * 2. The open event may not be reliable.
		 * 3. The close event is reliable.
		 *
		 * But SW_LID is typed as an input switch event, and the
		 * input layer discards redundant events. Hence if the
		 * state has not switched, the userspace cannot see this
		 * platform-triggered reliable event. By inserting a
		 * complement switch event, it is guaranteed that the
		 * platform-triggered reliable one can always be seen by
		 * the userspace.
		 */
		if (lid_init_state == ACPI_BUTTON_LID_INIT_IGNORE) {
			do_update = true;
			/*
			 * Do generate a complement switch event for "close",
			 * as "close" is reliable and a wrong "open" won't
			 * trigger unexpected behavior.
			 * Do not generate one for "open", as "open" is not
			 * reliable and a wrong "close" will trigger
			 * unexpected behavior.
			 */
			if (!state) {
				input_report_switch(button->input,
						    SW_LID, state);
				input_sync(button->input);
			}
		}
	}
	/* Send the platform-triggered reliable event */
	if (do_update) {
		acpi_handle_debug(device->handle, "ACPI LID %s\n",
				  state ? "open" : "closed");
		input_report_switch(button->input, SW_LID, !state);
		input_sync(button->input);
		button->last_state = !!state;
		button->last_time = ktime_get();
	}

	if (state)
		acpi_pm_wakeup_event(&device->dev);

	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
	if (ret == NOTIFY_DONE)
		ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
						   device);
	if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
		/*
		 * It is also regarded as success if the notifier_chain
		 * returns NOTIFY_OK or NOTIFY_DONE.
		 */
		ret = 0;
	}
	return ret;
}
Example #10
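A readiness-or-timeout predicate: true once the device reports ready or once ktime_after() shows that the caller-supplied timeout has passed, intended as the condition of a caller's wait loop.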
static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}
Example #11
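An ALSA FireWire MIDI work item: while next_ktime is still in the future it simply reschedules itself; after filling a buffer it advances next_ktime by the time the consumed bytes occupy on a 31250 bps MIDI link, pacing the transactions.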
static void midi_port_work(struct work_struct *work)
{
	struct snd_fw_async_midi_port *port =
			container_of(work, struct snd_fw_async_midi_port, work);
	struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
	int generation;
	int type;

	/* A transaction is in flight or the port is in the error state. */
	if (!port->idling || port->error)
		return;

	/* Nothing to do. */
	if (substream == NULL || snd_rawmidi_transmit_empty(substream))
		return;

	/* Not yet due; do it at the next chance. */
	if (ktime_after(port->next_ktime, ktime_get())) {
		schedule_work(&port->work);
		return;
	}

	/*
	 * Fill the buffer. The callee must use snd_rawmidi_transmit_peek().
	 * Later, snd_rawmidi_transmit_ack() is called.
	 */
	memset(port->buf, 0, port->len);
	port->consume_bytes = port->fill(substream, port->buf);
	if (port->consume_bytes <= 0) {
		/* Do it at the next chance, immediately. */
		if (port->consume_bytes == 0) {
			port->next_ktime = ktime_set(0, 0);
			schedule_work(&port->work);
		} else {
			/* Fatal error. */
			port->error = true;
		}
		return;
	}

	/* Calculate type of transaction. */
	if (port->len == 4)
		type = TCODE_WRITE_QUADLET_REQUEST;
	else
		type = TCODE_WRITE_BLOCK_REQUEST;

	/* Set interval to next transaction. */
	port->next_ktime = ktime_add_ns(ktime_get(),
				port->consume_bytes * 8 * NSEC_PER_SEC / 31250);

	/* Start this transaction. */
	port->idling = false;

	/*
	 * In the Linux FireWire core, by the time the generation is updated
	 * with a memory barrier, the node ID has already been updated. In
	 * this module, after this smp_rmb(), loads/stores to memory have
	 * completed, so both the generation and the node ID are read with
	 * recent values. This is a lightweight serialization to handle bus
	 * reset events on the IEEE 1394 bus.
	 */
	generation = port->parent->generation;
	smp_rmb();

	fw_send_request(port->parent->card, &port->transaction, type,
			port->parent->node_id, generation,
			port->parent->max_speed, port->addr,
			port->buf, port->len, async_midi_port_callback,
			port);
}
Example #12
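AF_RXRPC retransmission: ktime_after(skb->tstamp, max_age) decides whether an unacked packet is still young enough to wait on, the oldest surviving timestamp feeds the new resend_at, and a ping ACK is sent instead when nothing needed resending.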
/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t cursor, seq, top;
	ktime_t max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
		sp = rxrpc_skb(skb);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}
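Examples #4, #6, #7 and #8 all share the same deadline-polling idiom: record a deadline up front with ktime_get() plus ktime_add_us()/ktime_add(), then loop until the condition holds or ktime_after(ktime_get(), deadline) reports expiry. The sketch below distills that pattern; poll_until_ready() and its ready() callback are hypothetical placeholders for illustration, not taken from any of the drivers above.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/types.h>

/* Hypothetical helper: poll until ready() reports true or timeout_us elapses. */
static int poll_until_ready(bool (*ready)(void), unsigned int timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	while (1) {
		/* Sample the clock before the check so that one more poll
		 * always happens after the deadline passes; this avoids
		 * misreporting a last-moment success as a timeout, as in
		 * Examples #6 and #7.
		 */
		bool expired = ktime_after(ktime_get(), deadline);

		if (ready())
			return 0;
		if (expired)
			return -ETIMEDOUT;
		udelay(3);	/* back off a little, as in Examples #4 and #8 */
	}
}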