static void __noreturn pnv_power_off(void)
{
	long rc = OPAL_BUSY;

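	/* Retry while firmware reports busy; OPAL_BUSY_EVENT means pending events must be polled before the call can make progress. */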
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_power_down(0);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
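	/* Request accepted; poll events forever while the machine powers off. */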
	for (;;)
		opal_poll_events(NULL);
}

static void __noreturn pnv_restart(char *cmd)
{
	long rc = OPAL_BUSY;

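	/* Same busy/retry idiom as pnv_power_off, here for the reboot request. */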
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
	for (;;)
		opal_poll_events(NULL);
}
Example #3
void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures that anything that can
	 * potentially write to our memory, such as an ongoing dump
	 * retrieval, has completed.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
Example #4
void opal_handle_events(void)
{
	__be64 events = 0;
	u64 e;

	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
again:
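	/* Dispatch each pending event bit, highest bit first, as a Linux interrupt. */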
	while (e) {
		int virq, hwirq;

		hwirq = fls64(e) - 1;
		e &= ~BIT_ULL(hwirq);

		local_irq_disable();
		virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
		if (virq) {
			irq_enter();
			generic_handle_irq(virq);
			irq_exit();
		}
		local_irq_enable();

		cond_resched();
	}
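	/* Clear the cached events and re-poll in case more arrived while dispatching. */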
	last_outstanding_events = 0;
	if (opal_poll_events(&events) != OPAL_SUCCESS)
		return;
	e = be64_to_cpu(events) & opal_event_irqchip.mask;
	if (e)
		goto again;
}
Example #5
static int kopald(void *unused)
{
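	/* Heartbeat kthread: give firmware regular polling opportunities even when idle. */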
	set_freezable();
	do {
		try_to_freeze();
		opal_poll_events(NULL);
		msleep_interruptible(opal_heartbeat);
	} while (!kthread_should_stop());

	return 0;
}
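
For context, a polling thread like kopald is typically created with kthread_run(). A minimal sketch, assuming an illustrative init hook (the name opal_heartbeat_init is not part of this listing):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>

/* Illustrative: start the heartbeat thread once OPAL is initialised. */
static int __init opal_heartbeat_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(kopald, NULL, "kopald");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	return 0;
}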
void opal_notifier_enable(void)
{
	int64_t rc;
	uint64_t evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(evt);
}

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
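	/* Poll firmware first and bail out if no console input is pending. */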
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}
Example #8
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
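	/* Older variant of the previous example, without the __be64 endian annotations. */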
	s64 len, rc;
	u64 evt;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = count;
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return len;
	return 0;
}
Example #9
static int64_t dump_read_data(struct dump_obj *dump)
{
	struct opal_sg_list *list;
	uint64_t addr;
	int64_t rc;

	/* Allocate memory */
	dump->buffer = vzalloc(PAGE_ALIGN(dump->size));
	if (!dump->buffer) {
		pr_err("%s : Failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	/* Generate SG list */
	list = dump_data_to_sglist(dump);
	if (!list) {
		rc = -ENOMEM;
		goto out;
	}

	/* Translate sg list addr to real address */
	sglist_to_phy_addr(list);

	/* First entry address */
	addr = __pa(list);

	/* Fetch data */
	rc = OPAL_BUSY_EVENT;
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_dump_read(dump->id, addr);
		if (rc == OPAL_BUSY_EVENT) {
			opal_poll_events(NULL);
			msleep(20);
		}
	}

	if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL)
		pr_warn("%s: Extract dump failed for ID 0x%x\n",
			__func__, dump->id);

	/* Free SG list */
	free_dump_sg_list(list);

out:
	return rc;
}
Example #10
static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
{
	s64 rc = OPAL_BUSY;
	int off;

	if (*index >= nvram_size)
		return 0;
	off = *index;
	if ((off + count) > nvram_size)
		count = nvram_size - off;

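	/* Retry while firmware is busy; this older version spins without a delay on plain OPAL_BUSY. */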
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_write_nvram(__pa(buf), count, off);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
	}
	*index += count;
	return count;
}
Example #11
/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available. If so, call
	 * opal_poll_events() to pull OPAL messages and process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on OPAL v1, so we just assume there is
	 * enough room and are done with it.
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need it for the console to
		 * flush when there aren't any interrupts. We will clean
		 * this up later to limit polling to synchronous paths
		 * such as the kernel console and xmon/udbg.
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
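
Most of these call sites hand-roll the same OPAL_BUSY/OPAL_BUSY_EVENT retry loop. A minimal sketch of that idiom factored into one helper, assuming an illustrative name (opal_retry_busy and its callback signature are not from the listing):

#include <linux/delay.h>
#include <linux/types.h>

/*
 * Illustrative helper: retry an OPAL call while firmware reports busy.
 * OPAL_BUSY_EVENT means pending events must be polled before the call
 * can make progress; plain OPAL_BUSY just asks the caller to back off.
 */
static s64 opal_retry_busy(s64 (*call)(void *arg), void *arg)
{
	s64 rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = call(arg);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else if (rc == OPAL_BUSY)
			mdelay(10);
	}
	return rc;
}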