Example no. 1
/*
 * The GPE handler is called when IBE/OBF or SCI events occur.  We are
 * called from an unknown lock context.
 */
static UINT32
EcGpeHandler(ACPI_HANDLE GpeDevice, UINT32 GpeNumber, void *Context)
{
    struct acpi_ec_softc *sc = Context;
    ACPI_STATUS		       Status;
    EC_STATUS		       EcStatus;

    KASSERT(Context != NULL, ("EcGpeHandler called with NULL"));
    CTR0(KTR_ACPI, "ec gpe handler start");

    /*
     * Notify EcWaitEvent() that the status register is now fresh.  If we
     * didn't do this, it wouldn't be possible to distinguish an old IBE
     * from a new one, for example when doing a write transaction (writing
 * address and then data values).
     */
    atomic_add_int(&sc->ec_gencount, 1);
    wakeup(sc);

    /*
     * If the EC_SCI bit of the status register is set, queue a query handler.
     * It will run the query and _Qxx method later, under the lock.
     */
    EcStatus = EC_GET_CSR(sc);
    if ((EcStatus & EC_EVENT_SCI) && !sc->ec_sci_pend) {
	CTR0(KTR_ACPI, "ec gpe queueing query handler");
	Status = AcpiOsExecute(OSL_GPE_HANDLER, EcGpeQueryHandler, Context);
	if (ACPI_SUCCESS(Status)) {
	    sc->ec_sci_pend = TRUE;
	    return (0);
	} else
	    printf("EcGpeHandler: queuing GPE query handler failed\n");
    }
    return (ACPI_REENABLE_GPE);
}
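The counter bumped above is half of a handshake with EcWaitEvent() (shown in full in later examples): a waiter snapshots ec_gencount before starting a transaction, and any change proves the status register was refreshed by a GPE after the snapshot. A minimal sketch of that consumer side; this condensed helper is illustrative, not part of the original driver:

static void
ec_wait_fresh_status(struct acpi_ec_softc *sc, u_int snapshot)
{
    /*
     * Sleep until EcGpeHandler() increments ec_gencount and calls
     * wakeup(sc); a changed count means the CSR contents postdate our
     * snapshot, so a "ready" status cannot be a stale leftover.
     */
    while (snapshot == sc->ec_gencount)
	tsleep(sc, 0, "ecgpe", hz / 1000);
}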
Example no. 2
static void
ntb_rx_completion_task(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct mbuf *m;
	struct ntb_queue_entry *entry;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) {
		m = entry->buf;
		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
			qp->rx_handler(qp, qp->cb_data, m, entry->len);

		entry->buf = NULL;
		entry->len = qp->transport->bufsize;

		CTR1(KTR_NTB, "RX: entry %p removed from rx_free_q "
		    "and added to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		if (qp->rx_err_no_buf > qp->last_rx_no_buf) {
			qp->last_rx_no_buf = qp->rx_err_no_buf;
			CTR0(KTR_NTB, "RX: could spawn rx task");
			callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full,
			    qp);
		}
	}
}
Example no. 3
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
static int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");
		return (EINVAL);
	}

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (entry == NULL) {
		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
		return (ENOMEM);
	}
	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc != 0) {
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
		    entry);
	}
	return (rc);
}
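The ntb_list_rm()/ntb_list_add() helpers used here are not reproduced in this collection. A plausible minimal implementation, assuming struct ntb_queue_list is a STAILQ_HEAD of struct ntb_queue_entry linked through a field named entry and guarded by spin mutexes (the real driver's version may differ):

static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	entry = STAILQ_FIRST(list);
	if (entry != NULL)
		STAILQ_REMOVE_HEAD(list, entry);
	mtx_unlock_spin(lock);
	return (entry);
}

static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{
	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);
}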
Example no. 4
static ACPI_STATUS
EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 Data)
{
    ACPI_STATUS	status;
    UINT8 data;
    u_int gen_count;

    ACPI_SERIAL_ASSERT(ec);
    CTR2(KTR_ACPI, "ec write to %#x, data %#x", Address, Data);

    /* If we can't start burst mode, continue anyway. */
    status = EcCommand(sc, EC_COMMAND_BURST_ENABLE);
    if (status == AE_OK) {
    	data = EC_GET_DATA(sc);
	if (data == EC_BURST_ACK) {
	    CTR0(KTR_ACPI, "ec burst enabled");
	    sc->ec_burstactive = TRUE;
	}
    }

    status = EcCommand(sc, EC_COMMAND_WRITE);
    if (ACPI_FAILURE(status))
	return (status);

    gen_count = sc->ec_gencount;
    EC_SET_DATA(sc, Address);
    status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->ec_dev, "EcWrite: failed waiting for sent address\n");
	return (status);
    }

    gen_count = sc->ec_gencount;
    EC_SET_DATA(sc, Data);
    status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->ec_dev, "EcWrite: failed waiting for sent data\n");
	return (status);
    }

    if (sc->ec_burstactive) {
	sc->ec_burstactive = FALSE;
	status = EcCommand(sc, EC_COMMAND_BURST_DISABLE);
	if (ACPI_FAILURE(status))
	    return (status);
	CTR0(KTR_ACPI, "ec disabled burst ok");
    }

    return (AE_OK);
}
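EcRead(), named in the corrected error message above, is the mirror image of EcWrite(): issue the read command, send the address, then wait for the output buffer to fill before pulling the byte out. A condensed sketch with burst handling omitted; the command and event names follow the surrounding examples, but this is not the driver's actual function:

static ACPI_STATUS
EcReadSketch(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
{
    ACPI_STATUS status;
    u_int gen_count;

    ACPI_SERIAL_ASSERT(ec);
    status = EcCommand(sc, EC_COMMAND_READ);
    if (ACPI_FAILURE(status))
	return (status);

    gen_count = sc->ec_gencount;
    EC_SET_DATA(sc, Address);
    status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL, gen_count);
    if (ACPI_FAILURE(status))
	return (status);

    /* The EC has latched the byte for us; pull it out of the data port. */
    *Data = EC_GET_DATA(sc);
    return (AE_OK);
}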
Example no. 5
/*
 * Handle an IPI sent to this processor.
 */
intrmask_t
smp_handle_ipi(struct trapframe *frame)
{
	cpumask_t cpumask;		/* This cpu mask */
	u_int	ipi, ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	cpumask = PCPU_GET(cpumask);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
	while (ipi_bitmap) {
		/*
		 * Find the lowest set bit.
		 */
		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_INVLTLB:
			CTR0(KTR_SMP, "IPI_INVLTLB");
			break;

		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
			atomic_set_int(&stopped_cpus, cpumask);

			while ((started_cpus & cpumask) == 0)
			    ;
			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			break;
		}
	}
	return (CR_INT_IPI);
}
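The expression ipi_bitmap & ~(ipi_bitmap - 1) above isolates the lowest set bit: subtracting 1 flips the trailing zeros and the lowest one bit, so complementing and masking leaves only that bit standing. The loop can be checked in a few lines of userland C:

#include <stdio.h>

int
main(void)
{
	unsigned bitmap = 0x2c;		/* bits 2, 3 and 5 set */
	unsigned ipi;

	while (bitmap != 0) {
		ipi = bitmap & ~(bitmap - 1);	/* lowest set bit */
		printf("servicing %#x\n", ipi);	/* 0x4, then 0x8, then 0x20 */
		bitmap &= ~ipi;			/* clear it and move on */
	}
	return (0);
}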
Example no. 6
static void
ntb_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct ntb_netdev *nt = ifp->if_softc;
	int rc;

	mtx_lock(&nt->tx_lock);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	CTR0(KTR_NTB, "TX: ntb_start");
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
		rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
			     m_length(m_head, NULL));
		if (rc != 0) {
			CTR1(KTR_NTB,
			    "TX: could not tx mbuf %p. Returning to snd q",
			    m_head);
			if (rc == EAGAIN) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				callout_reset(&nt->qp->queue_full, hz / 1000,
				    ntb_qp_full, ifp);
			}
			break;
		}

	}
	mtx_unlock(&nt->tx_lock);
}
Example no. 7
static void
ntb_rx_pendq_full(void *arg)
{

	CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout");
	ntb_transport_rx(arg);
}
Example no. 8
int
drm_poll(struct cdev *kdev, int events, struct thread *td)
{
	struct drm_file *file_priv;
	struct drm_device *dev;
	int error, revents;

	error = devfs_get_cdevpriv((void **)&file_priv);
	if (error != 0) {
		DRM_ERROR("can't find authenticator\n");
		return (EINVAL);
	}
	dev = drm_get_device_from_kdev(kdev);

	revents = 0;
	mtx_lock(&dev->event_lock);
	if ((events & (POLLIN | POLLRDNORM)) != 0) {
		if (list_empty(&file_priv->event_list)) {
			CTR0(KTR_DRM, "drm_poll empty list");
			selrecord(td, &file_priv->event_poll);
		} else {
			revents |= events & (POLLIN | POLLRDNORM);
			CTR1(KTR_DRM, "drm_poll revents %x", revents);
		}
	}
	mtx_unlock(&dev->event_lock);
	return (revents);
}
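selrecord() above only parks the polling thread; the producer that queues an event must call selwakeup() on the same selinfo or the poller never re-runs. A plausible sketch of that side, assuming the same field names as drm_poll() and an event type whose list linkage is named link (this helper is illustrative, not taken from the original sources):

static void
drm_queue_event_locked(struct drm_device *dev, struct drm_file *file_priv,
    struct drm_pending_event *e)
{
	mtx_assert(&dev->event_lock, MA_OWNED);
	list_add_tail(&e->link, &file_priv->event_list);
	/* Wake any thread that drm_poll() put to sleep via selrecord(). */
	selwakeup(&file_priv->event_poll);
}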
Example no. 9
struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
Example no. 10
static void
ntb_qp_full(void *arg)
{

	CTR0(KTR_NTB, "TX: qp_full callout");
	ntb_start(arg);
}
Example no. 11
void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);
}
Example no. 12
static void
ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{
	struct mbuf *m = data;
	struct ifnet *ifp = qp_data;

	CTR0(KTR_NTB, "RX: rx handler");
	(*ifp->if_input)(ifp, m);
}
Example no. 13
void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
}
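ioat_acquire() and ioat_release() bracket a batch: acquire takes the submit lock, each operation queued in between appends descriptors and advances hw_head, and release publishes the final hw_head to the DMACOUNT register so the hardware sees the whole batch in one doorbell write. A usage sketch; the helper and its arguments are illustrative:

static void
submit_copy_batch(bus_dmaengine_t dma, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, bus_dmaengine_callback_t cb, void *arg)
{

	ioat_acquire(dma);
	/* Error handling elided: ioat_copy() returns NULL on failure. */
	(void)ioat_copy(dma, dst, src, len, NULL, NULL, 0);
	(void)ioat_copy(dma, dst + len, src + len, len, cb, arg, 0);
	ioat_release(dma);	/* one DMACOUNT write covers both copies */
}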
Example no. 14
static void
ntb_transport_rx(struct ntb_transport_qp *qp)
{
	int rc, i;

	/*
	 * Limit the number of packets processed in a single interrupt to
	 * provide fairness to others.
	 */
	mtx_lock(&qp->transport->rx_lock);
	CTR0(KTR_NTB, "RX: transport_rx");
	for (i = 0; i < NTB_RX_MAX_PKTS; i++) {
		rc = ntb_process_rxc(qp);
		if (rc != 0) {
			CTR0(KTR_NTB, "RX: process_rxc failed");
			break;
		}
	}
	mtx_unlock(&qp->transport->rx_lock);
}
Example no. 15
/* Transport Rx */
static void
ntb_transport_rxc_db(void *arg, int pending __unused)
{
	struct ntb_transport_qp *qp = arg;
	ntb_q_idx_t i;
	int rc;

	/*
	 * Limit the number of packets processed in a single interrupt to
	 * provide fairness to others.
	 */
	CTR0(KTR_NTB, "RX: transport_rx");
	mtx_lock(&qp->transport->rx_lock);
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc != 0) {
			CTR0(KTR_NTB, "RX: process_rxc failed");
			break;
		}
	}
	mtx_unlock(&qp->transport->rx_lock);

	if (i == qp->rx_max_entry)
		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	else if ((ntb_db_read(qp->ntb) & (1ull << qp->qp_num)) != 0) {
		/* If db is set, clear it and read it back to commit clear. */
		ntb_db_clear(qp->ntb, 1ull << qp->qp_num);
		(void)ntb_db_read(qp->ntb);

		/*
		 * An interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit: there might
		 * be some more work to do.
		 */
		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	}
}
Example no. 16
static void
ntb_complete_rxc(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_queue_entry *entry;
	struct mbuf *m;
	unsigned len;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	mtx_lock_spin(&qp->ntb_rx_q_lock);

	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
		entry = STAILQ_FIRST(&qp->rx_post_q);
		if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0)
			break;

		entry->x_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

		len = entry->len;
		m = entry->buf;

		/*
		 * Re-initialize queue_entry for reuse; rx_handler takes
		 * ownership of the mbuf.
		 */
		entry->buf = NULL;
		entry->len = transport_mtu;
		entry->cb_data = qp->transport->ifp;

		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

		mtx_unlock_spin(&qp->ntb_rx_q_lock);

		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler != NULL && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, m, len);
		else
			m_freem(m);

		mtx_lock_spin(&qp->ntb_rx_q_lock);
	}

	mtx_unlock_spin(&qp->ntb_rx_q_lock);
}
Example no. 17
/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void
kassert_panic(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/*
	 * Panic if we're not just warning, or if we've exceeded
	 * kassert_log_panic_at warnings.
	 */
	if (!kassert_warn_only ||
	    (kassert_log_panic_at > 0 &&
	     kassert_warnings >= kassert_log_panic_at)) {
		va_start(ap, fmt);
		vpanic(fmt, ap);
		/* NORETURN */
	}
#ifdef KTR
	if (kassert_do_ktr)
		CTR0(ktr_mask, buf);
#endif /* KTR */
	/*
	 * Log if we've not yet met the mute limit.
	 */
	if (kassert_do_log &&
	    (kassert_log_mute_at == 0 ||
	     kassert_warnings < kassert_log_mute_at)) {
		static  struct timeval lasterr;
		static  int curerr;

		if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
			printf("KASSERT failed: %s\n", buf);
			kdb_backtrace();
		}
	}
#ifdef KDB
	if (kassert_do_kdb) {
		kdb_enter(KDB_WHY_KASSERT, buf);
	}
#endif
	atomic_add_int(&kassert_warnings, 1);
}
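For context, with options INVARIANTS the KASSERT() macro expands to a call to this function, and its message argument is a parenthesized printf-style list, which is why call sites read KASSERT(ptr != NULL, ("ptr is NULL")). The definition is roughly:

#define	KASSERT(exp, msg) do {						\
	if (__predict_false(!(exp)))					\
		kassert_panic msg;					\
} while (0)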
Example no. 18
struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
Example no. 19
struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
Example no. 20
static int
ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{
	void *offset;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	CTR3(KTR_NTB,
	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		CTR0(KTR_NTB, "TX: ring full");
		qp->tx_ring_full++;
		return (EAGAIN);
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler != NULL)
			qp->tx_handler(qp, qp->cb_data, entry->buf, EIO);
		else
			m_freem(entry->buf);

		entry->buf = NULL;
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: frame too big. returning entry %p to tx_free_q",
		    entry);
		return (0);
	}
	CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset);
	ntb_memcpy_tx(qp, entry, offset);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return (0);
}
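The full test above compares the local producer index with the consumer index the peer echoes back in remote_rx_info->entry, so one ring slot is always sacrificed to keep "full" distinguishable from "empty". A toy userland model of the same test:

#include <stdio.h>

int
main(void)
{
	unsigned n = 4;		/* ring entries */
	unsigned prod = 0;	/* local tx_index */
	unsigned cons = 3;	/* peer's last freed slot */
	unsigned queued = 0;

	while (prod != cons) {	/* same test as ntb_process_tx() */
		printf("enqueue into slot %u\n", prod);
		prod = (prod + 1) % n;
		queued++;
	}
	printf("ring full after %u entries\n", queued);	/* n - 1 = 3 */
	return (0);
}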
Example no. 21
struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
Example no. 22
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
#ifndef INTRNG
	int start = 0, end = 0;
#endif
	uint32_t actlr_mask, actlr_set;

	pmap_set_tex();
	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the per-CPU queue, so it must not be executed
	 * in parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if __ARM_ARCH >= 6 && defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

#ifndef INTRNG
	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
#endif /* INTRNG */
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example no. 23
static ACPI_STATUS
EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 Width,
	       UINT64 *Value, void *Context, void *RegionContext)
{
    struct acpi_ec_softc	*sc = (struct acpi_ec_softc *)Context;
    ACPI_STATUS			Status;
    UINT8			*EcData;
    UINT8			EcAddr;
    int				bytes, i;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)Address);

    if (Width % 8 != 0 || Value == NULL || Context == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    bytes = Width / 8;
    if (Address + bytes - 1 > 0xFF)
	return_ACPI_STATUS (AE_BAD_ADDRESS);

    if (Function == ACPI_READ)
	*Value = 0;
    EcAddr = Address;
    EcData = (UINT8 *)Value;

    /*
     * If booting, check if we need to run the query handler.  If so, we
     * call it directly here since our thread taskq is not active yet.
     */
    if (cold || rebooting || sc->ec_suspending) {
	if ((EC_GET_CSR(sc) & EC_EVENT_SCI)) {
	    CTR0(KTR_ACPI, "ec running gpe handler directly");
	    EcGpeQueryHandler(sc);
	}
    }

    /* Serialize with EcGpeQueryHandler() at transaction granularity. */
    Status = EcLock(sc);
    if (ACPI_FAILURE(Status))
	return_ACPI_STATUS (Status);

    /* Perform the transaction(s), based on Width. */
    for (i = 0; i < bytes; i++, EcAddr++, EcData++) {
	switch (Function) {
	case ACPI_READ:
	    Status = EcRead(sc, EcAddr, EcData);
	    break;
	case ACPI_WRITE:
	    Status = EcWrite(sc, EcAddr, *EcData);
	    break;
	default:
	    device_printf(sc->ec_dev, "invalid EcSpaceHandler function %d\n",
			  Function);
	    Status = AE_BAD_PARAMETER;
	    break;
	}
	if (ACPI_FAILURE(Status))
	    break;
    }

    EcUnlock(sc);
    return_ACPI_STATUS (Status);
}
Example no. 24
static ACPI_STATUS
EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event, u_int gen_count)
{
    ACPI_STATUS	Status;
    int		count, i, need_poll, slp_ival;

    ACPI_SERIAL_ASSERT(ec);
    Status = AE_NO_HARDWARE_RESPONSE;
    need_poll = cold || rebooting || ec_polled_mode || sc->ec_suspending;

    /*
     * The main CPU should be much faster than the EC.  So the status should
     * be "not ready" when we start waiting.  But if the main CPU is really
     * slow, it's possible we see the current "ready" response.  Since that
     * can't be distinguished from the previous response in polled mode,
     * this is a potential issue.  We really should have interrupts enabled
     * during boot so there is no ambiguity in polled mode.
     *
     * If this occurs, we add an additional delay before actually entering
     * the status checking loop, hopefully to allow the EC to go to work
     * and produce a non-stale status.
     */
    if (need_poll) {
	static int	once;

	if (EcCheckStatus(sc, "pre-check", Event) == AE_OK) {
	    if (!once) {
		device_printf(sc->ec_dev,
		    "warning: EC done before starting event wait\n");
		once = 1;
	    }
	    AcpiOsStall(10);
	}
    }

    /* Wait for event by polling or GPE (interrupt). */
    if (need_poll) {
	count = (ec_timeout * 1000) / EC_POLL_DELAY;
	if (count == 0)
	    count = 1;
	for (i = 0; i < count; i++) {
	    Status = EcCheckStatus(sc, "poll", Event);
	    if (Status == AE_OK)
		break;
	    AcpiOsStall(EC_POLL_DELAY);
	}
    } else {
	slp_ival = hz / 1000;
	if (slp_ival != 0) {
	    count = ec_timeout;
	} else {
	    /* hz has less than 1 ms resolution so scale timeout. */
	    slp_ival = 1;
	    count = ec_timeout / (1000 / hz);
	}

	/*
	 * Wait for the GPE to signal the status changed, checking the
	 * status register each time we get one.  It's possible to get a
	 * GPE for an event we're not interested in here (i.e., SCI for
	 * EC query).
	 */
	for (i = 0; i < count; i++) {
	    if (gen_count != sc->ec_gencount) {
		/*
		 * Record new generation count.  It's possible the GPE was
		 * just to notify us that a query is needed and we need to
		 * wait for a second GPE to signal the completion of the
		 * event we are actually waiting for.
		 */
		gen_count = sc->ec_gencount;
		Status = EcCheckStatus(sc, "sleep", Event);
		if (Status == AE_OK)
		    break;
	    }
	    tsleep(&sc->ec_gencount, PZERO, "ecgpe", slp_ival);
	}

	/*
	 * We finished waiting for the GPE and it never arrived.  Try to
	 * read the register once and trust whatever value we got.  This is
	 * the best we can do at this point.  Then, force polled mode on
	 * since this system doesn't appear to generate GPEs.
	 */
	if (Status != AE_OK) {
	    Status = EcCheckStatus(sc, "sleep_end", Event);
	    device_printf(sc->ec_dev,
		"wait timed out (%sresponse), forcing polled mode\n",
		Status == AE_OK ? "" : "no ");
	    ec_polled_mode = TRUE;
	}
    }
    if (Status != AE_OK)
	    CTR0(KTR_ACPI, "error: ec wait timed out");
    return (Status);
}
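EcCheckStatus(), polled throughout this function, is not reproduced in this collection. A condensed sketch of its likely shape, assuming the driver's EVENT_READY() predicate and EC_FLAG_BURST_MODE status bit (details may differ from the real implementation):

static ACPI_STATUS
EcCheckStatusSketch(struct acpi_ec_softc *sc, const char *msg, EC_EVENT Event)
{
    EC_STATUS EcStatus;

    EcStatus = EC_GET_CSR(sc);
    /* The EC may silently drop out of burst mode; track that here. */
    if (sc->ec_burstactive && (EcStatus & EC_FLAG_BURST_MODE) == 0) {
	CTR1(KTR_ACPI, "ec burst disabled in waitevent (%s)", msg);
	sc->ec_burstactive = FALSE;
    }
    if (EVENT_READY(Event, EcStatus)) {
	CTR2(KTR_ACPI, "ec %s wait ready, status %#x", msg, EcStatus);
	return (AE_OK);
    }
    return (AE_NO_HARDWARE_RESPONSE);
}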
Example no. 25
static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	CTR0(KTR_IOAT, __func__);

	if (status == ioat->last_seen)
		goto out;

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
	wakeup(&ioat->tail);

	if (!is_ioat_halted(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
}
Example no. 26
static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}
Example no. 27
static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}
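Both ring_shrink() and ring_grow() index with "& (size - 1)" rather than "% size", which is valid only because ring sizes are powers of two (ring_size_order is a shift count). A quick userland check of the wraparound:

#include <stdio.h>

int
main(void)
{
	unsigned order = 3;
	unsigned size = 1u << order;	/* 8 slots */
	unsigned i;

	/* For power-of-two size, i & (size - 1) equals i % size. */
	for (i = 5; i < 12; i++)
		printf("index %2u -> slot %u\n", i, i & (size - 1));
	return (0);
}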
Example no. 28
static ACPI_STATUS
EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 Width,
	       UINT64 *Value, void *Context, void *RegionContext)
{
    struct acpi_ec_softc	*sc = (struct acpi_ec_softc *)Context;
    ACPI_PHYSICAL_ADDRESS	EcAddr;
    UINT8			*EcData;
    ACPI_STATUS			Status;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)Address);

    if (Function != ACPI_READ && Function != ACPI_WRITE)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (Width % 8 != 0 || Value == NULL || Context == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (Address + Width / 8 > 256)
	return_ACPI_STATUS (AE_BAD_ADDRESS);

    /*
     * If booting, check if we need to run the query handler.  If so, we
     * call it directly here since our thread taskq is not active yet.
     */
    if (cold || rebooting || sc->ec_suspending) {
	if ((EC_GET_CSR(sc) & EC_EVENT_SCI)) {
	    CTR0(KTR_ACPI, "ec running gpe handler directly");
	    EcGpeQueryHandler(sc);
	}
    }

    /* Serialize with EcGpeQueryHandler() at transaction granularity. */
    Status = EcLock(sc);
    if (ACPI_FAILURE(Status))
	return_ACPI_STATUS (Status);

    /* If we can't start burst mode, continue anyway. */
    Status = EcCommand(sc, EC_COMMAND_BURST_ENABLE);
    if (ACPI_SUCCESS(Status)) {
	if (EC_GET_DATA(sc) == EC_BURST_ACK) {
	    CTR0(KTR_ACPI, "ec burst enabled");
	    sc->ec_burstactive = TRUE;
	}
    }

    /* Perform the transaction(s), based on Width. */
    EcAddr = Address;
    EcData = (UINT8 *)Value;
    if (Function == ACPI_READ)
	*Value = 0;
    do {
	switch (Function) {
	case ACPI_READ:
	    Status = EcRead(sc, EcAddr, EcData);
	    break;
	case ACPI_WRITE:
	    Status = EcWrite(sc, EcAddr, *EcData);
	    break;
	}
	if (ACPI_FAILURE(Status))
	    break;
	EcAddr++;
	EcData++;
    } while (EcAddr < Address + Width / 8);

    if (sc->ec_burstactive) {
	sc->ec_burstactive = FALSE;
	if (ACPI_SUCCESS(EcCommand(sc, EC_COMMAND_BURST_DISABLE)))
	    CTR0(KTR_ACPI, "ec disabled burst ok");
    }

    EcUnlock(sc);
    return_ACPI_STATUS (Status);
}
Example no. 29
static int
ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = (void *)
	    ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index);
	hdr = (void *)
	    ((char *)offset + qp->rx_max_frame -
		sizeof(struct ntb_payload_header));

	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (entry == NULL) {
		qp->rx_err_no_buf++;
		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
		return (ENOMEM);
	}
	callout_stop(&qp->rx_full);
	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

	if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) {
		CTR1(KTR_NTB,
		    "RX: hdr not done. Returning entry %p to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return (EAGAIN);
	}

	if (hdr->ver != (uint32_t) qp->rx_pkts) {
		CTR3(KTR_NTB, "RX: ver != rx_pkts (%x != %lx). "
		    "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts,
		    entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		qp->rx_err_ver++;
		return (EIO);
	}

	if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) {
		ntb_qp_link_down(qp);
		CTR1(KTR_NTB,
		    "RX: link down. adding entry %p back to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		goto out;
	}

	if (hdr->len <= entry->len) {
		entry->len = hdr->len;
		ntb_rx_copy_task(qp, entry, offset);
	} else {
		CTR1(KTR_NTB,
		    "RX: len too long. Returning entry %p to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);

		qp->rx_err_oflow++;
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;
	CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);

out:
	/* Ensure that the data is globally visible before clearing the flag */
	wmb();
	hdr->flags = 0;
	/* TODO: replace with bus_space_write */
	qp->rx_info->entry = qp->rx_index;

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return (0);
}
Example no. 30
static ACPI_STATUS
EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event, u_int gen_count)
{
    static int	no_intr = 0;
    ACPI_STATUS	Status;
    int		count, i, need_poll, slp_ival;

    ACPI_SERIAL_ASSERT(ec);
    Status = AE_NO_HARDWARE_RESPONSE;
    need_poll = cold || rebooting || ec_polled_mode || sc->ec_suspending;

    /* Wait for event by polling or GPE (interrupt). */
    if (need_poll) {
	count = (ec_timeout * 1000) / EC_POLL_DELAY;
	if (count == 0)
	    count = 1;
	DELAY(10);
	for (i = 0; i < count; i++) {
	    Status = EcCheckStatus(sc, "poll", Event);
	    if (ACPI_SUCCESS(Status))
		break;
	    DELAY(EC_POLL_DELAY);
	}
    } else {
	slp_ival = hz / 1000;
	if (slp_ival != 0) {
	    count = ec_timeout;
	} else {
	    /* hz has less than 1 ms resolution so scale timeout. */
	    slp_ival = 1;
	    count = ec_timeout / (1000 / hz);
	}

	/*
	 * Wait for the GPE to signal the status changed, checking the
	 * status register each time we get one.  It's possible to get a
	 * GPE for an event we're not interested in here (i.e., SCI for
	 * EC query).
	 */
	for (i = 0; i < count; i++) {
	    if (gen_count == sc->ec_gencount)
		tsleep(sc, 0, "ecgpe", slp_ival);
	    /*
	     * Record new generation count.  It's possible the GPE was
	     * just to notify us that a query is needed and we need to
	     * wait for a second GPE to signal the completion of the
	     * event we are actually waiting for.
	     */
	    Status = EcCheckStatus(sc, "sleep", Event);
	    if (ACPI_SUCCESS(Status)) {
		if (gen_count == sc->ec_gencount)
		    no_intr++;
		else
		    no_intr = 0;
		break;
	    }
	    gen_count = sc->ec_gencount;
	}

	/*
	 * We finished waiting for the GPE and it never arrived.  Try to
	 * read the register once and trust whatever value we got.  This is
	 * the best we can do at this point.
	 */
	if (ACPI_FAILURE(Status))
	    Status = EcCheckStatus(sc, "sleep_end", Event);
    }
    if (!need_poll && no_intr > 10) {
	device_printf(sc->ec_dev,
	    "not getting interrupts, switched to polled mode\n");
	ec_polled_mode = 1;
    }
    if (ACPI_FAILURE(Status))
	    CTR0(KTR_ACPI, "error: ec wait timed out");
    return (Status);
}