Example #1
/*
 * Parallel port DMA interrupt.
 */
int
lsi64854_pp_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	char bits[64];
	int ret, trans, resid = 0;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_PP, ("%s: pp intr: addr 0x%x, csr %s\n",
	    device_xname(sc->sc_dev),
	    bus_space_read_4(sc->sc_bustag, sc->sc_regs, L64854_REG_ADDR),
	    bitmask_snprintf(csr, PDMACSR_BITS, bits, sizeof(bits))));

	if (csr & (P_ERR_PEND|P_SLAVE_ERR)) {
		resid = bus_space_read_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT);
		printf("%s: pp error: resid %d csr=%s\n",
		    device_xname(sc->sc_dev), resid,
		    bitmask_snprintf(csr, PDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~P_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= P_INVALIDATE|P_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return 1;
	}

	ret = (csr & P_INT_PEND) != 0;

	if (sc->sc_active != 0) {
		DMA_DRAIN(sc, 0);
		resid = bus_space_read_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT);
	}

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
		trans = sc->sc_dmasize;
	}
	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	return ret != 0;
}
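Every example on this page follows the same call pattern: a register value, a bit-description string (hidden above behind the PDMACSR_BITS macro), a caller-supplied buffer, and the buffer size, with the result printed via %s. The sketch below is a minimal userland illustration of that old-style "\20..." bit-description format; the helper name bits_to_str and the exact output formatting are assumptions for demonstration only, not the kernel's bitmask_snprintf implementation.

#include <stdio.h>

/*
 * Illustrative decoder for the old-style bit-description strings used in
 * these examples: the first byte selects the radix (\10 octal, \12 decimal,
 * \20 hex), then each following byte is a 1-origin bit number followed by
 * that bit's name.  Hypothetical helper, not kernel code.
 */
static void
bits_to_str(unsigned int val, const char *fmt, char *buf, size_t len)
{
	size_t off;
	int first = 1;

	/* Print the raw value in the radix given by the first format byte. */
	off = snprintf(buf, len,
	    *fmt == 8 ? "%#o" : *fmt == 10 ? "%u" : "%#x", val);
	fmt++;

	/* Append "<NAME,NAME,...>" for every set bit that has a name. */
	while (*fmt != '\0' && off < len) {
		int bit = *fmt++;		/* 1-origin bit number */
		const char *name = fmt;

		while (*fmt > ' ')		/* name ends at next control byte */
			fmt++;
		if (val & (1u << (bit - 1))) {
			off += snprintf(buf + off, len - off, "%c%.*s",
			    first ? '<' : ',', (int)(fmt - name), name);
			first = 0;
		}
	}
	if (!first && off < len)
		snprintf(buf + off, len - off, ">");
}

int
main(void)
{
	char buf[64];

	/* Bits 1 and 3 are set in 0x5, so both names are reported. */
	bits_to_str(0x5, "\20\1ENABLE\2RESET\3ERROR", buf, sizeof(buf));
	printf("%s\n", buf);		/* prints "0x5<ENABLE,ERROR>" */
	return 0;
}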
Example #2
void
fdcretry(struct fdc_softc *fdc)
{
	char bits[64];
	struct fd_softc *fd;
	struct buf *bp;

	fd = TAILQ_FIRST(&fdc->sc_drives);
	bp = BUFQ_PEEK(fd->sc_q);

	if (fd->sc_opts & FDOPT_NORETRY)
	    goto fail;
	switch (fdc->sc_errors) {
	case 0:
		/* try again */
		fdc->sc_state = DOSEEK;
		break;

	case 1: case 2: case 3:
		/* didn't work; try recalibrating */
		fdc->sc_state = DORECAL;
		break;

	case 4:
		/* still no go; reset the bastard */
		fdc->sc_state = DORESET;
		break;

	default:
	fail:
		if ((fd->sc_opts & FDOPT_SILENT) == 0) {
			diskerr(bp, "fd", "hard error", LOG_PRINTF,
				fd->sc_skip / FDC_BSIZE, NULL);

			printf(" (st0 %s",
			       bitmask_snprintf(fdc->sc_status[0],
						NE7_ST0BITS, bits,
						sizeof(bits)));
			printf(" st1 %s",
			       bitmask_snprintf(fdc->sc_status[1],
						NE7_ST1BITS, bits,
						sizeof(bits)));
			printf(" st2 %s",
			       bitmask_snprintf(fdc->sc_status[2],
						NE7_ST2BITS, bits,
						sizeof(bits)));
			printf(" cyl %d head %d sec %d)\n",
			       fdc->sc_status[3],
			       fdc->sc_status[4],
			       fdc->sc_status[5]);
		}

		bp->b_error = EIO;
		fdfinish(fd, bp);
	}
	fdc->sc_errors++;
}
Example #3
/*
 * Await completion of the named function
 * and check for errors.
 */
void
dewait(struct de_softc *sc, char *fn)
{
	int csr0;

	while ((DE_RCSR(DE_PCSR0) & PCSR0_INTR) == 0)
		;
	csr0 = DE_RCSR(DE_PCSR0);
	DE_WHIGH(csr0 >> 8);
	if (csr0 & PCSR0_PCEI) {
		char bits[64];
		printf("%s: %s failed, csr0=%s ", sc->sc_dev.dv_xname, fn,
		    bitmask_snprintf(csr0, PCSR0_BITS, bits, sizeof(bits)));
		printf("csr1=%s\n", bitmask_snprintf(DE_RCSR(DE_PCSR1),
		    PCSR1_BITS, bits, sizeof(bits)));
	}
}
Example #4
static void
lpt_attach(device_t parent, device_t self, void *aux)
{
	struct lpt_softc * sc = device_private(self);
	struct ppbus_device_softc * ppbdev = &(sc->ppbus_dev);
	struct ppbus_attach_args * args = aux;
	char buf[64];
	int error;

	ppbdev->sc_dev = self;

	error = lpt_request_ppbus(sc, 0);
	if(error) {
		printf("%s(%s): error (%d) requesting bus(%s). Device not "
			"properly attached.\n", __func__, device_xname(self),
			error, device_xname(parent));
		return;
	}

	/* Record capabilities */
	ppbdev->capabilities = args->capabilities;

	/* Allocate memory buffers */
	if(ppbdev->capabilities & PPBUS_HAS_DMA) {
		if(ppbus_dma_malloc(parent, &(sc->sc_inbuf),
			&(sc->sc_in_baddr), BUFSIZE)) {

			printf(" : cannot allocate input DMA buffer. Device "
				"not properly attached!\n");
			return;
		}
		if(ppbus_dma_malloc(parent, &(sc->sc_outbuf),
			&(sc->sc_out_baddr), BUFSIZE)) {

			ppbus_dma_free(parent, &(sc->sc_inbuf),
				&(sc->sc_in_baddr), BUFSIZE);
			printf(" : cannot allocate output DMA buffer. Device "
				"not properly attached!\n");
			return;
		}
	} else {
		sc->sc_inbuf = malloc(BUFSIZE, M_DEVBUF, M_WAITOK);
		sc->sc_outbuf = malloc(BUFSIZE, M_DEVBUF, M_WAITOK);
	}

	/* Print out mode */
	ppbdev->ctx.mode = ppbus_get_mode(parent);
	bitmask_snprintf(ppbdev->ctx.mode, "\20\1COMPATIBLE\2NIBBLE"
		"\3PS2\4EPP\5ECP\6FAST_CENTR", buf, sizeof(buf));
	printf(": port mode = %s\n", buf);

	/* Initialize the device on open by default */
	sc->sc_flags = LPT_PRIME;

	lpt_release_ppbus(sc, 0);
}
Example #5
void
fdcstatus(device_t dv, int n, const char *s)
{
	struct fdc_softc *fdc = device_private(device_parent(dv));
	char bits[64];

	if (n == 0) {
		out_fdc(fdc->sc_iot, fdc->sc_ioh, NE7CMD_SENSEI);
		(void) fdcresult(fdc);
		n = 2;
	}

	aprint_normal_dev(dv, "%s", s);

	switch (n) {
	case 0:
		printf("\n");
		break;
	case 2:
		printf(" (st0 %s cyl %d)\n",
		    bitmask_snprintf(fdc->sc_status[0], NE7_ST0BITS,
		    bits, sizeof(bits)), fdc->sc_status[1]);
		break;
	case 7:
		printf(" (st0 %s", bitmask_snprintf(fdc->sc_status[0],
		    NE7_ST0BITS, bits, sizeof(bits)));
		printf(" st1 %s", bitmask_snprintf(fdc->sc_status[1],
		    NE7_ST1BITS, bits, sizeof(bits)));
		printf(" st2 %s", bitmask_snprintf(fdc->sc_status[2],
		    NE7_ST2BITS, bits, sizeof(bits)));
		printf(" cyl %d head %d sec %d)\n",
		    fdc->sc_status[3], fdc->sc_status[4], fdc->sc_status[5]);
		break;
#ifdef DIAGNOSTIC
	default:
		printf("\nfdcstatus: weird size");
		break;
#endif
	}
}
Example #6
void
nextdma_init(struct nextdma_softc *nsc)
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <*****@*****.**>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE | 
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}
Example #7
int
ilwait(struct il_softc *sc, char *op)
{

	while ((IL_RCSR(IL_CSR)&IL_CDONE) == 0)
		;
	if (IL_RCSR(IL_CSR)&IL_STATUS) {
		char bits[64];

		aprint_error_dev(&sc->sc_dev, "%s failed, csr=%s\n", op,
		    bitmask_snprintf(IL_RCSR(IL_CSR), IL_BITS, bits,
		    sizeof(bits)));
		return (-1);
	}
	return (0);
}
Example #8
/*
 * We can obtain the information about MCA bus presence via
 * GET CONFIGURATION BIOS call - int 0x15, function 0xc0.
 * The call returns a pointer to the memory area holding the configuration
 * block in es:bx (on AT-compatible computers, i.e. all we care about).
 *
 * Configuration block contains block length (2 bytes), model
 * number (1 byte), submodel number (1 byte), BIOS revision
 * (1 byte) and up to 5 feature bytes. We only care about
 * first feature byte.
 */
void
mca_busprobe(void)
{
	struct bioscallregs regs;
	struct bios_config *scp;
	paddr_t             paddr;
	char buf[80];

	memset(&regs, 0, sizeof(regs));
	regs.AH = 0xc0;
	bioscall(0x15, &regs);

	if ((regs.EFLAGS & PSL_C) || regs.AH != 0) {
		aprint_verbose("BIOS CFG: Not supported. Not AT-compatible?\n");
		return;
	}

	paddr = (regs.ES << 4) + regs.BX;
	scp = (struct bios_config *)ISA_HOLE_VADDR(paddr);

	bitmask_snprintf((scp->feature2 << 8) | scp->feature1,
		"\20"
		"\01MCA+ISA"
		"\02MCA"
		"\03EBDA"
		"\04WAITEV"
		"\05KBDINT"
		"\06RTC"
		"\07IC2"
		"\010DMA3B"
		"\011res"
		"\012DSTR"
		"\013n8042"
		"\014CPUF"
		"\015MMF"
		"\016GPDF"
		"\017KBDF"
		"\020DMA32\n",
		buf, sizeof(buf));

	aprint_verbose("BIOS CFG: Model-SubM-Rev: %02x-%02x-%02x, 0x%s\n",
		scp->model, scp->submodel, scp->bios_rev, buf);

	MCA_system = (scp->feature1 & FEATURE_MCABUS) ? 1 : 0;
}
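The comment at the top of mca_busprobe() describes the layout of the configuration block returned by the GET CONFIGURATION call. A sketch of that layout could look like the struct below; the struct and field names are assumptions for illustration, since the driver's real struct bios_config is declared in its own header.

#include <stdint.h>

/*
 * Layout described in the comment above: 2-byte block length, model,
 * submodel, BIOS revision, then up to 5 feature bytes.  Names are
 * illustrative only; see the driver's real struct bios_config.
 */
struct bios_config_sketch {
	uint16_t count;		/* block length, in bytes */
	uint8_t  model;		/* model number */
	uint8_t  submodel;	/* submodel number */
	uint8_t  bios_rev;	/* BIOS revision */
	uint8_t  feature1;	/* feature byte 1 (FEATURE_MCABUS is tested here) */
	uint8_t  feature2;	/* feature byte 2 */
	uint8_t  feature3;	/* remaining feature bytes, if present */
	uint8_t  feature4;
	uint8_t  feature5;
} __attribute__((__packed__));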
Example #9
void
db_dump_pcb(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
{
	struct pcb *pcb;
	char bits[64];
	int i;

	if (have_addr)
		pcb = (struct pcb *) addr;
	else
		pcb = curcpu()->curpcb;

	db_printf("pcb@%p sp:%p pc:%p psr:%s onfault:%p\nfull windows:\n",
		  pcb, (void *)(long)pcb->pcb_sp, (void *)(long)pcb->pcb_pc,
		  bitmask_snprintf(pcb->pcb_psr, PSR_BITS, bits, sizeof(bits)),
		  (void *)pcb->pcb_onfault);

	for (i=0; i<pcb->pcb_nsaved; i++) {
		db_printf("win %d: at %llx local, in\n", i,
			  (unsigned long long)pcb->pcb_rw[i+1].rw_in[6]);
		db_printf("%16llx %16llx %16llx %16llx\n",
			  (unsigned long long)pcb->pcb_rw[i].rw_local[0],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[1],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[2],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[3]);
		db_printf("%16llx %16llx %16llx %16llx\n",
			  (unsigned long long)pcb->pcb_rw[i].rw_local[4],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[5],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[6],
			  (unsigned long long)pcb->pcb_rw[i].rw_local[7]);
		db_printf("%16llx %16llx %16llx %16llx\n",
			  (unsigned long long)pcb->pcb_rw[i].rw_in[0],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[1],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[2],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[3]);
		db_printf("%16llx %16llx %16llx %16llx\n",
			  (unsigned long long)pcb->pcb_rw[i].rw_in[4],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[5],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[6],
			  (unsigned long long)pcb->pcb_rw[i].rw_in[7]);
	}
}
Example #10
/*
 * Pseudo (chained) interrupt to le driver to handle DMA errors.
 */
int
lsi64854_enet_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	char bits[64];
	uint32_t csr;
	static int dodrain = 0;
	int rv;

	csr = L64854_GCSR(sc);

	/* If the DMA logic shows an interrupt, claim it */
	rv = ((csr & E_INT_PEND) != 0) ? 1 : 0;

	if (csr & (E_ERR_PEND|E_SLAVE_ERR)) {
		printf("%s: error: csr=%s\n", device_xname(sc->sc_dev),
		    bitmask_snprintf(csr, EDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~L64854_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= E_INVALIDATE|E_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		DMA_RESET(sc);
		dodrain = 1;
		return 1;
	}

	if (dodrain) {	/* XXX - is this necessary with D_DSBL_WRINVAL on? */
		int i = 10;
		csr |= E_DRAIN;
		L64854_SCSR(sc, csr);
		while (i-- > 0 && (L64854_GCSR(sc) & D_DRAINING))
			delay(1);
	}

	return rv | (*sc->sc_intrchain)(sc->sc_intrchainarg);
}
Example #11
/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * currently running DMA transfer. I am relying on espintr() to
 * pick up and clean up errors for now.
 *
 * return 1 if it was a DMA continue.
 */
int
espdmaintr(struct esp_softc *sc)
{
	struct ncr53c9x_softc *nsc = (struct ncr53c9x_softc *)sc;
	int trans, resid;
	u_long csr = sc->sc_dma_direction;

#if 0
	if (csr & D_ERR_PEND) {
		DMACSR(sc) &= ~D_EN_DMA;	/* Stop DMA */
		DMACSR(sc) |= D_INVALIDATE;
		printf("%s: error: csr=%s\n", device_xname(nsc->sc_dev),
		    bitmask_snprintf(csr, DMACSRBITS, bits, sizeof(bits)));
		return -1;
	}
#endif

	/* This is an "assertion" :) */
	if (sc->sc_dmaactive == 0)
		panic("%s: DMA wasn't active", __func__);

	/* dbdma_flush(sc->sc_dmareg); */

	/* DMA has stopped */
	dbdma_stop(sc->sc_dmareg);
	sc->sc_dmaactive = 0;

	if (sc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		NCR_DMA(("dmaintr: discarded %d bytes (tcl=%d, tcm=%d)\n",
			NCR_READ_REG(nsc, NCR_TCL) |
				(NCR_READ_REG(nsc, NCR_TCM) << 8),
			NCR_READ_REG(nsc, NCR_TCL),
			NCR_READ_REG(nsc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the ESP counter registers get decremented as
	 * bytes are clocked into the FIFO.
	 */
	if (!(csr & D_WRITE) &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		NCR_DMA(("dmaintr: empty esp FIFO of %d ", resid));
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the ESP counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
			  (NCR_READ_REG(nsc, NCR_TCM) << 8) |
			   ((nsc->sc_cfg2 & NCRCFG2_FE)
				? (NCR_READ_REG(nsc, NCR_TCH) << 16)
				: 0));

		if (resid == 0 && sc->sc_dmasize == 65536 &&
		    (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
			/* A transfer of 64K is encoded as `TCL=TCM=0' */
			resid = 65536;
	}

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    device_xname(nsc->sc_dev), trans, sc->sc_dmasize);
#endif
		trans = sc->sc_dmasize;
	}

	NCR_DMA(("dmaintr: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
		NCR_READ_REG(nsc, NCR_TCL),
		NCR_READ_REG(nsc, NCR_TCM),
		(nsc->sc_cfg2 & NCRCFG2_FE)
			? NCR_READ_REG(nsc, NCR_TCH) : 0,
		trans, resid));

#if 0
	if (csr & D_WRITE)
		flushcache(*sc->sc_dmaaddr, trans);
#endif

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 ||
	    nsc->sc_phase != nsc->sc_prevphase)
		return 0;

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
	return 1;
#endif
	return 0;
}
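Both ESP examples (this one and lsi64854_scsi_intr below) compute the DMA residue the same way: combine the three 8-bit transfer-count registers and special-case the all-zero encoding of a 64K transfer. A distilled helper, purely for illustration (the function name and parameters are assumptions, and have_tch mirrors the NCRCFG2_FE check in the code above), might read:

#include <stdint.h>

/*
 * Illustrative helper: combine the NCR53C9X/ESP transfer-count registers
 * into a residue, as done by espdmaintr() and lsi64854_scsi_intr().
 */
static int
esp_residue(uint8_t tcl, uint8_t tcm, uint8_t tch, int have_tch, int dmasize)
{
	int resid = tcl | (tcm << 8) | (have_tch ? (tch << 16) : 0);

	/* A transfer of 64K is encoded as TCL=TCM=0 on chips without TCH. */
	if (resid == 0 && dmasize == 65536 && !have_tch)
		resid = 65536;
	return resid;
}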
Example #12
void
memattach(struct device *parent, struct device *self, void *aux)
{
	struct pdc_iodc_minit pdc_minit PDC_ALIGNMENT;
	struct confargs *ca = aux;
	struct mem_softc *sc = (struct mem_softc *)self;
	int s, err, pagezero_cookie;
	char bits[128];

	printf (":");

	pagezero_cookie = hp700_pagezero_map();

	/* XXX check if we are dealing w/ Viper */
	if (ca->ca_hpa == (hppa_hpa_t)VIPER_HPA) {

		sc->sc_vp = (struct vi_trs *)
		    &((struct iomod *)ca->ca_hpa)->priv_trs;

		/* XXX other values seem to blow it up */
		if (sc->sc_vp->vi_status.hw_rev == 0) {
			bitmask_snprintf(VI_CTRL, VIPER_BITS, bits, 
			    sizeof(bits));
			printf (" viper rev %x, ctrl %s",
			    sc->sc_vp->vi_status.hw_rev,
			    bits);

			s = splhigh();
			VI_CTRL |= VI_CTRL_ANYDEN;
			((struct vi_ctrl *)&VI_CTRL)->core_den = 0;
			((struct vi_ctrl *)&VI_CTRL)->sgc0_den = 0;
			((struct vi_ctrl *)&VI_CTRL)->sgc1_den = 0;
			((struct vi_ctrl *)&VI_CTRL)->core_prf = 1;
			sc->sc_vp->vi_control = VI_CTRL;
			splx(s);
#ifdef DEBUG
			bitmask_snprintf(VI_CTRL, VIPER_BITS, bits, 
			    sizeof(bits));
			printf (" >> %s", bits);
#endif
		} else
			sc->sc_vp = NULL;
	} else
		sc->sc_vp = NULL;

	if ((err = pdc_call((iodcio_t)pdc, 0, PDC_IODC, PDC_IODC_NINIT,
			    &pdc_minit, ca->ca_hpa, PAGE0->imm_spa_size)) < 0)
		pdc_minit.max_spa = PAGE0->imm_max_mem;

	hp700_pagezero_unmap(pagezero_cookie);

	printf (" size %d", pdc_minit.max_spa / (1024*1024));
	if (pdc_minit.max_spa % (1024*1024))
		printf (".%d", pdc_minit.max_spa % (1024*1024));
	printf ("MB");

	/* L2 cache controller is a part of the memory controller on PCXL2 */
	if (HPPA_PA_SPEC_MAJOR(hppa_cpu_info->hppa_cpu_info_pa_spec) == 1 &&
	    HPPA_PA_SPEC_MINOR(hppa_cpu_info->hppa_cpu_info_pa_spec) == 1 &&
	    HPPA_PA_SPEC_LETTER(hppa_cpu_info->hppa_cpu_info_pa_spec) == 'e') {
		sc->sc_l2 = (struct l2_mioc *)ca->ca_hpa;
#ifdef DEBUG
		bitmask_snprintf(sc->sc_l2->sltcv, SLTCV_BITS, bits,
				 sizeof(bits));
		printf(", sltcv %s", bits);
#endif
		/* sc->sc_l2->sltcv |= SLTCV_UP4COUT; */
		if (sc->sc_l2->sltcv & SLTCV_ENABLE) {
			uint32_t tagmask = sc->sc_l2->tagmask >> 20;
			printf(", %dMB L2 cache", tagmask + 1);
		}
	}
}
Example #13
/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * currently running DMA transfer. Called from ncr53c9x_intr()
 * for now.
 *
 * return 1 if it was a DMA continue.
 */
int
lsi64854_scsi_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	struct ncr53c9x_softc *nsc = sc->sc_client;
	char bits[64];
	int trans, resid;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_SCSI, ("%s: %s: addr 0x%x, csr %s\n",
	    device_xname(sc->sc_dev), __func__,
	    bus_space_read_4(sc->sc_bustag, sc->sc_regs, L64854_REG_ADDR),
	    bitmask_snprintf(csr, DDMACSR_BITS, bits, sizeof(bits))));

	if (csr & (D_ERR_PEND|D_SLAVE_ERR)) {
		printf("%s: error: csr=%s\n", device_xname(sc->sc_dev),
		    bitmask_snprintf(csr, DDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~D_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= D_INVALIDATE|D_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return -1;
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("%s: DMA wasn't active", __func__);

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	if (sc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		DPRINTF(LDB_SCSI, ("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
		    __func__,
		    NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL),
		    NCR_READ_REG(nsc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the NCR53C9X counter registers get decremented
	 * as bytes are clocked into the FIFO.
	 */
	if (!(csr & D_WRITE) &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		DPRINTF(LDB_SCSI, ("%s: empty esp FIFO of %d ",
		    __func__, resid));
		if (nsc->sc_rev == NCR_VARIANT_FAS366 &&
		    (NCR_READ_REG(nsc, NCR_CFG3) & NCRFASCFG3_EWIDE))
			resid <<= 1;
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the NCR53C9X counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
			  (NCR_READ_REG(nsc, NCR_TCM) << 8) |
			   ((nsc->sc_cfg2 & NCRCFG2_FE) ?
			    (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));

		if (resid == 0 && sc->sc_dmasize == 65536 &&
		    (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
			/* A transfer of 64K is encoded as `TCL=TCM=0' */
			resid = 65536;
	}

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    device_xname(&sc->sc_dev), trans, sc->sc_dmasize);
#endif
		trans = sc->sc_dmasize;
	}

	DPRINTF(LDB_SCSI, ("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__,
	    NCR_READ_REG(nsc, NCR_TCL),
	    NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_cfg2 & NCRCFG2_FE) ?
	    NCR_READ_REG(nsc, NCR_TCH) : 0,
	    trans, resid));

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 ||
	    nsc->sc_phase != nsc->sc_prevphase)
		return 0;

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
	return 1;
#endif
	return 0;
}
Example #14
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v, v2;
	unsigned long sz;
	int x;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char buf[160];				/* about 2 lines */
	char pbuf[9];

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_64_round_page(MSGBUFSIZE));
	if (msgbuf_vaddr == 0)
		panic("failed to valloc msgbuf_vaddr");

	/* msgbuf_paddr was init'd in pmap */
	for (x = 0; x < btoc(MSGBUFSIZE); x++)
		pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
		    msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);

	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf("%s", version);

	printf("cpu0: %s", cpu_model);
	if (cpu_tsc_freq != 0)
		printf(", %ld.%02ld MHz", (cpu_tsc_freq + 4999) / 1000000,
		    ((cpu_tsc_freq + 4999) / 10000) % 100);
	printf("\n");

	if ((cpu_feature & CPUID_MASK1) != 0) {
		bitmask_snprintf(cpu_feature, CPUID_FLAGS1,
		    buf, sizeof(buf));
		printf("cpu0: features %s\n", buf);
	}
	if ((cpu_feature & CPUID_MASK2) != 0) {
		bitmask_snprintf(cpu_feature, CPUID_FLAGS2,
		    buf, sizeof(buf));
		printf("cpu0: features %s\n", buf);
	}

	if (cpuid_level >= 3 && ((cpu_feature & CPUID_PN) != 0)) {
		printf("cpu0: serial number %04X-%04X-%04X-%04X-%04X-%04X\n",
			cpu_serial[0] / 65536, cpu_serial[0] % 65536,
			cpu_serial[1] / 65536, cpu_serial[1] % 65536,
			cpu_serial[2] / 65536, cpu_serial[2] % 65536);
	}

	format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (unsigned long)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	v2 = allocsys(v, NULL);
	if ((v2 - v) != sz)
		panic("startup: table size inconsistency");

	/*
	 * Allocate virtual address space for the buffers.  The area
	 * is not managed by the VM system.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	/*
	 * XXX We defer allocation of physical pages for buffers until
	 * XXX after autoconfiguration has run.  We must do this because
	 * XXX on system with large amounts of memory or with large
	 * XXX user-configured buffer caches, the buffer cache will eat
	 * XXX up all of the lower 16M of RAM.  This prevents ISA DMA
	 * XXX maps from allocating bounce pages.
	 *
	 * XXX Note that nothing can use buffer cache buffers until after
	 * XXX autoconfiguration completes!!
	 *
	 * XXX This is a hack, and needs to be replaced with a better
	 * XXX solution!  [email protected], December 6, 1997
	 */

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

	/*
	 * XXX Buffer cache pages haven't yet been allocated, so
	 * XXX we need to account for those pages when printing
	 * XXX the amount of free memory.
	 */
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/* Safe for i/o port / memory space allocation to use malloc now. */
	x86_64_bus_space_mallocok();
}
Example #15
static void
sw_attach(device_t parent, device_t self, void *aux)
{
	struct sw_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	ncr_sc->sc_dev = self;
	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
			  SWREG_BANK_SZ,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	ncr_sc->sc_dma_eop   = sw_dma_stop;
	ncr_sc->sc_dma_stop  = sw_dma_stop;
	ncr_sc->sc_intr_on   = sw_intr_on;
	ncr_sc->sc_intr_off  = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * Default interrupt priority always is 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
				 sw_intr, sc);

	aprint_normal(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((device_cfdata(self)->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    device_cfdata(self)->cf_flags & SW_OPTIONS_MASK;

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free  = sw_dma_free;
	ncr_sc->sc_dma_poll  = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: DMA handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			aprint_error_dev(self, "DMA buffer map create error\n");
			return;
		}
	}

	if (sc->sc_options) {
		aprint_normal_dev(self, "options=%s\n",
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}