Example #1
static int
advisaprobe(struct isa_device *id)
{
	int	port_index;
	int	max_port_index;

	/*
	 * Default to scanning all possible device locations.
	 */
	port_index = 0;
	max_port_index = MAX_ISA_IOPORT_INDEX;

	if (id->id_iobase > 0) {
		for (;port_index <= max_port_index; port_index++)
			if (id->id_iobase <= adv_isa_ioports[port_index])
				break;
		if ((port_index > max_port_index)
		 || (id->id_iobase != adv_isa_ioports[port_index])) {
			printf("adv%d: Invalid baseport of 0x%x specified. "
				"Neerest valid baseport is 0x%x.  Failing "
				"probe.\n", id->id_unit, id->id_iobase,
				(port_index <= max_port_index) ?
					adv_isa_ioports[port_index] :
					adv_isa_ioports[max_port_index]);
			return 0;
		}
		max_port_index = port_index;
	}

	/* Perform the actual probing */
	adv_set_isapnp_wait_for_key();
	for (;port_index <= max_port_index; port_index++) {
		u_int16_t port_addr = adv_isa_ioports[port_index];
		bus_size_t maxsegsz;
		bus_size_t maxsize;
		bus_addr_t lowaddr;
		int error;

		if (port_addr == 0)
			/* Already been attached */
			continue;
		id->id_iobase = port_addr;
		if (haveseen_isadev(id, CC_IOADDR | CC_QUIET))
			continue;

		if (adv_find_signature(I386_BUS_SPACE_IO, port_addr)) {
			/*
			 * Got one.  Now allocate our softc
			 * and see if we can initialize the card.
			 */
			struct adv_softc *adv;
			adv = adv_alloc(id->id_unit, I386_BUS_SPACE_IO,
					port_addr);
			if (adv == NULL)
				return (0);

			adv_unit++;

			id->id_iobase = adv->bsh;

			/*
			 * Stop the chip.
			 */
			ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
			ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
			/*
			 * Determine the chip version.
			 */
			adv->chip_version = ADV_INB(adv,
						    ADV_NONEISA_CHIP_REVISION);
			if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL)
			 && (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) {
				adv->type = ADV_VL;
				maxsegsz = ADV_VL_MAX_DMA_COUNT;
				maxsize = BUS_SPACE_MAXSIZE_32BIT;
				lowaddr = ADV_VL_MAX_DMA_ADDR;
				id->id_drq = -1;				
			} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA)
				&& (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) {
				if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) {
					adv->type = ADV_ISAPNP;
					ADV_OUTB(adv, ADV_REG_IFC,
						 ADV_IFC_INIT_DEFAULT);
				} else {
					adv->type = ADV_ISA;
				}
				maxsegsz = ADV_ISA_MAX_DMA_COUNT;
				maxsize = BUS_SPACE_MAXSIZE_24BIT;
				lowaddr = ADV_ISA_MAX_DMA_ADDR;
				adv->isa_dma_speed = ADV_DEF_ISA_DMA_SPEED;
				adv->isa_dma_channel =
				    adv_get_isa_dma_channel(adv);
				id->id_drq = adv->isa_dma_channel;
			} else {
				panic("advisaprobe: Unknown card revision\n");
			}

			/*
			 * Allocate a parent dmatag for all tags created
			 * by the MI portions of the advansys driver
			 */
			/* XXX Should be a child of the ISA bus dma tag */ 
			error =
			    bus_dma_tag_create(/*parent*/NULL,
					       /*alignment*/0,
					       /*boundary*/0,
					       lowaddr,
					       /*highaddr*/BUS_SPACE_MAXADDR,
					       /*filter*/NULL,
					       /*filterarg*/NULL,
					       maxsize,
					       /*nsegs*/BUS_SPACE_UNRESTRICTED,
					       maxsegsz,
					       /*flags*/0,
					       &adv->parent_dmat); 
 
			if (error != 0) {
				printf("%s: Could not allocate DMA tag - error %d\n",
				       adv_name(adv), error); 
				adv_free(adv); 
				return (0); 
			}

			adv->init_level++;

			if (overrun_buf == NULL) {
				/* Need to allocate our overrun buffer */
				if (bus_dma_tag_create(adv->parent_dmat,
						       /*alignment*/8,
						       /*boundary*/0,
						       ADV_ISA_MAX_DMA_ADDR,
						       BUS_SPACE_MAXADDR,
						       /*filter*/NULL,
						       /*filterarg*/NULL,
						       ADV_OVERRUN_BSIZE,
						       /*nsegments*/1,
						       BUS_SPACE_MAXSIZE_32BIT,
						       /*flags*/0,
						       &overrun_dmat) != 0) {
					adv_free(adv);
					return (0);
				}
				if (bus_dmamem_alloc(overrun_dmat,
						     (void **)&overrun_buf,
						     BUS_DMA_NOWAIT,
						     &overrun_dmamap) != 0) {
					bus_dma_tag_destroy(overrun_dmat);
					adv_free(adv);
					return (0);
				}
				/* And permanently map it in */  
				bus_dmamap_load(overrun_dmat, overrun_dmamap,
						overrun_buf, ADV_OVERRUN_BSIZE,
						adv_map, &overrun_physbase,
						/*flags*/0);
			}

			adv->overrun_physbase = overrun_physbase;
			
			if (adv_init(adv) != 0) {
				adv_free(adv);
				return (0);
			}

			switch (adv->type) {
			case ADV_ISAPNP:
				if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG){
					adv->bug_fix_control
					    |= ADV_BUG_FIX_ASYN_USE_SYN;
					adv->fix_asyn_xfer = ~0;
				}
				/* Fall Through */
			case ADV_ISA:
				adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT;
				adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR;
				adv_set_isa_dma_settings(adv);
				break;

			case ADV_VL:
				adv->max_dma_count = ADV_VL_MAX_DMA_COUNT;
				adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR;
				break;
			default:
				panic("advisaprobe: Invalid card type\n");
			}
			
			/* Determine our IRQ */
			if (id->id_irq == 0 /* irq ? */)
				id->id_irq = 1 << adv_get_chip_irq(adv);
			else
				adv_set_chip_irq(adv, ffs(id->id_irq) - 1);

			id->id_intr = adv_isa_intr;
			
			/* Mark as probed */
			adv_isa_ioports[port_index] = 0;
			return 1;
		}
	}

	return 0;
}
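
The probe relies on adv_map as the bus_dmamap_load() callback that records
the overrun buffer's physical address in overrun_physbase.  The callback
itself is not part of this example; the following is only a sketch of the
conventional FreeBSD single-segment load callback (the function name is
illustrative):

static void
example_map_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *physaddr = (bus_addr_t *)arg;

	/* A single-segment load either resolves to one segment or fails. */
	if (error != 0 || nseg != 1) {
		*physaddr = 0;
		return;
	}
	*physaddr = segs[0].ds_addr;
}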
Example #2
static int
at91_usart_bus_attach(struct uart_softc *sc)
{
	int err;
	int i;
	struct at91_usart_softc *atsc;

	atsc = (struct at91_usart_softc *)sc;

	if (at91_usart_requires_rts0_workaround(sc))
		atsc->flags |= USE_RTS0_WORKAROUND;

	/*
	 * See if we have a TIMEOUT bit.  We disable all interrupts as
	 * a side effect.  Boot loaders may have enabled them.  Since
	 * a TIMEOUT interrupt can't happen without other setup, the
	 * apparent race here can't actually happen.
	 */
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);
	WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT);
	if (RD4(&sc->sc_bas, USART_IMR) & USART_CSR_TIMEOUT)
		atsc->flags |= HAS_TIMEOUT;
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);

	/*
	 * Allocate transmit DMA tag and map.  We allow a transmit buffer
	 * to be any size, but it must map to a single contiguous physical
	 * extent.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &atsc->tx_tag);
	if (err != 0)
		goto errout;
	err = bus_dmamap_create(atsc->tx_tag, 0, &atsc->tx_map);
	if (err != 0)
		goto errout;

	if (atsc->flags & HAS_TIMEOUT) {
		/*
		 * Allocate receive DMA tags, maps, and buffers.
		 * The receive buffers should be aligned to arm_dcache_align,
		 * otherwise partial cache line flushes on every receive
		 * interrupt are pretty much guaranteed.
		 */
		err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
		    arm_dcache_align, 0, BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR, NULL, NULL, sc->sc_rxfifosz, 1,
		    sc->sc_rxfifosz, BUS_DMA_ALLOCNOW, NULL, NULL,
		    &atsc->rx_tag);
		if (err != 0)
			goto errout;
		for (i = 0; i < 2; i++) {
			err = bus_dmamem_alloc(atsc->rx_tag,
			    (void **)&atsc->ping_pong[i].buffer,
			    BUS_DMA_NOWAIT, &atsc->ping_pong[i].map);
			if (err != 0)
				goto errout;
			err = bus_dmamap_load(atsc->rx_tag,
			    atsc->ping_pong[i].map,
			    atsc->ping_pong[i].buffer, sc->sc_rxfifosz,
			    at91_getaddr, &atsc->ping_pong[i].pa, 0);
			if (err != 0)
				goto errout;
			bus_dmamap_sync(atsc->rx_tag, atsc->ping_pong[i].map,
			    BUS_DMASYNC_PREREAD);
		}
		atsc->ping = &atsc->ping_pong[0];
		atsc->pong = &atsc->ping_pong[1];
	}

	/* Turn on rx and tx */
	DELAY(1000);		/* Give pending character a chance to drain.  */
	WR4(&sc->sc_bas, USART_CR, USART_CR_RSTSTA | USART_CR_RSTRX | USART_CR_RSTTX);
	WR4(&sc->sc_bas, USART_CR, USART_CR_RXEN | USART_CR_TXEN);

	/*
	 * Setup the PDC to receive data.  We use the ping-pong buffers
	 * so that we can more easily bounce between the two and so that
	 * we get an interrupt halfway through the software 'fifo' we have,
	 * which helps us avoid overruns.
	 */
	if (atsc->flags & HAS_TIMEOUT) {
		WR4(&sc->sc_bas, PDC_RPR, atsc->ping->pa);
		WR4(&sc->sc_bas, PDC_RCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_RNPR, atsc->pong->pa);
		WR4(&sc->sc_bas, PDC_RNCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Set the receive timeout to be 1.5 character times
		 * assuming 8N1.
		 */
		WR4(&sc->sc_bas, USART_RTOR, 15);
		WR4(&sc->sc_bas, USART_CR, USART_CR_STTTO);
		WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT |
		    USART_CSR_RXBUFF | USART_CSR_ENDRX);
	} else {
		WR4(&sc->sc_bas, USART_IER, USART_CSR_RXRDY);
	}
	WR4(&sc->sc_bas, USART_IER, USART_CSR_RXBRK | USART_DCE_CHANGE_BITS);

	/* Prime sc->hwsig with the initial hw line states. */
	at91_usart_bus_getsig(sc);

errout:
	return (err);
}
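
The attach routine only queues the two ping-pong buffers; the receive path
that consumes them lives elsewhere in the driver.  As a rough sketch (not
the driver's actual code), rotating the buffers on a receive completion
could look like this, reusing the softc fields from the example:

static void
at91_usart_rx_swap(struct uart_softc *sc)
{
	struct at91_usart_softc *atsc = (struct at91_usart_softc *)sc;
	void *tmp;

	/* Make the CPU's view of the completed "ping" buffer coherent. */
	bus_dmamap_sync(atsc->rx_tag, atsc->ping->map, BUS_DMASYNC_POSTREAD);

	/* ... hand atsc->ping->buffer to the uart core here ... */

	/* Swap roles, then re-arm the drained buffer as the "next" one. */
	tmp = atsc->ping;
	atsc->ping = atsc->pong;
	atsc->pong = tmp;
	bus_dmamap_sync(atsc->rx_tag, atsc->pong->map, BUS_DMASYNC_PREREAD);
	WR4(&sc->sc_bas, PDC_RNPR, atsc->pong->pa);
	WR4(&sc->sc_bas, PDC_RNCR, sc->sc_rxfifosz);
}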
Example #3
static void
iavc_pci_attach(struct device * parent,
	struct device * self, void *aux)
{
	struct iavc_pci_softc *psc = (void *) self;
	struct iavc_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct iavc_pci_product *pp;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ret;

	pp = find_cardname(pa);
	if (pp == NULL)
		return;

	sc->sc_t1 = 0;
	sc->sc_dma = 0;
	sc->dmat = pa->pa_dmat;

	iavc_b1dma_reset(sc);

	if (pci_mapreg_map(pa, IAVC_PCI_IOBA, PCI_MAPREG_TYPE_IO, 0,
		&sc->sc_io_bt, &sc->sc_io_bh, &psc->io_base, &psc->io_size)) {
		aprint_error(": unable to map i/o registers\n");
		return;
	}

	if (pci_mapreg_map(pa, IAVC_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
	     &sc->sc_mem_bt, &sc->sc_mem_bh, &psc->mem_base, &psc->mem_size)) {
		aprint_error(": unable to map mem registers\n");
		return;
	}
	aprint_normal(": %s\n", pp->name);

	if (pp->npp_product == PCI_PRODUCT_AVM_T1) {
		aprint_error_dev(&sc->sc_dev, "sorry, PRI not yet supported\n");
		return;

#if 0
		sc->sc_capi.card_type = CARD_TYPEC_AVM_T1_PCI;
		sc->sc_capi.sc_nbch = NBCH_PRI;
		ret = iavc_t1_detect(sc);
		if (ret) {
			if (ret < 6) {
				aprint_error_dev(&sc->sc_dev, "no card detected?\n");
			} else {
				aprint_error_dev(&sc->sc_dev, "black box not on\n");
			}
			return;
		} else {
			sc->sc_dma = 1;
			sc->sc_t1 = 1;
		}
#endif

	} else if (pp->npp_product == PCI_PRODUCT_AVM_B1) {
		sc->sc_capi.card_type = CARD_TYPEC_AVM_B1_PCI;
		sc->sc_capi.sc_nbch = NBCH_BRI;
		ret = iavc_b1dma_detect(sc);
		if (ret) {
			ret = iavc_b1_detect(sc);
			if (ret) {
				aprint_error_dev(&sc->sc_dev, "no card detected?\n");
				return;
			}
		} else {
			sc->sc_dma = 1;
		}
	}
	if (sc->sc_dma)
		iavc_b1dma_reset(sc);

#if 0
	/*
	 * XXX: should really be done this way, but this freezes the card
	 */
	if (sc->sc_t1)
		iavc_t1_reset(sc);
	else
		iavc_b1_reset(sc);
#endif

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, iavc_pci_intr, psc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	psc->sc_pc = pc;
	aprint_normal("%s: interrupting at %s\n", device_xname(&sc->sc_dev), intrstr);

	memset(&sc->sc_txq, 0, sizeof(struct ifqueue));
	sc->sc_txq.ifq_maxlen = sc->sc_capi.sc_nbch * 4;

	sc->sc_intr = 0;
	sc->sc_state = IAVC_DOWN;
	sc->sc_blocked = 0;

	/* setup capi link */
	sc->sc_capi.load = iavc_load;
	sc->sc_capi.reg_appl = iavc_register;
	sc->sc_capi.rel_appl = iavc_release;
	sc->sc_capi.send = iavc_send;
	sc->sc_capi.ctx = (void *) sc;

	/* lock & load DMA for TX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->txseg, 1, &sc->ntxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't allocate tx DMA memory, error = %d\n",
		    ret);
		goto fail1;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->txseg, sc->ntxsegs,
	    IAVC_DMA_SIZE, &sc->sc_sendbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't map tx DMA memory, error = %d\n",
		    ret);
		goto fail2;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1,
	    IAVC_DMA_SIZE, 0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
	    &sc->tx_map)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't create tx DMA map, error = %d\n",
		    ret);
		goto fail3;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->tx_map, sc->sc_sendbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't load tx DMA map, error = %d\n",
		    ret);
		goto fail4;
	}

	/* do the same for RX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->rxseg, 1, &sc->nrxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't allocate rx DMA memory, error = %d\n",
		    ret);
		goto fail5;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->rxseg, sc->nrxsegs,
	    IAVC_DMA_SIZE, &sc->sc_recvbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't map rx DMA memory, error = %d\n",
		    ret);
		goto fail6;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1, IAVC_DMA_SIZE,
	    0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, &sc->rx_map)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't create rx DMA map, error = %d\n",
		    ret);
		goto fail7;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->rx_map, sc->sc_recvbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map, error = %d\n",
		    ret);
		goto fail8;
	}

	if (capi_ll_attach(&sc->sc_capi, device_xname(&sc->sc_dev), pp->name)) {
		aprint_error_dev(&sc->sc_dev, "capi attach failed\n");
		goto fail9;
	}
	return;

	/* release resources in case of failed attach */
fail9:
	bus_dmamap_unload(sc->dmat, sc->rx_map);
fail8:
	bus_dmamap_destroy(sc->dmat, sc->rx_map);
fail7:
	bus_dmamem_unmap(sc->dmat, sc->sc_recvbuf, IAVC_DMA_SIZE);
fail6:
	bus_dmamem_free(sc->dmat, &sc->rxseg, sc->nrxsegs);
fail5:
	bus_dmamap_unload(sc->dmat, sc->tx_map);
fail4:
	bus_dmamap_destroy(sc->dmat, sc->tx_map);
fail3:
	bus_dmamem_unmap(sc->dmat, sc->sc_sendbuf, IAVC_DMA_SIZE);
fail2:
	bus_dmamem_free(sc->dmat, &sc->txseg, sc->ntxsegs);
fail1:
	pci_intr_disestablish(psc->sc_pc, psc->sc_ih);

	return;
}
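
iavc_pci_attach runs the same four-step NetBSD bus_dma sequence twice, once
per direction, and unwinds it strictly in reverse on failure.  Condensed
into a standalone helper, the pattern looks roughly like this (a sketch;
the helper and its error convention are not part of the driver):

static int
example_dma_buf_setup(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, int *nsegs, void **kva, bus_dmamap_t *map)
{
	if (bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1, nsegs,
	    BUS_DMA_NOWAIT) != 0)
		return (-1);
	if (bus_dmamem_map(dmat, seg, *nsegs, size, kva, BUS_DMA_NOWAIT) != 0)
		goto fail_free;
	if (bus_dmamap_create(dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    map) != 0)
		goto fail_unmap;
	if (bus_dmamap_load(dmat, *map, *kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto fail_destroy;
	return (0);

 fail_destroy:
	bus_dmamap_destroy(dmat, *map);
 fail_unmap:
	bus_dmamem_unmap(dmat, *kva, size);
 fail_free:
	bus_dmamem_free(dmat, seg, *nsegs);
	return (-1);
}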
Example #4
static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ? 
                                 TWS_MAX_64BIT_SG_ELEMENTS : 
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE);
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */ 
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements, 
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,       /* parent */          
                            TWS_IN_MF_ALIGNMENT,  /* alignment */
                            0,                    /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,    /* highaddr */
                            NULL, NULL,           /* filter, filterarg */
                            dma_mem_size,         /* maxsize */
                            1,                    /* numsegs */
                            BUS_SPACE_MAXSIZE,    /* maxsegsize */
                            0,                    /* flags */
                            NULL, NULL,           /* lockfunc, lockfuncarg */
                            &sc->cmd_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

   /*
    * Create a dma tag for data buffers; size will be the maximum
    * possible I/O size (128kB).
    */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->reqs == NULL ) {
        TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->sense_bufs == NULL ) {
        TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
    if ( sc->scan_ccb == NULL ) {
        TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);
    
    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.  Setting bit 0 will enable
     * interrupt coalescing; we will revisit this.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);
}
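
The data tag above names busdma_lock_mutex as its lockfunc with
&sc->io_lock as the lock argument, so deferred map loads are serialized
under that mutex.  The mutex must be initialized before the tag sees use;
a minimal sketch of that setup (the helper and name string are
assumptions, not driver code):

static void
tws_init_io_lock(struct tws_softc *sc)
{
    /* io_lock is handed to busdma as the lockfuncarg of the data tag. */
    mtx_init(&sc->io_lock, "tws io lock", NULL, MTX_DEF);
}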
Example #5
static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	uint32_t	command;
	pcireg_t	memtype, pmode;
	bus_addr_t	memaddr;
	bus_size_t	memsize;
	void		*kva;
	bus_dma_segment_t seg;
	int             error, i, pmreg, rseg;
	struct ifnet   *ifp;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->bce_dev = self;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);

	sc->bce_pa = *pa;

	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}

	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s\n", bp->bp_name);

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same
	 * 4k space; however, both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but more
	 * is required due to the limitation above. ??
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	 /* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(sc->bce_dev, 1, 27,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
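
bce_attach splits one two-page allocation into an RX ring at kva and a TX
ring one page above it.  The physical addresses the chip is programmed
with follow the same split, taken from the map's single DMA segment; a
sketch (the helper is illustrative, not driver code):

static void
bce_ring_addrs(struct bce_softc *sc, bus_addr_t *rxp, bus_addr_t *txp)
{
	bus_addr_t base = sc->bce_ring_map->dm_segs[0].ds_addr;

	*rxp = base;			/* matches sc->bce_rx_ring */
	*txp = base + PAGE_SIZE;	/* matches sc->bce_tx_ring */
}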
Example #6
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
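
The shutdown hook registered above must match shutdownhook_establish()'s
void (*)(void *) signature.  ae_shutdown itself is not shown in this
example; a plausible minimal sketch, assuming it simply stops the
interface so DMA is quiet across reboot:

static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	/* Stop the interface, disabling receive and transmit DMA. */
	ae_stop(&sc->sc_ethercom.ec_if, 1);
}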
Example #7
/*
 * Attach this instance, and then all the sub-devices
 */
static void
pcscp_attach(device_t parent, device_t self, void *aux)
{
	struct pcscp_softc *esc = device_private(self);
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	struct pci_attach_args *pa = aux;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t csr;
	bus_dma_segment_t seg;
	int error, rseg;

	sc->sc_dev = self;
	pci_aprint_devinfo(pa, NULL);
	aprint_normal("%s", device_xname(sc->sc_dev));

	if (pci_mapreg_map(pa, IO_MAP_REG, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL)) {
		aprint_error(": unable to map registers\n");
		return;
	}

	sc->sc_glue = &pcscp_glue;

	esc->sc_st = iot;
	esc->sc_sh = ioh;
	esc->sc_dmat = pa->pa_dmat;

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE);

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * Set up static configuration info.
	 */

	/*
	 * XXX should read configuration from EEPROM?
	 *
	 * MI ncr53c9x driver does not support configuration
	 * per each target device, though...
	 */
	sc->sc_id = 7;
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
	sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK;
	sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE;
	sc->sc_rev = NCR_VARIANT_AM53C974;
	sc->sc_features = NCR_F_FASTSCSI;
	sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI;
	sc->sc_freq = 40; /* MHz */

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */

	sc->sc_minsync = 1000 / sc->sc_freq;

	/* Really no limit, but since we want to fit into the TCR... */
	sc->sc_maxxfer = 16 * 1024 * 1024;

	/*
	 * Create the DMA maps for the data transfers.
	 */

#define MDL_SEG_SIZE	0x1000 /* 4kbyte per segment */
#define MDL_SEG_OFFSET	0x0FFF
#define MDL_SIZE	(MAXPHYS / MDL_SEG_SIZE + 1) /* no hardware limit? */

	if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MDL_SEG_SIZE,
	    MDL_SEG_SIZE, BUS_DMA_NOWAIT, &esc->sc_xfermap)) {
		aprint_error(": can't create DMA maps\n");
		return;
	}

	/*
	 * Allocate and map memory for the MDL.
	 */

	if ((error = bus_dmamem_alloc(esc->sc_dmat,
	    sizeof(uint32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to allocate memory for the MDL,"
		    " error = %d\n", error);
		goto fail_0;
	}
	if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg,
	    sizeof(uint32_t) * MDL_SIZE , (void **)&esc->sc_mdladdr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": unable to map the MDL memory, error = %d\n",
		    error);
		goto fail_1;
	}
	if ((error = bus_dmamap_create(esc->sc_dmat,
	    sizeof(uint32_t) * MDL_SIZE, 1, sizeof(uint32_t) * MDL_SIZE,
	    0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) {
		aprint_error(": unable to map_create for the MDL, error = %d\n",
		    error);
		goto fail_2;
	}
	if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap,
	     esc->sc_mdladdr, sizeof(uint32_t) * MDL_SIZE,
	     NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to load for the MDL, error = %d\n",
		    error);
		goto fail_3;
	}

	/* map and establish interrupt */
	if (pci_intr_map(pa, &ih)) {
		aprint_error(": couldn't map interrupt\n");
		goto fail_4;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	esc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    ncr53c9x_intr, esc);
	if (esc->sc_ih == NULL) {
		aprint_error(": couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_4;
	}
	if (intrstr != NULL) {
		aprint_normal(": interrupting at %s\n", intrstr);
		aprint_normal("%s", device_xname(sc->sc_dev));
	}

	/* Do the common parts of attachment. */
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	/* Turn on target selection using the `DMA' method */
	sc->sc_features |= NCR_F_DMASELECT;

	return;

 fail_4:
	bus_dmamap_unload(esc->sc_dmat, esc->sc_mdldmap);
 fail_3:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_mdldmap);
 fail_2:
	bus_dmamem_unmap(esc->sc_dmat, (void *)esc->sc_mdldmap,
	    sizeof(uint32_t) * MDL_SIZE);
 fail_1:
	bus_dmamem_free(esc->sc_dmat, &seg, rseg);
 fail_0:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_xfermap);
}
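
With MDL_SEG_SIZE fixed at 4 KB, each MDL entry holds the bus address of
one page of the transfer.  The code that fills the MDL from a loaded
transfer map belongs to the DMA setup path and is not shown here; a
sketch of the idea (illustrative only):

static void
example_fill_mdl(struct pcscp_softc *esc, bus_dmamap_t xfermap)
{
	uint32_t *mdl = esc->sc_mdladdr;
	bus_addr_t addr;
	bus_size_t len;
	int i;

	for (i = 0; i < xfermap->dm_nsegs; i++) {
		addr = xfermap->dm_segs[i].ds_addr;
		len = xfermap->dm_segs[i].ds_len;
		while (len > 0) {
			/* One 32-bit entry per 4 KB page of the segment. */
			*mdl++ = (uint32_t)addr;
			addr += MDL_SEG_SIZE;
			len -= (len < MDL_SEG_SIZE) ? len : MDL_SEG_SIZE;
		}
	}
}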
Example #8
static int
zy7_devcfg_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct zy7_devcfg_softc *sc = dev->si_drv1;
	void *dma_mem;
	bus_addr_t dma_physaddr;
	int segsz, err;

	DEVCFG_SC_LOCK(sc);

	/* First write?  Reset PL. */
	if (uio->uio_offset == 0 && uio->uio_resid > 0)	{
		zy7_devcfg_init_hw(sc);
		zy7_slcr_preload_pl();
		err = zy7_devcfg_reset_pl(sc);
		if (err != 0) {
			DEVCFG_SC_UNLOCK(sc);
			return (err);
		}
	}

	/* Allocate dma memory and load. */
	err = bus_dmamem_alloc(sc->dma_tag, &dma_mem, BUS_DMA_NOWAIT,
			       &sc->dma_map);
	if (err != 0) {
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}
	err = bus_dmamap_load(sc->dma_tag, sc->dma_map, dma_mem, PAGE_SIZE,
			      zy7_dma_cb2, &dma_physaddr, 0);
	if (err != 0) {
		bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}

	while (uio->uio_resid > 0) {
		/* If DONE signal has been set, we shouldn't write anymore. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0) {
			err = EIO;
			break;
		}

		/* uiomove the data from user buffer to our dma map. */
		segsz = MIN(PAGE_SIZE, uio->uio_resid);
		DEVCFG_SC_UNLOCK(sc);
		err = uiomove(dma_mem, segsz, uio);
		DEVCFG_SC_LOCK(sc);
		if (err != 0)
			break;

		/* Flush the cache to memory. */
		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_PREWRITE);

		/* Program devcfg's DMA engine.  The ordering of these
		 * register writes is critical.
		 */
		if (uio->uio_resid > segsz)
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr);
		else
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr |
			    ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP);
		WR4(sc, ZY7_DEVCFG_DMA_DST_ADDR, ZY7_DEVCFG_DMA_ADDR_ILLEGAL);
		WR4(sc, ZY7_DEVCFG_DMA_SRC_LEN, (segsz+3)/4);
		WR4(sc, ZY7_DEVCFG_DMA_DST_LEN, 0);

		/* Now clear done bit and set up DMA done interrupt. */
		WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
		WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_DMA_DONE);

		/* Wait for DMA done interrupt. */
		err = mtx_sleep(sc->dma_map, &sc->sc_mtx, PCATCH,
				"zy7dma", hz);
		if (err != 0)
			break;

		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_POSTWRITE);

		/* Check DONE signal. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0)
			zy7_slcr_postload_pl(zy7_en_level_shifters);
	}

	bus_dmamap_unload(sc->dma_tag, sc->dma_map);
	bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
	DEVCFG_SC_UNLOCK(sc);
	return (err);
}
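
The mtx_sleep() above blocks with sc->dma_map as the wait channel, so the
devcfg interrupt handler is expected to wakeup() that same channel once
the DMA-done interrupt fires.  The real handler is not part of this
example; a minimal sketch under that assumption:

static void
zy7_devcfg_intr_sketch(void *arg)
{
	struct zy7_devcfg_softc *sc = arg;

	DEVCFG_SC_LOCK(sc);
	/* Mask further interrupts and wake the sleeping writer. */
	WR4(sc, ZY7_DEVCFG_INT_MASK, ~0);
	wakeup(sc->dma_map);
	DEVCFG_SC_UNLOCK(sc);
}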
Example #9
/*
 * Setup for communication with the device.  We allocate
 * a command buffer and map it for bus dma use.  The pci
 * device id is used to identify whether the device has
 * SRAM on it (in which case f/w download must include a
 * memory controller reset).  All bus i/o operations happen
 * in BAR 1; the driver passes in the tag and handle we need.
 */
struct malo_hal *
malo_hal_attach(device_t dev, uint16_t devid,
    bus_space_handle_t ioh, bus_space_tag_t iot, bus_dma_tag_t tag)
{
	int error;
	struct malo_hal *mh;

	mh = malloc(sizeof(struct malo_hal), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mh == NULL)
		return NULL;

	mh->mh_dev = dev;
	mh->mh_ioh = ioh;
	mh->mh_iot = iot;

	snprintf(mh->mh_mtxname, sizeof(mh->mh_mtxname),
	    "%s_hal", device_get_nameunit(dev));
	mtx_init(&mh->mh_mtx, mh->mh_mtxname, NULL, MTX_DEF);

	/*
	 * Allocate the command buffer and map into the address
	 * space of the h/w.  We request "coherent" memory which
	 * will be uncached on some architectures.
	 */
	error = bus_dma_tag_create(tag,		/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       MALO_CMDBUF_SIZE,	/* maxsize */
		       1,			/* nsegments */
		       MALO_CMDBUF_SIZE,	/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &mh->mh_dmat);
	if (error != 0) {
		device_printf(dev, "unable to allocate memory for cmd tag, "
			"error %u\n", error);
		goto fail;
	}

	/* create a DMA map for the command buffer */
	error = bus_dmamap_create(mh->mh_dmat, BUS_DMA_NOWAIT, &mh->mh_dmamap);
	if (error != 0) {
		device_printf(dev, "unable to create dmamap for cmd buffers, "
			"error %u\n", error);
		goto fail;
	}

	error = bus_dmamem_alloc(mh->mh_dmat, (void**) &mh->mh_cmdbuf,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 
				 &mh->mh_dmamap);
	if (error != 0) {
		device_printf(dev, "unable to allocate memory for cmd buffer, "
			"error %u\n", error);
		goto fail;
	}

	error = bus_dmamap_load(mh->mh_dmat, mh->mh_dmamap,
				mh->mh_cmdbuf, MALO_CMDBUF_SIZE,
				malo_hal_load_cb, &mh->mh_cmdaddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(dev, "unable to load cmd buffer, error %u\n",
			error);
		goto fail;
	}

	return (mh);

fail:
	if (mh->mh_dmamap != NULL) {
		bus_dmamap_unload(mh->mh_dmat, mh->mh_dmamap);
		if (mh->mh_cmdbuf != NULL)
			bus_dmamem_free(mh->mh_dmat, mh->mh_cmdbuf,
			    mh->mh_dmamap);
		bus_dmamap_destroy(mh->mh_dmat, mh->mh_dmamap);
	}
	if (mh->mh_dmat)
		bus_dma_tag_destroy(mh->mh_dmat);
	free(mh, M_DEVBUF);

	return (NULL);
}
Example #10
static int
le_isa_attach(device_t dev)
{
	struct le_isa_softc *lesc;
	struct lance_softc *sc;
	bus_size_t macstart, rap, rdp;
	int error, i, j, macstride;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am7990.lsc;

	LE_LOCK_INIT(sc, device_get_nameunit(dev));

	j = 0;
	switch (ISA_PNP_PROBE(device_get_parent(dev), dev, le_isa_ids)) {
	case 0:
		lesc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		    &j, RF_ACTIVE);
		rap = PCNET_RAP;
		rdp = PCNET_RDP;
		macstart = 0;
		macstride = 1;
		break;
	case ENOENT:
		for (i = 0; i < sizeof(le_isa_params) /
		    sizeof(le_isa_params[0]); i++) {
			if (le_isa_probe_legacy(dev, &le_isa_params[i]) == 0) {
				lesc->sc_rres = bus_alloc_resource(dev,
				    SYS_RES_IOPORT, &j, 0, ~0,
				    le_isa_params[i].iosize, RF_ACTIVE);
				rap = le_isa_params[i].rap;
				rdp = le_isa_params[i].rdp;
				macstart = le_isa_params[i].macstart;
				macstride = le_isa_params[i].macstride;
				goto found;
			}
		}
		/* FALLTHROUGH */
	case ENXIO:
	default:
		device_printf(dev, "cannot determine chip\n");
		error = ENXIO;
		goto fail_mtx;
	}

 found:
	if (lesc->sc_rres == NULL) {
		device_printf(dev, "cannot allocate registers\n");
		error = ENXIO;
		goto fail_mtx;
	}
	lesc->sc_rap = rap;
	lesc->sc_rdp = rdp;

	i = 0;
	if ((lesc->sc_dres = bus_alloc_resource_any(dev, SYS_RES_DRQ,
	    &i, RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate DMA channel\n");
		error = ENXIO;
		goto fail_rres;
	}

	i = 0;
	if ((lesc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &i, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate interrupt\n");
		error = ENXIO;
		goto fail_dres;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &lesc->sc_pdmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_ires;
	}

	sc->sc_memsize = LE_ISA_MEMSIZE;
	/*
	 * For Am79C90, Am79C961 and Am79C961A the init block must be 2-byte
	 * aligned and the ring descriptors must be 8-byte aligned.
	 */
	error = bus_dma_tag_create(
	    lesc->sc_pdmat,		/* parent */
	    8, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->sc_memsize,		/* maxsize */
	    1,				/* nsegments */
	    sc->sc_memsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &lesc->sc_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate buffer DMA tag\n");
		goto fail_pdtag;
	}

	error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
	if (error != 0) {
		device_printf(dev, "cannot allocate DMA buffer memory\n");
		goto fail_dtag;
	}

	sc->sc_addr = 0;
	error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
	    sc->sc_memsize, le_isa_dma_callback, sc, 0);
	if (error != 0 || sc->sc_addr == 0) {
		device_printf(dev, "cannot load DMA buffer map\n");
		goto fail_dmem;
	}

	isa_dmacascade(rman_get_start(lesc->sc_dres));

	sc->sc_flags = 0;
	sc->sc_conf3 = 0;

	/*
	 * Extract the physical MAC address from the ROM.
	 */
	for (i = 0; i < sizeof(sc->sc_enaddr); i++)
		sc->sc_enaddr[i] = bus_read_1(lesc->sc_rres,
		    macstart + i * macstride);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = le_isa_rdcsr;
	sc->sc_wrcsr = le_isa_wrcsr;
	sc->sc_hwreset = NULL;
	sc->sc_hwinit = NULL;
	sc->sc_hwintr = NULL;
	sc->sc_nocarrier = NULL;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;
	sc->sc_supmedia = NULL;

	error = am7990_config(&lesc->sc_am7990, device_get_name(dev),
	    device_get_unit(dev));
	if (error != 0) {
		device_printf(dev, "cannot attach Am7990\n");
		goto fail_dmap;
	}

	error = bus_setup_intr(dev, lesc->sc_ires, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, am7990_intr, sc, &lesc->sc_ih);
	if (error != 0) {
		device_printf(dev, "cannot set up interrupt\n");
		goto fail_am7990;
	}

	return (0);

 fail_am7990:
	am7990_detach(&lesc->sc_am7990);
 fail_dmap:
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
 fail_dmem:
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
 fail_dtag:
	bus_dma_tag_destroy(lesc->sc_dmat);
 fail_pdtag:
	bus_dma_tag_destroy(lesc->sc_pdmat);
 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ,
	    rman_get_rid(lesc->sc_ires), lesc->sc_ires);
 fail_dres:
	bus_release_resource(dev, SYS_RES_DRQ,
	    rman_get_rid(lesc->sc_dres), lesc->sc_dres);
 fail_rres:
	bus_release_resource(dev, SYS_RES_IOPORT,
	    rman_get_rid(lesc->sc_rres), lesc->sc_rres);
 fail_mtx:
	LE_LOCK_DESTROY(sc);
	return (error);
}
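
le_isa_rdcsr/le_isa_wrcsr, installed above, reach the LANCE CSRs through
the two-port RAP/RDP convention: write the CSR number to the register
address port, then transfer the data through the register data port.  A
sketch of that convention (not the driver's verbatim code):

static uint16_t
example_rdcsr(struct lance_softc *sc, uint16_t port)
{
	struct le_isa_softc *lesc = (struct le_isa_softc *)sc;

	bus_write_2(lesc->sc_rres, lesc->sc_rap, port);
	return (bus_read_2(lesc->sc_rres, lesc->sc_rdp));
}

static void
example_wrcsr(struct lance_softc *sc, uint16_t port, uint16_t val)
{
	struct le_isa_softc *lesc = (struct le_isa_softc *)sc;

	bus_write_2(lesc->sc_rres, lesc->sc_rap, port);
	bus_write_2(lesc->sc_rres, lesc->sc_rdp, val);
}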
Example #11
static int
adv_pci_attach(device_t dev)
{
	struct		adv_softc *adv;
	u_int32_t	id;
	u_int32_t	command;
	int		error, rid, irqrid;
	void		*ih;
	struct resource	*iores, *irqres;

	/*
	 * Determine the chip version.
	 */
	id = pci_read_config(dev, PCIR_DEVVENDOR, /*bytes*/4);
	command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/1);

	/*
	 * These cards do not allow memory mapped accesses, so we must
	 * ensure that I/O accesses are available or we won't be able
	 * to talk to them.
	 */
	if ((command & (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN))
	 != (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN)) {
		command |= PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, command, /*bytes*/1);
	}

	/*
	 * Early chips can't handle non-zero latency timer settings.
	 */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1);
	}

	rid = PCI_BASEADR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL)
		return ENXIO;

	if (adv_find_signature(rman_get_bustag(iores),
			       rman_get_bushandle(iores)) == 0) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv = adv_alloc(dev, rman_get_bustag(iores), rman_get_bushandle(iores));
	if (adv == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	/* Allocate a dmatag for our transfer DMA maps */
	/* XXX Should be a child of the PCI bus dma tag */
	error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
				   /*boundary*/0,
				   /*lowaddr*/ADV_PCI_MAX_DMA_ADDR,
				   /*highaddr*/BUS_SPACE_MAXADDR,
				   /*filter*/NULL, /*filterarg*/NULL,
				   /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
				   /*nsegments*/BUS_SPACE_UNRESTRICTED,
				   /*maxsegsz*/ADV_PCI_MAX_DMA_COUNT,
				   /*flags*/0,
				   &adv->parent_dmat);
 
	if (error != 0) {
		kprintf("%s: Could not allocate DMA tag - error %d\n",
		       adv_name(adv), error);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->init_level++;

	if (overrun_buf == NULL) {
		/* Need to allocate our overrun buffer */
		if (bus_dma_tag_create(adv->parent_dmat,
				       /*alignment*/8, /*boundary*/0,
				       ADV_PCI_MAX_DMA_ADDR, BUS_SPACE_MAXADDR,
				       /*filter*/NULL, /*filterarg*/NULL,
				       ADV_OVERRUN_BSIZE, /*nsegments*/1,
				       BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
				       &overrun_dmat) != 0) {
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		if (bus_dmamem_alloc(overrun_dmat,
				     (void *)&overrun_buf,
				     BUS_DMA_NOWAIT,
				     &overrun_dmamap) != 0) {
			bus_dma_tag_destroy(overrun_dmat);
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		/* And permanently map it in */  
		bus_dmamap_load(overrun_dmat, overrun_dmamap,
				overrun_buf, ADV_OVERRUN_BSIZE,
				adv_map, &overrun_physbase,
				/*flags*/0);
	}

	adv->overrun_physbase = overrun_physbase;
			
	/*
	 * Stop the chip.
	 */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

	adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
	adv->type = ADV_PCI;
	
	/*
	 * Setup active negation and signal filtering.
	 */
	{
		u_int8_t extra_cfg;

		if (adv->chip_version >= ADV_CHIP_VER_PCI_ULTRA_3150)
			adv->type |= ADV_ULTRA;
		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_WR_EN_FILTER;
		else
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_SLEW_RATE;
		ADV_OUTB(adv, ADV_REG_IFC, extra_cfg);
	}

	if (adv_init(adv) != 0) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->max_dma_count = ADV_PCI_MAX_DMA_COUNT;
	adv->max_dma_addr = ADV_PCI_MAX_DMA_ADDR;

#if defined(CC_DISABLE_PCI_PARITY_INT) && CC_DISABLE_PCI_PARITY_INT
	{
		u_int16_t config_msw;

		config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
		config_msw &= 0xFFC0;
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 
	}
#endif
 
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		adv->bug_fix_control |= ADV_BUG_FIX_IF_NOT_DWB;
		adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN;
		adv->fix_asyn_xfer = ~0;
	}

	irqrid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &irqrid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, adv_intr, adv, &ih, NULL)) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv_attach(adv);
	return 0;
}
Example #12
void
leattach_ledma(device_t parent, device_t self, void *aux)
{
	struct le_softc *lesc = device_private(self);
	struct lance_softc *sc = &lesc->sc_am7990.lsc;
	struct lsi64854_softc *lsi = device_private(parent);
	struct sbus_attach_args *sa = aux;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	int rseg, error;

	sc->sc_dev = self;
	lesc->sc_bustag = sa->sa_bustag;

	/* Establish link to `ledma' device */
	lesc->sc_dma = lsi;
	lesc->sc_dma->sc_client = lesc;

	/* Map device registers */
	if (sbus_bus_map(sa->sa_bustag,
			 sa->sa_slot,
			 sa->sa_offset,
			 sa->sa_size,
			 0, &lesc->sc_reg) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	/* Allocate buffer memory */
	sc->sc_memsize = MEMSIZE;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, MEMSIZE, 1, MEMSIZE,
					LEDMA_BOUNDARY, BUS_DMA_NOWAIT,
					&lesc->sc_dmamap)) != 0) {
		aprint_error(": DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, MEMSIZE, 0, LEDMA_BOUNDARY,
				 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": DMA buffer alloc error %d\n",error);
		return;
	}

	/* Map DMA buffer into kernel space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, MEMSIZE,
			       (void **)&sc->sc_mem,
			       BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load DMA buffer */
	if ((error = bus_dmamap_load(dmatag, lesc->sc_dmamap, sc->sc_mem,
			MEMSIZE, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map load error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		bus_dmamem_unmap(dmatag, sc->sc_mem, MEMSIZE);
		return;
	}

	lesc->sc_laddr = lesc->sc_dmamap->dm_segs[0].ds_addr;
	sc->sc_addr = lesc->sc_laddr & 0xffffff;
	sc->sc_conf3 = LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON;

	/* Assume SBus is grandparent */
	lesc->sc_sd.sd_reset = (void *)lance_reset;
	sbus_establish(&lesc->sc_sd, parent);

	sc->sc_mediachange = lemediachange;
	sc->sc_mediastatus = lemediastatus;
	sc->sc_supmedia = lemedia;
	sc->sc_nsupmedia = NLEMEDIA;
	sc->sc_defaultmedia = IFM_ETHER|IFM_AUTO;

	prom_getether(sa->sa_node, sc->sc_enaddr);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = lerdcsr;
	sc->sc_wrcsr = lewrcsr;
	sc->sc_hwinit = lehwinit;
	sc->sc_nocarrier = lenocarrier;
	sc->sc_hwreset = lehwreset;

	/* Establish interrupt handler */
	if (sa->sa_nintr != 0)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
					 am7990_intr, sc);

	am7990_config(&lesc->sc_am7990);

	/* now initialize DMA */
	lehwreset(sc);
}
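Example #12 follows the canonical NetBSD four-step sequence (bus_dmamap_create, bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_load), but none of its error paths destroy the DMA map created in the first step. Below is a minimal self-contained sketch of the same sequence with complete unwinding; the function name and signature are illustrative, not taken from the driver above.

#include <sys/param.h>
#include <sys/bus.h>		/* NetBSD bus_dma(9) */

/*
 * Sketch: allocate, map and load "size" bytes of DMA-safe memory,
 * undoing every earlier step if a later one fails.
 */
static int
dma_buf_alloc(bus_dma_tag_t t, bus_size_t size, void **kvap,
    bus_dmamap_t *mapp, bus_dma_segment_t *seg)
{
	int rseg, error;

	if ((error = bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		return error;
	if ((error = bus_dmamem_alloc(t, size, 0, 0, seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0)
		goto fail_destroy;
	if ((error = bus_dmamem_map(t, seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0)
		goto fail_free;
	if ((error = bus_dmamap_load(t, *mapp, *kvap, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto fail_unmap;
	/* bus address is now (*mapp)->dm_segs[0].ds_addr */
	return 0;

fail_unmap:
	bus_dmamem_unmap(t, *kvap, size);
fail_free:
	bus_dmamem_free(t, seg, rseg);
fail_destroy:
	bus_dmamap_destroy(t, *mapp);
	return error;
}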
Example #13
/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
	         * The alignment must be greater than or equal to the
	         * "size"; otherwise the object can be split across two
	         * memory pages and we have a problem!
	         */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of
		 * the code is greater than or equal to the size and
		 * less than two times the size, so that if we double
		 * the size, the size will be greater than the
		 * alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size".  If that check fails we end
		 * up using contigmalloc, which is page based even for
		 * small allocations.  Try to avoid that to save
		 * memory; hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}
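Because the load above uses BUS_DMA_WAITOK, bus_dmamap_load() may return EINPROGRESS and complete later through the callback while this thread sleeps on the condvar. The callback body is not shown in this example; the sketch below shows what such a callback typically does. The structure field names mirror the code above but are assumptions, and the real FreeBSD driver routes this work through a shared helper.

/* Hypothetical sketch of the deferred-load callback. */
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct usb_page_cache *pc = arg;
	struct usb_dma_parent_tag *uptag = pc->tag_parent;

	if (error)
		uptag->dma_error = 1;	/* checked after cv_wait() */
	else
		pc->page_start->physaddr = segs[0].ds_addr;

	cv_broadcast(uptag->cv);	/* wake the cv_wait() above */
}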
Example #14
/* ARGSUSED */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
	struct pcctwo_attach_args *pa;
	struct ie_pcctwo_softc *ps;
	struct ie_softc *sc;
	bus_dma_segment_t seg;
	int rseg;

	pa = aux;
	ps = device_private(self);
	sc = &ps->ps_ie;
	sc->sc_dev = self;

	/* Map the MPU controller registers in PCCTWO space */
	ps->ps_bust = pa->pa_bust;
	bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE,
	    0, &ps->ps_bush);

	/* Get contiguous DMA-able memory for the IE chip */
	if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
		&seg, 1, &rseg,
		BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
		aprint_error_dev(self, "Failed to allocate ether buffer\n");
		return;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
	    (void **) & sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "Failed to map ether buffer\n");
		bus_dmamem_free(pa->pa_dmat, &seg, rseg);
		return;
	}
	sc->bt = pa->pa_bust;
	sc->bh = (bus_space_handle_t) sc->sc_maddr;	/* XXXSCW Better way? */
	sc->sc_iobase = (void *) seg.ds_addr;
	sc->sc_msize = ether_data_buff_size;
	memset(sc->sc_maddr, 0, ether_data_buff_size);

	sc->hwreset = ie_reset;
	sc->hwinit = ie_hwinit;
	sc->chan_attn = ie_atten;
	sc->intrhook = ie_intrhook;
	sc->memcopyin = ie_copyin;
	sc->memcopyout = ie_copyout;
	sc->ie_bus_barrier = NULL;
	sc->ie_bus_read16 = ie_read_16;
	sc->ie_bus_write16 = ie_write_16;
	sc->ie_bus_write24 = ie_write_24;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;

	sc->scp = 0;
	sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
	sc->scb = sc->iscp + IE_ISCP_SZ;
	sc->buf_area = sc->scb + IE_SCB_SZ;
	sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);

	/*
	 * BUS_USE -> Interrupt Active High (edge-triggered),
	 *            Lock function enabled,
	 *            Internal bus throttle timer triggering,
	 *            82586 operating mode.
	 */
	ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
	ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
	ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
	ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);

	/* This has the side-effect of resetting the chip */
	i82586_proberam(sc);

	/* Attach the MI back-end */
	i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);

	/* Register the event counter */
	evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
	    pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));

	/* Finally, hook the hardware interrupt */
	pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
	    &ps->ps_evcnt);
}
Example #15
/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	int nsegs;
	int error;

	/*
	 * Allocate a drm_dma_handle record.
	 */
	struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
	    KM_NOSLEEP);
	if (dmah == NULL) {
		error = -ENOMEM;
		goto out;
	}
	dmah->dmah_tag = dev->dmat;

	/*
	 * Allocate the requested amount of DMA-safe memory.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
	    &dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	KASSERT(nsegs == 1);

	/*
	 * XXX Old drm passed BUS_DMA_NOWAIT below but BUS_DMA_WAITOK
	 * above.  WTF?
	 */

	/*
	 * Map the DMA-safe memory into kernel virtual address space.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
	    &dmah->vaddr,
	    (BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
	if (error)
		goto fail1;
	dmah->size = size;

	/*
	 * Create a map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dmah->dmah_map);
	if (error)
		goto fail2;

	/*
	 * Load the kva buffer into the map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
	    size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
	if (error)
		goto fail3;

	/* Record the bus address for convenient reference.  */
	dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;

	/* Zero the DMA buffer.  XXX Yikes!  Is this necessary?  */
	memset(dmah->vaddr, 0, size);

	/* Success!  */
	return dmah;

fail3:	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2:	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1:	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0:	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
out:	DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
	return NULL;
}
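The fail labels above double as documentation of the teardown order. A matching free routine is sketched below as the reverse sequence; it is an illustration of that order, not a quote of the real drm_pci_free.

/* Sketch: release everything drm_pci_alloc() set up, newest first. */
void
drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
{

	bus_dmamap_unload(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
	kmem_free(dmah, sizeof(*dmah));
}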
Example #16
static int
bt_mca_attach (device_t dev)
{
	struct bt_softc *	bt = device_get_softc(dev);
	int			error = 0;

	/* Allocate resources */      
	if ((error = bt_mca_alloc_resources(dev, BT_MCA_ATTACH))) {
		device_printf(dev, "Unable to allocate resources in bt_mca_attach()\n");
		return (error);
	}

	isa_dmacascade(rman_get_start(bt->drq));

	/* Allocate a dmatag for our CCB DMA maps */
	if (bus_dma_tag_create( /* parent	*/ NULL,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR_24BIT,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ btvlbouncefilter,
				/* filterarg	*/ bt,
				/* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &Giant,
				&bt->parent_dmat) != 0) {
		bt_mca_release_resources(dev);
		return (ENOMEM);
	}

	if (bt_init(dev)) {
		bt_mca_release_resources(dev);
		return (ENOMEM);
	}

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(	/* parent	*/ bt->parent_dmat,
				/* alignment	*/ 1, 
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,    
				/* highaddr	*/ BUS_SPACE_MAXADDR,   
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ bt->max_ccbs *
						   sizeof(struct scsi_sense_data),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &Giant,
				&bt->sense_dmat) != 0) {
		bt_mca_release_resources(dev);
		return (ENOMEM);
	}

	bt->init_level++;     

	/* Allocation of sense buffers */
	if (bus_dmamem_alloc(bt->sense_dmat,
			     (void **)&bt->sense_buffers,       
			     BUS_DMA_NOWAIT, &bt->sense_dmamap) != 0) {
		bt_mca_release_resources(dev);
		return (ENOMEM);
	}

	bt->init_level++;     

	/* And permanently map them */
	bus_dmamap_load(bt->sense_dmat, bt->sense_dmamap,       
			bt->sense_buffers,
			bt->max_ccbs * sizeof(*bt->sense_buffers),
			btmapsensebuffers, bt, /*flags*/0);     

	bt->init_level++;     

	if ((error = bt_attach(dev))) {
		bt_mca_release_resources(dev);
		return (error);
	}

	return (0);
}
Example #17
static int
atiixp_pci_attach(device_t dev)
{
	struct atiixp_info *sc;
	int i;

	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "sound softc");
	sc->dev = dev;
	/*
	 * Default DMA segments per playback / recording channel
	 */
	sc->dma_segs = ATI_IXP_DMA_CHSEGS;

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_enable_busmaster(dev);

	sc->regid = PCIR_BAR(0);
	sc->regtype = SYS_RES_MEMORY;
	sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid,
								RF_ACTIVE);

	if (!sc->reg) {
		device_printf(dev, "unable to allocate register space\n");
		goto bad;
	}

	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, ATI_IXP_DEFAULT_BUFSZ, 65536);

	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
						RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq || 
			snd_setup_intr(dev, sc->irq, INTR_MPSAFE,
						atiixp_intr, sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	/*
	 * Let the user choose the best DMA segments.
	 */
	if (resource_int_value(device_get_name(dev),
			device_get_unit(dev), "dma_segs",
			&i) == 0) {
		if (i < ATI_IXP_DMA_CHSEGS_MIN)
			i = ATI_IXP_DMA_CHSEGS_MIN;
		if (i > ATI_IXP_DMA_CHSEGS_MAX)
			i = ATI_IXP_DMA_CHSEGS_MAX;
		sc->dma_segs = i;
	}

	/*
	 * round the value down to the nearest power of two
	 */
	i = 0;
	while (sc->dma_segs >> i)
		i++;
	sc->dma_segs = 1 << (i - 1);
	if (sc->dma_segs < ATI_IXP_DMA_CHSEGS_MIN)
		sc->dma_segs = ATI_IXP_DMA_CHSEGS_MIN;
	else if (sc->dma_segs > ATI_IXP_DMA_CHSEGS_MAX)
		sc->dma_segs = ATI_IXP_DMA_CHSEGS_MAX;

	/*
	 * DMA tag for scatter-gather buffers and link pointers
	 */
	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegsz*/0x3ffff,
		/*flags*/0,
		&sc->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/sc->dma_segs * ATI_IXP_NCHANS *
						sizeof(struct atiixp_dma_op),
		/*nsegments*/1, /*maxsegsz*/0x3ffff,
		/*flags*/0,
		&sc->sgd_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (bus_dmamem_alloc(sc->sgd_dmat, (void **)&sc->sgd_table,
				BUS_DMA_NOWAIT, &sc->sgd_dmamap) != 0)
		goto bad;

	if (bus_dmamap_load(sc->sgd_dmat, sc->sgd_dmamap, sc->sgd_table, 
				sc->dma_segs * ATI_IXP_NCHANS *
						sizeof(struct atiixp_dma_op),
				atiixp_dma_cb, sc, 0))
		goto bad;


	atiixp_chip_pre_init(sc);

	sc->delayed_attach.ich_func = atiixp_chip_post_init;
	sc->delayed_attach.ich_arg = sc;
	sc->delayed_attach.ich_desc = "snd_atiixp";
	if (cold == 0 ||
			config_intrhook_establish(&sc->delayed_attach) != 0) {
		sc->delayed_attach.ich_func = NULL;
		atiixp_chip_post_init(sc);
	}

	return 0;

bad:
	atiixp_release_resource(sc);
	return ENXIO;
}
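The shift loop near the middle of atiixp_pci_attach() computes the position of the highest set bit, so 1 << (i - 1) rounds sc->dma_segs down to a power of two: for dma_segs = 5 (binary 101) the loop runs three times and yields 1 << 2 = 4. An equivalent stand-alone helper, as a sketch:

/* Sketch: round v down to the nearest power of two (v > 0). */
static unsigned int
round_down_pow2(unsigned int v)
{
	unsigned int p = 1;

	while ((p << 1) != 0 && (p << 1) <= v)
		p <<= 1;
	return p;		/* round_down_pow2(5) == 4 */
}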
Example #18
static int
ct_isa_attach(device_t dev)
{
	struct ct_isa_softc *pct = device_get_softc(dev);
	struct ct_softc *ct = &pct->sc_ct;
	struct ct_bus_access_handle *chp = &ct->sc_ch;
	struct scsi_low_softc *slp = &ct->sc_sclow;
	struct bshw_softc *bs = &pct->sc_bshw;
	struct bshw *hw;
	int irq_rid, drq_rid, chiprev;
	u_int8_t *vaddr;
	bus_addr_t addr;
	intrmask_t s;

	hw = ct_find_hw(dev);
	if (ct_space_map(dev, hw, &ct->port_res, &ct->mem_res) != 0) {
		device_printf(dev, "bus io mem map failed\n");
		return ENXIO;
	}

	bzero(chp, sizeof(*chp));
	chp->ch_iot = rman_get_bustag(ct->port_res);
	chp->ch_ioh = rman_get_bushandle(ct->port_res);
	if (ct->mem_res) {
		chp->ch_memt = rman_get_bustag(ct->mem_res);
		chp->ch_memh = rman_get_bushandle(ct->mem_res);
	}
	chp->ch_bus_weight = ct_isa_bus_access_weight;

	irq_rid = 0;
	ct->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid,
					     RF_ACTIVE);
	drq_rid = 0;
	ct->drq_res = bus_alloc_resource_any(dev, SYS_RES_DRQ, &drq_rid,
					     RF_ACTIVE);
	if (ct->irq_res == NULL || ct->drq_res == NULL) {
		ct_space_unmap(dev, ct);
		return ENXIO;
	}

	if (ctprobesubr(chp, 0, BSHW_DEFAULT_HOSTID,
			BSHW_DEFAULT_CHIPCLK, &chiprev) == 0)
	{
		device_printf(dev, "hardware missing\n");
		ct_space_unmap(dev, ct);
		return ENXIO;
	}

	/* setup DMA map */
	if (bus_dma_tag_create(NULL, 1, 0,
			       BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
			       NULL, NULL, MAXBSIZE, 1,
			       BUS_SPACE_MAXSIZE_32BIT,
			       BUS_DMA_ALLOCNOW, NULL, NULL,
			       &ct->sc_dmat) != 0) {
		device_printf(dev, "can't set up ISA DMA map\n");
		ct_space_unmap(dev, ct);
		return ENXIO;
	}

	if (bus_dmamem_alloc(ct->sc_dmat, (void **)&vaddr, BUS_DMA_NOWAIT,
			     &ct->sc_dmamapt) != 0) {
		device_printf(dev, "can't set up ISA DMA map\n");
		ct_space_unmap(dev, ct);
		return ENXIO;
	}

	bus_dmamap_load(ct->sc_dmat, ct->sc_dmamapt, vaddr, MAXBSIZE,
			ct_dmamap, &addr, BUS_DMA_NOWAIT);

	/* setup machdep softc */
	bs->sc_hw = hw;
	bs->sc_io_control = 0;
	bs->sc_bounce_phys = (u_int8_t *)addr;
	bs->sc_bounce_addr = vaddr;
	bs->sc_bounce_size = MAXBSIZE;
	bs->sc_minphys = (1 << 24);
	bs->sc_dmasync_before = ct_isa_dmasync_before;
	bs->sc_dmasync_after = ct_isa_dmasync_after;
	bshw_read_settings(chp, bs);

	/* setup ct driver softc */
	ct->ct_hw = bs;
	ct->ct_dma_xfer_start = bshw_dma_xfer_start;
	ct->ct_pio_xfer_start = bshw_smit_xfer_start;
	ct->ct_dma_xfer_stop = bshw_dma_xfer_stop;
	ct->ct_pio_xfer_stop = bshw_smit_xfer_stop;
	ct->ct_bus_reset = bshw_bus_reset;
	ct->ct_synch_setup = bshw_synch_setup;

	ct->sc_xmode = CT_XMODE_DMA;
	if (chp->ch_memh != NULL)
		ct->sc_xmode |= CT_XMODE_PIO;

	ct->sc_chiprev = chiprev;
	switch (chiprev)
	{
	case CT_WD33C93:
		/* s = "WD33C93"; */
		ct->sc_chipclk = 8;
		break;
	case CT_WD33C93_A:
		if (DVCFG_MAJOR(device_get_flags(dev)) > 0)
		{
			/* s = "AM33C93_A"; */
			ct->sc_chipclk = 20;
			ct->sc_chiprev = CT_AM33C93_A;
		}
		else
		{
			/* s = "WD33C93_A"; */
			ct->sc_chipclk = 10;
		}
		break;

	case CT_AM33C93_A:
		/* s = "AM33C93_A"; */
		ct->sc_chipclk = 20;
		break;

	default:
	case CT_WD33C93_B:
		/* s = "WD33C93_B"; */
		ct->sc_chipclk = 20;
		break;
	}
#if	0
	printf("%s: chiprev %s chipclk %d MHz\n", 
		slp->sl_dev.dv_xname, s, ct->sc_chipclk);
#endif

	slp->sl_dev = dev;
	slp->sl_hostid = bs->sc_hostid;
	slp->sl_cfgflags = device_get_flags(dev);

	s = splcam();
	ctattachsubr(ct);
	splx(s);

	if (bus_setup_intr(dev, ct->irq_res, INTR_TYPE_CAM,
			   NULL, (driver_intr_t *)ctintr, ct, &ct->sc_ih)) {
		ct_space_unmap(dev, ct);
		return ENXIO;
	}

	return 0;
}
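The ct_dmamap callback passed to bus_dmamap_load() above receives &addr, so the bus address of the single-segment bounce buffer survives the load. Its body is not part of this example; for a one-segment load such a callback is conventionally just the following (a sketch):

/* Sketch: single-segment load callback that copies out ds_addr. */
static void
ct_dmamap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addrp = arg;

	if (error == 0 && nseg == 1)
		*addrp = segs[0].ds_addr;
}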
Example #19
static int
at91_mci_attach(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	device_t child;
	int err, i;

	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);

	sc->dev = dev;
	sc->sc_cap = 0;
	if (at91_is_rm92())
		sc->sc_cap |= CAP_NEEDS_BYTESWAP;
	/*
	 * MCI1 Rev 2 controllers need some workarounds, flag if so.
	 */
	if (at91_mci_is_mci1rev2xx())
		sc->sc_cap |= CAP_MCI1_REV2XX;

	err = at91_mci_activate(dev);
	if (err)
		goto out;

	AT91_MCI_LOCK_INIT(sc);

	at91_mci_fini(dev);
	at91_mci_init(dev);

	/*
	 * Allocate DMA tags and maps and bounce buffers.
	 *
	 * The parms in the tag_create call cause the dmamem_alloc call to
	 * create each bounce buffer as a single contiguous buffer of BBSIZE
	 * bytes aligned to a 4096 byte boundary.
	 *
	 * Do not use DMA_COHERENT for these buffers because that maps the
	 * memory as non-cachable, which prevents cache line burst fills/writes,
	 * which is something we need since we're trying to overlap the
	 * byte-swapping with the DMA operations.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag);
	if (err != 0)
		goto out;

	for (i = 0; i < BBCOUNT; ++i) {
		err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i],
		    BUS_DMA_NOWAIT, &sc->bbuf_map[i]);
		if (err != 0)
			goto out;
	}

	/*
	 * Activate the interrupt
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, at91_mci_intr, sc, &sc->intrhand);
	if (err) {
		AT91_MCI_LOCK_DESTROY(sc);
		goto out;
	}

	/*
	 * Allow 4-wire to be initially set via #define.
	 * Allow a device hint to override that.
	 * Allow a sysctl to override that.
	 */
#if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0
	sc->has_4wire = 1;
#endif
	resource_int_value(device_get_name(dev), device_get_unit(dev),
			   "4wire", &sc->has_4wire);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire",
	    CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus");
	if (sc->has_4wire)
		sc->sc_cap |= CAP_HAS_4WIRE;

	sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
			   "allow_overclock", &sc->allow_overclock);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock",
	    CTLFLAG_RW, &sc->allow_overclock, 0,
	    "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less.");

	/*
	 * Our real min freq is master_clock/512, but upper driver layers are
	 * going to set the min speed during card discovery, and the right speed
	 * for that is 400kHz, so advertise a safe value just under that.
	 *
	 * For max speed, while the rm9200 manual says the max is 50MHz, it also
	 * says it supports only the SD v1.0 spec, which means the real limit is
	 * 25MHz. On the other hand, historical use has been to slightly violate
	 * the standard by running the bus at 30MHz.  For more information on
	 * that, see the comments at the top of this file.
	 */
	sc->host.f_min = 375000;
	sc->host.f_max = at91_master_clock / 2;
	if (sc->host.f_max > 25000000)
		sc->host.f_max = 25000000;
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = 0;
	if (sc->sc_cap & CAP_HAS_4WIRE)
		sc->host.caps |= MMC_CAP_4_BIT_DATA;

	child = device_add_child(dev, "mmc", 0);
	device_set_ivars(dev, &sc->host);
	err = bus_generic_attach(dev);
out:
	if (err)
		at91_mci_deactivate(dev);
	return (err);
}
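Since the bounce buffers above are deliberately cache-enabled (no BUS_DMA_COHERENT), each transfer has to be bracketed with explicit syncs. A hedged sketch of the transmit-side ordering follows; bbuf_byteswap() and at91_mci_start_dma() are assumed helpers, not APIs from the driver above, and the map is presumed loaded.

/* Sketch: stage data through cached bounce buffer 0, with the
 * explicit sync a non-coherent buffer requires before DMA. */
static void
bbuf_stage_tx(struct at91_mci_softc *sc, const void *data, size_t len)
{
	memcpy(sc->bbuf_vaddr[0], data, len);
	bbuf_byteswap(sc->bbuf_vaddr[0], len);		/* assumed helper */
	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0], BUS_DMASYNC_PREWRITE);
	at91_mci_start_dma(sc);				/* assumed helper */
}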
Example #20
/*
 * Attach all the sub-devices we can find
 */
static int
bt_isa_attach(device_t dev)
{
	struct	bt_softc *bt = device_get_softc(dev);
	bus_dma_filter_t *filter;
	void		 *filter_arg;
	bus_addr_t	 lowaddr;
	int		 error, drq;

	/* Initialise softc */
	error = bt_isa_alloc_resources(dev, 0, ~0);
	if (error) {
		device_printf(dev, "can't allocate resources in bt_isa_attach\n");
		return error;
	}

	/* Program the DMA channel for external control */
	if ((drq = isa_get_drq(dev)) != -1)
		isa_dmacascade(drq);

	/* Allocate our parent dmatag */
	filter = NULL;
	filter_arg = NULL;
	lowaddr = BUS_SPACE_MAXADDR_24BIT;
	if (bt->model[0] == '4') {
		/*
		 * This is a VL adapter.  Typically, VL devices have access
		 * to the full 32bit address space.  On BT-445S adapters
		 * prior to revision E, there is a hardware bug that causes
		 * corruption of transfers to/from addresses in the range of
		 * the BIOS modulo 16MB.  The only properly functioning
		 * BT-445S Host Adapters have firmware version 3.37.
		 * If we encounter one of these adapters and the BIOS is
		 * installed, install a filter function for our bus_dma_map
		 * that will catch these accesses and bounce them to a safe
		 * region of memory.
		 */
		if (bt->bios_addr != 0
		 && strcmp(bt->model, "445S") == 0
		 && strcmp(bt->firmware_ver, "3.37") < 0) {
			filter = btvlbouncefilter;
			filter_arg = bt;
		} else {
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
		}
	}
			
	/* XXX Should be a child of the ISA or VL bus dma tag */
	if (bus_dma_tag_create(	/* parent	*/ bus_get_dma_tag(dev),
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ lowaddr,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ filter,
				/* filterarg	*/ filter_arg,
				/* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&bt->parent_dmat) != 0) {
		bt_isa_release_resources(dev);
		return (ENOMEM);
	}

	error = bt_init(dev);
	if (error) {
		bt_isa_release_resources(dev);
		return (ENOMEM);
	}

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* DMA tag for our sense buffers */
		if (bus_dma_tag_create(
				/* parent	*/ bt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ bt->max_ccbs *
						   sizeof(struct scsi_sense_data),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&bt->sense_dmat) != 0) {
			bt_isa_release_resources(dev);
			return (ENOMEM);
		}

		bt->init_level++;

		/* Allocation of sense buffers */
		if (bus_dmamem_alloc(bt->sense_dmat,
				     (void **)&bt->sense_buffers,
				     BUS_DMA_NOWAIT, &bt->sense_dmamap) != 0) {
			bt_isa_release_resources(dev);
			return (ENOMEM);
		}

		bt->init_level++;

		/* And permanently map them */
		bus_dmamap_load(bt->sense_dmat, bt->sense_dmamap,
				bt->sense_buffers,
				bt->max_ccbs * sizeof(*bt->sense_buffers),
				btmapsensebuffers, bt, /*flags*/0);

		bt->init_level++;
	}

	error = bt_attach(dev);
	if (error) {
		bt_isa_release_resources(dev);
		return (error);
	}

	return (0);
}
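btvlbouncefilter itself is not reproduced in this example. Per bus_dma(9), a filter returns non-zero for pages the device cannot safely reach; for the BT-445S bug described in the comment it would flag physical pages that alias the BIOS image modulo 16MB, roughly as below. This is a sketch only, and the region size is an assumed constant.

#define BT445S_BIOS_ALIAS_SIZE	0x8000	/* assumed BIOS image size */

/* Sketch: non-zero means "bounce this page". */
static int
btvlbouncefilter(void *arg, bus_addr_t addr)
{
	struct bt_softc *bt = arg;

	addr &= 0xFFFFFF;	/* position within each 16MB window */
	return (addr >= bt->bios_addr
	     && addr < bt->bios_addr + BT445S_BIOS_ALIAS_SIZE);
}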
Example #21
static int
mpt_dma_mem_alloc(struct mpt_softc *mpt)
{
	size_t len;
	struct mpt_map_info mi;

	/* Check if we have already allocated the reply memory */
	if (mpt->reply_phys != 0) {
		return 0;
	}

	len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt);
#ifdef	RELENG_4
	mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
	memset(mpt->request_pool, 0, len);
#else
	mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
#endif

	/*
	 * Create a parent dma tag for this device.
	 *
	 * Align at byte boundaries,
	 * Limit to 32-bit addressing for request/reply queues.
	 */
	if (mpt_dma_tag_create(mpt, /*parent*/bus_get_dma_tag(mpt->dev),
	    /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
	    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/BUS_SPACE_UNRESTRICTED,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    &mpt->parent_dmat) != 0) {
		mpt_prt(mpt, "cannot create parent dma tag\n");
		return (1);
	}

	/* Create a child tag for reply buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->reply_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for replies\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for replies */
	if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply,
	    BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n",
		    (u_long) (2 * PAGE_SIZE));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/* Load and lock it into "bus space" */
	bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply,
	    2 * PAGE_SIZE, mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n",
		    mi.error);
		return (1);
	}
	mpt->reply_phys = mi.phys;

	return (0);
}
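A matching release path undoes those steps newest-first. The sketch below assumes the mpt_dma_tag_create wrapper maps onto the standard bus_dma_tag_destroy; it is an illustration of the order, not the driver's actual free routine.

/* Sketch: tear down what mpt_dma_mem_alloc() built, newest first. */
static void
mpt_dma_mem_release(struct mpt_softc *mpt)
{
	if (mpt->reply_phys != 0)
		bus_dmamap_unload(mpt->reply_dmat, mpt->reply_dmap);
	if (mpt->reply != NULL)
		bus_dmamem_free(mpt->reply_dmat, mpt->reply,
		    mpt->reply_dmap);
	if (mpt->reply_dmat != NULL)
		bus_dma_tag_destroy(mpt->reply_dmat);
	if (mpt->parent_dmat != NULL)
		bus_dma_tag_destroy(mpt->parent_dmat);
	free(mpt->request_pool, M_DEVBUF);
}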
Example #22
void
beattach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = device_private(parent);
	struct be_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int instance;
	int rseg, error;
	uint32_t v;

	sc->sc_dev = self;

	if (sa->sa_nreg < 3) {
		printf(": only %d register sets\n", sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[0].oa_space, sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[1].oa_space, sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_br) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[2].oa_space, sa->sa_reg[2].oa_base),
	    (bus_size_t)sa->sa_reg[2].oa_size,
	    0, &sc->sc_tr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_rev = prom_getpropint(node, "board-version", -1);
	printf(": rev %x,", sc->sc_rev);

	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	if (sc->sc_channel == -1)
		sc->sc_channel = 0;

	sc->sc_burst = prom_getpropint(node, "burst-sizes", -1);
	if (sc->sc_burst == -1)
		sc->sc_burst = qec->sc_burst;

	/* Clamp at parent's burst sizes */
	sc->sc_burst &= qec->sc_burst;

	/* Establish interrupt handler */
	if (sa->sa_nintr)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
		    beintr, sc);

	prom_getether(node, sc->sc_enaddr);
	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * BE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * BE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(sa->sa_dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n", error);
		bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n", error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize our media structures and MII info.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = be_mii_readreg;
	mii->mii_writereg = be_mii_writereg;
	mii->mii_statchg = be_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, be_ifmedia_upd, be_ifmedia_sts);

	/*
	 * Initialize transceiver and determine which PHY connection to use.
	 */
	be_mii_sync(sc);
	v = bus_space_read_4(sc->sc_bustag, sc->sc_tr, BE_TRI_MGMTPAL);

	instance = 0;

	if ((v & MGMT_PAL_EXT_MDIO) != 0) {

		mii_attach(self, mii, 0xffffffff, BE_PHY_EXTERNAL,
		    MII_OFFSET_ANY, 0);

		child = LIST_FIRST(&mii->mii_phys);
		if (child == NULL) {
			/* No PHY attached */
			ifmedia_add(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance),
			    0, NULL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance));
		} else {
			/*
			 * Note: we support just one PHY on the external
			 * MII connector.
			 */
#ifdef DIAGNOSTIC
			if (LIST_NEXT(child, mii_list) != NULL) {
				aprint_error_dev(self,
				    "spurious MII device %s attached\n",
				    device_xname(child->mii_dev));
			}
#endif
			if (child->mii_phy != BE_PHY_EXTERNAL ||
			    child->mii_inst > 0) {
				aprint_error_dev(self,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				       device_xname(child->mii_dev),
				       child->mii_phy, child->mii_inst);
			} else {
				sc->sc_phys[instance] = child->mii_phy;
			}

			/*
			 * XXX - we can really do the following ONLY if the
			 * phy indeed has the auto negotiation capability!!
			 */
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));

			/* Mark our current media setting */
			be_pal_gate(sc, BE_PHY_EXTERNAL);
			instance++;
		}

	}

	if ((v & MGMT_PAL_INT_MDIO) != 0) {
		/*
		 * The be internal phy looks vaguely like MII hardware,
		 * but not enough to be able to use the MII device
		 * layer. Hence, we have to take care of media selection
		 * ourselves.
		 */

		sc->sc_mii_inst = instance;
		sc->sc_phys[instance] = BE_PHY_INTERNAL;

		/* Use `ifm_data' to store BMCR bits */
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, instance),
		    0, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, instance),
		    BMCR_S100, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance),
		    0, NULL);

		printf("on-board transceiver at %s: 10baseT, 100baseTX, auto\n",
		    device_xname(self));

		be_mii_reset(sc, BE_PHY_INTERNAL);
		/* Only set default medium here if there's no external PHY */
		if (instance == 0) {
			be_pal_gate(sc, BE_PHY_INTERNAL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));
		} else
			be_mii_writereg(self,
			    BE_PHY_INTERNAL, MII_BMCR, BMCR_ISO);
	}

	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = bestart;
	ifp->if_ioctl = beioctl;
	ifp->if_watchdog = bewatchdog;
	ifp->if_init = beinit;
	ifp->if_stop = bestop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
}
Example #23
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
					      self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] !=  NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}
Example #24
static int
msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
    uint32_t desc_size, uint32_t align)
{
	int nsegments;
	int err;
	int i;

	nsegments = chan->descs_num;

	dprintf("%s: nseg %d\n", __func__, nsegments);

	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),
	    align, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    desc_size, 1,		/* maxsize, nsegments*/
	    desc_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &chan->dma_tag);
	if (err) {
		device_printf(sc->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	/* Descriptors. */
	chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	if (chan->descs == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		return (-1);
	}
	chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));

	/* Allocate bus_dma memory for each descriptor. */
	for (i = 0; i < nsegments; i++) {
		err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't allocate memory for descriptors.\n",
			    __func__);
			return (-1);
		}

		chan->map_err = 0;
		chan->map_descr = i;
		err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i], chan->descs[i],
		    desc_size, msgdma_dmamap_cb, chan, BUS_DMA_WAITOK);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}

		if (chan->map_err != 0) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}
	}

	return (0);
}
Example #25
/*
 * Function name:	tw_osli_alloc_mem
 * Description:		Allocates memory needed both by CL and OSL.
 *
 * Input:		sc	-- OSL internal controller context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS,  TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag. */
	if (bus_dma_tag_create(NULL,			/* parent */
				sc->alignment,		/* alignment */
				TW_OSLI_DMA_BOUNDARY,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
			malloc((sizeof(struct tw_osli_req_context) *
				TW_OSLI_MAX_NUM_REQUESTS),
				TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctx_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/ timeout mutex */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}
Example #26
void
oosiop_attach(struct oosiop_softc *sc)
{
	struct scsibus_attach_args saa;
	bus_size_t scrsize;
	bus_dma_segment_t seg;
	struct oosiop_cb *cb;
	int err, i, nseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	scrsize = round_page(sizeof(oosiop_script));
	err = bus_dmamem_alloc(sc->sc_dmat, scrsize, PAGE_SIZE, 0, &seg, 1,
	    &nseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (err) {
		printf(": failed to allocate script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamem_map(sc->sc_dmat, &seg, nseg, scrsize,
	    (caddr_t *)&sc->sc_scr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (err) {
		printf(": failed to map script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamap_create(sc->sc_dmat, scrsize, 1, scrsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_scrdma);
	if (err) {
		printf(": failed to create script map, err=%d\n", err);
		return;
	}
	err = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_scrdma,
	    &seg, nseg, scrsize, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		printf(": failed to load script map, err=%d\n", err);
		return;
	}
	sc->sc_scrbase = sc->sc_scrdma->dm_segs[0].ds_addr;

	/* Initialize command block array */
	TAILQ_INIT(&sc->sc_free_cb);
	TAILQ_INIT(&sc->sc_cbq);
	if (oosiop_alloc_cb(sc, OOSIOP_NCB) != 0)
		return;

	/* Use the first cb for the reselection msgin buffer */
	cb = TAILQ_FIRST(&sc->sc_free_cb);
	sc->sc_reselbuf = cb->xferdma->dm_segs[0].ds_addr +
	    offsetof(struct oosiop_xfer, msgin[0]);

	for (i = 0; i < OOSIOP_NTGT; i++) {
		sc->sc_tgt[i].nexus = NULL;
		sc->sc_tgt[i].flags = 0;
	}

	/* Setup asynchronous clock divisor parameters */
	if (sc->sc_freq <= 25000000) {
		sc->sc_ccf = 10;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1;
	} else if (sc->sc_freq <= 37500000) {
		sc->sc_ccf = 15;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1_5;
	} else if (sc->sc_freq <= 50000000) {
		sc->sc_ccf = 20;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_2;
	} else {
		sc->sc_ccf = 30;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_3;
	}

	if (sc->sc_chip == OOSIOP_700)
		sc->sc_minperiod = oosiop_period(sc, 4, sc->sc_ccf);
	else
		sc->sc_minperiod = oosiop_period(sc, 4, 10);

	if (sc->sc_minperiod < 25)
		sc->sc_minperiod = 25;	/* limit to 10MB/s */

	mtx_init(&sc->sc_cb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, oosiop_cb_alloc, oosiop_cb_free);

	printf(": NCR53C700%s rev %d, %dMHz\n",
	    sc->sc_chip == OOSIOP_700_66 ? "-66" : "",
	    oosiop_read_1(sc, OOSIOP_CTEST7) >> 4,
	    sc->sc_freq / 1000000);
	/*
	 * Reset all
	 */
	oosiop_reset(sc, TRUE);
	oosiop_reset_bus(sc);

	/*
	 * Start SCRIPTS processor
	 */
	oosiop_load_script(sc);
	sc->sc_active = 0;
	oosiop_write_4(sc, OOSIOP_DSP, sc->sc_scrbase + Ent_wait_reselect);

	/*
	 * Fill in the sc_link.
	 */
	sc->sc_link.adapter = &oosiop_adapter;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.openings = 1;	/* XXX */
	sc->sc_link.adapter_buswidth = OOSIOP_NTGT;
	sc->sc_link.adapter_target = sc->sc_id;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.quirks = ADEV_NODOORLOCK;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/*
	 * Now try to attach all the sub devices.
	 */
	config_found(&sc->sc_dev, &saa, scsiprint);
}
Example #27
/*
 * Start the board, ready for normal operation
 */
int
aha_init(struct aha_softc* aha)
{
	/* Announce the Adapter */
	device_printf(aha->dev, "AHA-%s FW Rev. %c.%c (ID=%x) ",
	    aha->model, aha->fw_major, aha->fw_minor, aha->boardid);

	if (aha->diff_bus != 0)
		printf("Diff ");

	printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", aha->scsi_id,
	    aha->max_ccbs);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create( /* parent	*/ aha->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ DFLTPHYS,
				/* nsegments	*/ AHA_NSEG,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* flags	*/ BUS_DMA_ALLOCNOW,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &aha->lock,
				&aha->buffer_dmat) != 0) {
		goto error_exit;
	}

	aha->init_level++;
	/* DMA tag for our mailboxes */
	if (bus_dma_tag_create(	/* parent	*/ aha->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ aha->num_boxes *
						   (sizeof(aha_mbox_in_t) +
						    sizeof(aha_mbox_out_t)),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&aha->mailbox_dmat) != 0) {
		goto error_exit;
        }

	aha->init_level++;

	/* Allocation for our mailboxes */
	if (bus_dmamem_alloc(aha->mailbox_dmat, (void **)&aha->out_boxes,
	    BUS_DMA_NOWAIT, &aha->mailbox_dmamap) != 0)
		goto error_exit;

	aha->init_level++;

	/* And permanently map them */
	bus_dmamap_load(aha->mailbox_dmat, aha->mailbox_dmamap,
	    aha->out_boxes, aha->num_boxes * (sizeof(aha_mbox_in_t) +
	    sizeof(aha_mbox_out_t)), ahamapmboxes, aha, /*flags*/0);

	aha->init_level++;

	aha->in_boxes = (aha_mbox_in_t *)&aha->out_boxes[aha->num_boxes];

	ahainitmboxes(aha);

	/* DMA tag for our ccb structures */
	if (bus_dma_tag_create(	/* parent	*/ aha->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ aha->max_ccbs *
						   sizeof(struct aha_ccb),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&aha->ccb_dmat) != 0) {
		goto error_exit;
        }

	aha->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(aha->ccb_dmat, (void **)&aha->aha_ccb_array,
	    BUS_DMA_NOWAIT, &aha->ccb_dmamap) != 0)
		goto error_exit;

	aha->init_level++;

	/* And permanently map them */
	bus_dmamap_load(aha->ccb_dmat, aha->ccb_dmamap, aha->aha_ccb_array,
	    aha->max_ccbs * sizeof(struct aha_ccb), ahamapccbs, aha, /*flags*/0);

	aha->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(	/* parent	*/ aha->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ PAGE_SIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&aha->sg_dmat) != 0)
		goto error_exit;

	aha->init_level++;

	/* Perform initial CCB allocation */
	bzero(aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb));
	ahaallocccbs(aha);

	if (aha->num_ccbs == 0) {
		device_printf(aha->dev,
		    "aha_init - Unable to allocate initial ccbs\n");
		goto error_exit;
	}

	/*
	 * Note that we are going and return (to probe)
	 */
	return (0);

error_exit:

	return (ENXIO);
}
Example #28
int
oosiop_alloc_cb(struct oosiop_softc *sc, int ncb)
{
	struct oosiop_cb *cb;
	struct oosiop_xfer *xfer;
	bus_size_t xfersize;
	bus_dma_segment_t seg;
	int i, s, err, nseg;

	/*
	 * Allocate oosiop_cb.
	 */
	cb = malloc(sizeof(*cb) * ncb, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cb == NULL) {
		printf(": failed to allocate cb memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA-safe memory for the oosiop_xfer and map it.
	 */
	xfersize = sizeof(struct oosiop_xfer) * ncb;
	err = bus_dmamem_alloc(sc->sc_dmat, xfersize, PAGE_SIZE, 0, &seg, 1,
	    &nseg, BUS_DMA_NOWAIT);
	if (err) {
		printf(": failed to allocate xfer block memory, err=%d\n", err);
		return (err);
	}
	err = bus_dmamem_map(sc->sc_dmat, &seg, nseg, xfersize,
	    (caddr_t *)(void *)&xfer, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (err) {
		printf(": failed to map xfer block memory, err=%d\n", err);
		return (err);
	}

	/* Initialize each command block */
	for (i = 0; i < ncb; i++) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
		    0, BUS_DMA_NOWAIT, &cb->cmddma);
		if (err) {
			printf(": failed to create cmddma map, err=%d\n", err);
			return (err);
		}

		err = bus_dmamap_create(sc->sc_dmat, OOSIOP_MAX_XFER,
		    OOSIOP_NSG, OOSIOP_DBC_MAX, 0, BUS_DMA_NOWAIT,
		    &cb->datadma);
		if (err) {
			printf(": failed to create datadma map, err=%d\n", err);
			return (err);
		}

		err = bus_dmamap_create(sc->sc_dmat,
		    sizeof(struct oosiop_xfer), 1, sizeof(struct oosiop_xfer),
		    0, BUS_DMA_NOWAIT, &cb->xferdma);
		if (err) {
			printf(": failed to create xfer block map, err=%d\n",
			    err);
			return (err);
		}
		err = bus_dmamap_load(sc->sc_dmat, cb->xferdma, xfer,
		    sizeof(struct oosiop_xfer), NULL, BUS_DMA_NOWAIT);
		if (err) {
			printf(": failed to load xfer block, err=%d\n", err);
			return (err);
		}

		cb->xfer = xfer;

		s = splbio();
		TAILQ_INSERT_TAIL(&sc->sc_free_cb, cb, chain);
		splx(s);

		cb++;
		xfer++;
	}

	return (0);
}
Example #29
static int
amr_sglist_map(struct amr_softc *sc)
{
    size_t	segsize;
    void	*p;
    int		error;

    debug_called(1);

    /*
     * Create a single tag describing a region large enough to hold all of
     * the s/g lists we will need.
     *
     * Note that we could probably use AMR_LIMITCMD here, but that may become
     * tunable.
     */
    if (AMR_IS_SG64(sc))
	segsize = sizeof(struct amr_sg64entry) * AMR_NSEG * AMR_MAXCMD;
    else
	segsize = sizeof(struct amr_sgentry) * AMR_NSEG * AMR_MAXCMD;

    error = bus_dma_tag_create(sc->amr_parent_dmat, 	/* parent */
			       512, 0, 			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       segsize, 1,		/* maxsize, nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* lockfunc, lockarg */
			       &sc->amr_sg_dmat);
    if (error != 0) {
	device_printf(sc->amr_dev, "can't allocate scatter/gather DMA tag\n");
	return(ENOMEM);
    }

    /*
     * Allocate enough s/g maps for all commands and permanently map them into
     * controller-visible space.
     *	
     * XXX this assumes we can get enough space for all the s/g maps in one 
     * contiguous slab.  We may need to switch to a more complex arrangement
     * where we allocate in smaller chunks and keep a lookup table from slot
     * to bus address.
     *
     * XXX HACK ALERT:	at least some controllers don't like the s/g memory
     *			being allocated below 0x2000.  We leak some memory if
     *			we get some below this mark and allocate again.  We
     *			should be able to avoid this with the tag setup, but
     *			that doesn't seem to work.
     */
retry:
    error = bus_dmamem_alloc(sc->amr_sg_dmat, (void **)&p, BUS_DMA_NOWAIT, &sc->amr_sg_dmamap);
    if (error) {
	device_printf(sc->amr_dev, "can't allocate s/g table\n");
	return(ENOMEM);
    }
    bus_dmamap_load(sc->amr_sg_dmat, sc->amr_sg_dmamap, p, segsize, amr_sglist_helper, &sc->amr_sgbusaddr, 0);
    if (sc->amr_sgbusaddr < 0x2000) {
	debug(1, "s/g table too low (0x%x), reallocating\n", sc->amr_sgbusaddr);
	goto retry;
    }

    if (AMR_IS_SG64(sc))
	sc->amr_sg64table = (struct amr_sg64entry *)p;
    sc->amr_sgtable = (struct amr_sgentry *)p;

    return(0);
}
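Because the whole s/g table lives in one slab, the list for a given command is usually found with slot arithmetic rather than the lookup table the XXX comment contemplates. A hedged sketch of that addressing, reusing the fields set above (the helper name and slot parameter are assumptions; struct amr_softc, struct amr_sgentry, and AMR_NSEG come from the driver headers):

#include <sys/param.h>
#include <machine/bus.h>

static struct amr_sgentry *
example_sg_for_slot(struct amr_softc *sc, int slot, bus_addr_t *busaddrp)
{
	size_t off = (size_t)slot * AMR_NSEG * sizeof(struct amr_sgentry);

	*busaddrp = sc->amr_sgbusaddr + off;	/* address the controller sees */
	return (sc->amr_sgtable + slot * AMR_NSEG); /* address the CPU writes */
}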
Example #30
static int
adv_isa_probe(device_t dev)
{
	int	port_index;
	int	max_port_index;
	u_long	iobase, iocount, irq;
	int	user_iobase = 0;
	int	rid = 0;
	void	*ih;
	struct resource	*iores, *irqres;

	/*
	 * We don't know of any PnP IDs for these cards.
	 */
	if (isa_get_logicalid(dev) != 0)
		return (ENXIO);

	/*
	 * Default to scanning all possible device locations.
	 */
	port_index = 0;
	max_port_index = MAX_ISA_IOPORT_INDEX;

	if (bus_get_resource(dev, SYS_RES_IOPORT, 0, &iobase, &iocount) == 0) {
		user_iobase = 1;
		for (;port_index <= max_port_index; port_index++)
			if (iobase <= adv_isa_ioports[port_index])
				break;
		if ((port_index > max_port_index)
		 || (iobase != adv_isa_ioports[port_index])) {
			if (bootverbose)
				device_printf(dev,
				    "Invalid baseport of 0x%lx specified. "
				    "Nearest valid baseport is 0x%x.  Failing "
				    "probe.\n", iobase,
				    (port_index <= max_port_index) ?
				    adv_isa_ioports[port_index] :
				    adv_isa_ioports[max_port_index]);
			return ENXIO;
		}
		max_port_index = port_index;
	}

	/* Perform the actual probing */
	adv_set_isapnp_wait_for_key();
	for (;port_index <= max_port_index; port_index++) {
		u_int16_t port_addr = adv_isa_ioports[port_index];
		bus_size_t maxsegsz;
		bus_size_t maxsize;
		bus_addr_t lowaddr;
		int error;
		struct adv_softc *adv;

		if (port_addr == 0)
			/* Already been attached */
			continue;
		
		if (bus_set_resource(dev, SYS_RES_IOPORT, 0, port_addr, 1))
			continue;

		/* XXX what is the real portsize? */
		iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					       RF_ACTIVE);
		if (iores == NULL)
			continue;

		if (adv_find_signature(iores) == 0) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			continue;
		}

		/*
		 * Got one.  Now allocate our softc
		 * and see if we can initialize the card.
		 */
		adv = adv_alloc(dev, iores, 0);
		if (adv == NULL) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/*
		 * Stop the chip.
		 */
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
		ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
		/*
		 * Determine the chip version.
		 */
		adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
		if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL)
		    && (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) {
			adv->type = ADV_VL;
			maxsegsz = ADV_VL_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_32BIT;
			lowaddr = ADV_VL_MAX_DMA_ADDR;
			bus_delete_resource(dev, SYS_RES_DRQ, 0);
		} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA)
			   && (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) {
			if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) {
				adv->type = ADV_ISAPNP;
				ADV_OUTB(adv, ADV_REG_IFC,
					 ADV_IFC_INIT_DEFAULT);
			} else {
				adv->type = ADV_ISA;
			}
			maxsegsz = ADV_ISA_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_24BIT;
			lowaddr = ADV_ISA_MAX_DMA_ADDR;
			adv->isa_dma_speed = ADV_DEF_ISA_DMA_SPEED;
			adv->isa_dma_channel = adv_get_isa_dma_channel(adv);
			bus_set_resource(dev, SYS_RES_DRQ, 0,
					 adv->isa_dma_channel, 1);
		} else {
			panic("advisaprobe: Unknown card revision\n");
		}

		/*
		 * Allocate a parent dmatag for all tags created
		 * by the MI portions of the advansys driver
		 */
		error = bus_dma_tag_create(
				/* parent	*/ bus_get_dma_tag(dev),
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ lowaddr,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ maxsize,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ maxsegsz,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&adv->parent_dmat); 

		if (error != 0) {
			device_printf(dev,
			    "Could not allocate DMA tag - error %d\n", error);
			adv_free(adv); 
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		adv->init_level += 2;

		if (overrun_buf == NULL) {
			/* Need to allocate our overrun buffer */
			if (bus_dma_tag_create(
				/* parent	*/ adv->parent_dmat,
				/* alignment	*/ 8,
				/* boundary	*/ 0,
				/* lowaddr	*/ ADV_ISA_MAX_DMA_ADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ ADV_OVERRUN_BSIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&overrun_dmat) != 0) {
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
						     iores);
				break;
			}
			if (bus_dmamem_alloc(overrun_dmat,
					     (void **)&overrun_buf,
					     BUS_DMA_NOWAIT,
					     &overrun_dmamap) != 0) {
				bus_dma_tag_destroy(overrun_dmat);
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
						     iores);
				break;
			}
			/* And permanently map it in */  
			bus_dmamap_load(overrun_dmat, overrun_dmamap,
					overrun_buf, ADV_OVERRUN_BSIZE,
					adv_map, &overrun_physbase,
					/*flags*/0);
		}

		adv->overrun_physbase = overrun_physbase;

		if (adv_init(adv) != 0) {
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		switch (adv->type) {
		case ADV_ISAPNP:
			if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG) {
				adv->bug_fix_control
				    |= ADV_BUG_FIX_ASYN_USE_SYN;
				adv->fix_asyn_xfer = ~0;
			}
			/* Fall Through */
		case ADV_ISA:
			adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR;
			adv_set_isa_dma_settings(adv);
			break;

		case ADV_VL:
			adv->max_dma_count = ADV_VL_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR;
			break;
		default:
			panic("advisaprobe: Invalid card type\n");
		}
			
		/* Determine our IRQ */
		if (bus_get_resource(dev, SYS_RES_IRQ, 0, &irq, NULL))
			bus_set_resource(dev, SYS_RES_IRQ, 0,
					 adv_get_chip_irq(adv), 1);
		else
			adv_set_chip_irq(adv, irq);

		irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						RF_ACTIVE);
		if (irqres == NULL ||
		    bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|
		    INTR_MPSAFE, NULL, adv_intr, adv, &ih) != 0) {
			if (irqres != NULL)
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    irqres);
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/* Mark as probed */
		adv_isa_ioports[port_index] = 0;
		return 0;
	}

	if (user_iobase)
		bus_set_resource(dev, SYS_RES_IOPORT, 0, iobase, iocount);
	else
		bus_delete_resource(dev, SYS_RES_IOPORT, 0);

	return ENXIO;
}
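For completeness: a probe routine like adv_isa_probe() only runs once it is wired to the ISA bus through newbus method and driver tables. A hedged sketch of that glue, where DEVMETHOD/DRIVER_MODULE are the standard FreeBSD macros but adv_isa_attach and the table names are assumptions (newer FreeBSD releases also drop the devclass argument):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

static device_method_t adv_isa_methods[] = {
	DEVMETHOD(device_probe,		adv_isa_probe),
	DEVMETHOD(device_attach,	adv_isa_attach),	/* assumed name */
	DEVMETHOD_END
};

static driver_t adv_isa_driver = {
	"adv",
	adv_isa_methods,
	sizeof(struct adv_softc)
};

static devclass_t adv_isa_devclass;
DRIVER_MODULE(adv, isa, adv_isa_driver, adv_isa_devclass, NULL, NULL);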