Example 1
static void
cn_drvinit(void *unused)
{
	struct tty *tp;

	if (bvm_consdev.cn_pri != CN_DEAD) {
		tp = tty_alloc(&bvm_ttydevsw, NULL);
		callout_init_mtx(&bvm_timer, tty_getlock(tp), 0);
		tty_makedev(tp, NULL, "bvmcons");
	}
}
Example 2
int
pdq_ifattach(pdq_softc_t *sc, const pdq_uint8_t *llc, pdq_type_t type)
{
    struct ifnet *ifp;

    ifp = PDQ_IFNET(sc) = if_alloc(IFT_FDDI);
    if (ifp == NULL) {
	device_printf(sc->dev, "can not if_alloc()\n");
	return (ENOSPC);
    }

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	MTX_DEF);
    callout_init_mtx(&sc->watchdog, &sc->mtx, 0);

    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
    ifp->if_softc = sc;
    ifp->if_init = pdq_ifinit;
    ifp->if_snd.ifq_maxlen = ifqmaxlen;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

    ifp->if_ioctl = pdq_ifioctl;
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif
  
    sc->sc_pdq = pdq_initialize(sc->mem_bst, sc->mem_bsh, ifp->if_xname, -1,
	sc, type);
    if (sc->sc_pdq == NULL) {
	device_printf(sc->dev, "Initialization failed.\n");
	return (ENXIO);
    }

    fddi_ifattach(ifp, llc, FDDI_BPF_SUPPORTED);
    return (0);
}
Example 3
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	callout_init_mtx(&cinfo->timer, &adv->lock, 0);
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		device_printf(adv->dev, "Unable to allocate CCB info "
		    "dmamap - error %d\n", error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}
Example 4
void
ieee80211_scan_attach(struct ieee80211com *ic)
{
	struct scan_state *ss;

	ss = (struct scan_state *) malloc(sizeof(struct scan_state),
		M_80211_SCAN, M_NOWAIT | M_ZERO);
	if (ss == NULL) {
		ic->ic_scan = NULL;
		return;
	}
	callout_init_mtx(&ss->ss_scan_timer, IEEE80211_LOCK_OBJ(ic), 0);
	cv_init(&ss->ss_scan_cv, "scan");
	TASK_INIT(&ss->ss_scan_task, 0, scan_task, ss);
	ic->ic_scan = &ss->base;
	ss->base.ss_ic = ic;

	ic->ic_scan_curchan = scan_curchan;
	ic->ic_scan_mindwell = scan_mindwell;
}
Example 5
/**
 * @brief This callback method asks the user to create a timer and provide
 *        a handle for this timer for use in further timer interactions.
 *
 * @warning The "timer_callback" method should be executed in a mutually
 *          exclusive manner from the controller completion handler
 *          (refer to scic_controller_get_handler_methods()).
 *
 * @param[in]  timer_callback This parameter specifies the callback method
 *             to be invoked whenever the timer expires.
 * @param[in]  controller This parameter specifies the controller with
 *             which this timer is to be associated.
 * @param[in]  cookie This parameter specifies a piece of information that
 *             the user must retain.  This cookie is to be supplied by the
 *             user anytime a timeout occurs for the created timer.
 *
 * @return This method returns a handle to a timer object created by the
 *         user.  The handle will be utilized for all further interactions
 *         relating to this timer.
 */
void *
scif_cb_timer_create(SCI_CONTROLLER_HANDLE_T scif_controller,
                     SCI_TIMER_CALLBACK_T timer_callback, void *cookie)
{
    struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(scif_controller);
    struct ISCI_TIMER *timer;

    sci_pool_get(isci_controller->timer_pool, timer);

    callout_init_mtx(&timer->callout, &isci_controller->lock, FALSE);

    timer->callback = timer_callback;
    timer->cookie = cookie;
    timer->is_started = FALSE;

    isci_log_message(3, "TIMER", "create %p %p %p\n", timer, timer_callback, cookie);

    return (timer);
}
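The comment above documents only the creation half of the timer protocol. As a rough illustration, the sketch below (hypothetical; not the isci driver's actual code) shows how such a callout-backed timer might be armed and how expiration would invoke the retained callback/cookie pair. The scif_cb_timer_start_sketch name, the millisecond parameter, and the callback signature are assumptions; only the ISCI_TIMER fields and the callout(9) calls come from the example above.

/*
 * Hypothetical sketch: arm the callout created above; on expiration,
 * invoke the user's callback with the cookie retained at creation time.
 * Since the callout was initialized with callout_init_mtx() on the
 * controller lock, the callback below runs with that lock held.
 */
static void
isci_timer_fire(void *arg)
{
    struct ISCI_TIMER *timer = arg;

    timer->is_started = FALSE;
    timer->callback(timer->cookie);
}

void
scif_cb_timer_start_sketch(SCI_CONTROLLER_HANDLE_T controller, void *handle,
    uint32_t milliseconds)
{
    struct ISCI_TIMER *timer = handle;

    /* callout_reset() takes ticks; convert from milliseconds via hz. */
    callout_reset(&timer->callout, (milliseconds * hz) / 1000,
        isci_timer_fire, timer);
    timer->is_started = TRUE;
}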
Example 6
/*
 * Allocate driver resources.  Must be called from the
 * bus-specific device allocation routine.  The caller must
 * call cmx_release_resources to free the resources when
 * detaching.
 * Returns zero on success, or ENOMEM if the resources
 * could not be allocated.
 */
int
cmx_alloc_resources(device_t dev)
{
	struct cmx_softc *sc = device_get_softc(dev);
#ifdef CMX_INTR
	int rv;
#endif

	sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
			&sc->ioport_rid, RF_ACTIVE);
	if (!sc->ioport) {
		device_printf(dev, "failed to allocate io port\n");
		return ENOMEM;
	}
	sc->bst = rman_get_bustag(sc->ioport);
	sc->bsh = rman_get_bushandle(sc->ioport);

#ifdef CMX_INTR
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			&sc->irq_rid, RF_ACTIVE);
	if (!sc->irq) {
		device_printf(dev, "failed to allocate irq\n");
		return ENOMEM;
	}
	if ((rv = bus_setup_intr(dev, sc->irq, INTR_TYPE_TTY,
			cmx_intr, sc, &sc->ih)) != 0) {
		device_printf(dev, "failed to set up irq\n");
		return ENOMEM;
	}
#endif

	mtx_init(&sc->mtx, device_get_nameunit(dev),
			"cmx softc lock",
			MTX_DEF | MTX_RECURSE);
	callout_init_mtx(&sc->ch, &sc->mtx, 0);

	return 0;
}
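The comment before cmx_alloc_resources() requires a matching cmx_release_resources(). A minimal sketch of what that counterpart might look like follows (hypothetical; the cmx driver's real routine may differ), using only the softc fields seen above and tearing down in roughly the reverse order of allocation.

/*
 * Hypothetical sketch of the release counterpart promised above.
 */
void
cmx_release_resources(device_t dev)
{
	struct cmx_softc *sc = device_get_softc(dev);

#ifdef CMX_INTR
	if (sc->ih != NULL)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
#endif
	if (sc->ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT, sc->ioport_rid,
		    sc->ioport);
	if (mtx_initialized(&sc->mtx)) {
		/* Stop the callout before destroying the mutex it uses. */
		callout_drain(&sc->ch);
		mtx_destroy(&sc->mtx);
	}
}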
Example 7
static void
ata_cam_request_sense(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;

	ch->requestsense = 1;

	bzero(request, sizeof(*request));
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	request->data = (void *)&ccb->csio.sense_data;
	request->bytecount = ccb->csio.sense_len;
	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
	request->u.atapi.ccb[4] = ccb->csio.sense_len;
	request->flags |= ATA_R_ATAPI;
	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
		request->flags |= ATA_R_ATAPI16;
	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	request->flags |= ATA_R_READ;
	request->transfersize = min(request->bytecount,
	    ch->curr[ccb->ccb_h.target_id].bytecount);
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(dev, request);
		return;
	}
}
Example 8
static void
ata_cam_begin_transaction(device_t dev, union ccb *ccb)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	if (!(request = ata_alloc_request())) {
		device_printf(dev, "FAILURE - out of memory in start\n");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
	bzero(request, sizeof(*request));

	/* setup request */
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		request->data = ccb->ataio.data_ptr;
		request->bytecount = ccb->ataio.dxfer_len;
		request->u.ata.command = ccb->ataio.cmd.command;
		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
					  (uint16_t)ccb->ataio.cmd.features;
		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
					(uint16_t)ccb->ataio.cmd.sector_count;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
			request->flags |= ATA_R_48BIT;
			request->u.ata.lba =
				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
		} else {
			request->u.ata.lba =
				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
		}
		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
				       (uint64_t)ccb->ataio.cmd.lba_low;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
			request->flags |= ATA_R_NEEDRESULT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		if (ccb->ataio.cmd.command == ATA_READ_MUL ||
		    ccb->ataio.cmd.command == ATA_READ_MUL48 ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
			request->transfersize = min(request->bytecount,
			    ch->curr[ccb->ccb_h.target_id].bytecount);
		} else
			request->transfersize = min(request->bytecount, 512);
	} else {
		request->data = ccb->csio.data_ptr;
		request->bytecount = ccb->csio.dxfer_len;
		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
		    request->u.atapi.ccb, ccb->csio.cdb_len);
		request->flags |= ATA_R_ATAPI;
		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
			request->flags |= ATA_R_ATAPI16;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		request->transfersize = min(request->bytecount,
		    ch->curr[ccb->ccb_h.target_id].bytecount);
	}
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;
	request->flags |= ATA_R_DATA_IN_CCB;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
	    ch->running = NULL;
	    ch->state = ATA_IDLE;
	    ata_cam_end_transaction(dev, request);
	    return;
	}
}
Example 9
int
ex_attach(device_t dev)
{
	struct ex_softc *	sc = device_get_softc(dev);
	struct ifnet *		ifp;
	struct ifmedia *	ifm;
	int			error;
	uint16_t		temp;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	/* work out which set of irq <-> internal tables to use */
	if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) {
		sc->irq2ee = plus_irq2eemap;
		sc->ee2irq = plus_ee2irqmap;
	} else {
		sc->irq2ee = irq2eemap;
		sc->ee2irq = ee2irqmap;
	}

	sc->mem_size = CARD_RAM_SIZE;	/* XXX This should be read from the card itself. */

	/*
	 * Initialize the ifnet structure.
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_init = ex_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);

	ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts);
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	temp = ex_eeprom_read(sc, EE_W5);
	if (temp & EE_W5_PORT_TPE)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	if (temp & EE_W5_PORT_BNC)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	if (temp & EE_W5_PORT_AUI)
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);

	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
	ifmedia_set(&sc->ifmedia, ex_get_media(sc));

	ifm = &sc->ifmedia;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;	
	ex_ifmedia_upd(ifp);

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->enaddr);

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
				NULL, ex_intr, (void *)sc, &sc->ih);
	if (error) {
		device_printf(dev, "bus_setup_intr() failed!\n");
		ether_ifdetach(ifp);
		mtx_destroy(&sc->lock);
		return (error);
	}

	return(0);
}
Example 10
int
smc_attach(device_t dev)
{
	int			type, error;
	uint16_t		val;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		(void)mii_attach(dev, &sc->smc_miibus, ifp,
		    smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}
Example 11
static int
at91_udp_attach(device_t dev)
{
	struct at91_udp_softc *sc = device_get_softc(dev);
	int err;
	int rid;

	/* setup AT9100 USB device controller interface softc */

	sc->sc_dci.sc_clocks_on = &at91_udp_clocks_on;
	sc->sc_dci.sc_clocks_off = &at91_udp_clocks_off;
	sc->sc_dci.sc_clocks_arg = sc;
	sc->sc_dci.sc_pull_up = &at91_udp_pull_up;
	sc->sc_dci.sc_pull_down = &at91_udp_pull_down;
	sc->sc_dci.sc_pull_arg = sc;

	/* initialise some bus fields */
	sc->sc_dci.sc_bus.parent = dev;
	sc->sc_dci.sc_bus.devices = sc->sc_dci.sc_devices;
	sc->sc_dci.sc_bus.devices_max = AT91_MAX_DEVICES;

	/* get all DMA memory */
	if (usb_bus_mem_alloc_all(&sc->sc_dci.sc_bus,
	    USB_GET_DMA_TAG(dev), NULL)) {
		return (ENOMEM);
	}
	callout_init_mtx(&sc->sc_vbus, &sc->sc_dci.sc_bus.bus_mtx, 0);

	/*
	 * configure VBUS input pin, enable deglitch and enable
	 * interrupt:
	 */
	at91_pio_use_gpio(VBUS_BASE, VBUS_MASK);
	at91_pio_gpio_input(VBUS_BASE, VBUS_MASK);
	at91_pio_gpio_set_deglitch(VBUS_BASE, VBUS_MASK, 1);
	at91_pio_gpio_set_interrupt(VBUS_BASE, VBUS_MASK, 0);

	/*
	 * configure PULLUP output pin:
	 */
	at91_pio_use_gpio(PULLUP_BASE, PULLUP_MASK);
	at91_pio_gpio_output(PULLUP_BASE, PULLUP_MASK, 0);

	at91_udp_pull_down(sc);

	/* wait 10ms for pulldown to stabilise */
	usb_pause_mtx(NULL, hz / 100);

	sc->sc_mclk = at91_pmc_clock_ref("mck");
	sc->sc_iclk = at91_pmc_clock_ref("udc_clk");
	sc->sc_fclk = at91_pmc_clock_ref("udpck");

	rid = MEM_RID;
	sc->sc_dci.sc_io_res =
	    bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);

	if (!(sc->sc_dci.sc_io_res)) {
		err = ENOMEM;
		goto error;
	}
	sc->sc_dci.sc_io_tag = rman_get_bustag(sc->sc_dci.sc_io_res);
	sc->sc_dci.sc_io_hdl = rman_get_bushandle(sc->sc_dci.sc_io_res);
	sc->sc_dci.sc_io_size = rman_get_size(sc->sc_dci.sc_io_res);

	rid = 0;
	sc->sc_dci.sc_irq_res =
	    bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (!(sc->sc_dci.sc_irq_res)) {
		goto error;
	}
	sc->sc_dci.sc_bus.bdev = device_add_child(dev, "usbus", -1);
	if (!(sc->sc_dci.sc_bus.bdev)) {
		goto error;
	}
	device_set_ivars(sc->sc_dci.sc_bus.bdev, &sc->sc_dci.sc_bus);

	err = bus_setup_intr(dev, sc->sc_dci.sc_irq_res, INTR_TYPE_TTY | INTR_MPSAFE,
	    at91dci_filter_interrupt, at91dci_interrupt, sc, &sc->sc_dci.sc_intr_hdl);
	if (err) {
		sc->sc_dci.sc_intr_hdl = NULL;
		goto error;
	}

	err = at91dci_init(&sc->sc_dci);
	if (!err) {
		err = device_probe_and_attach(sc->sc_dci.sc_bus.bdev);
	}
	if (err) {
		goto error;
	} else {
		/* poll VBUS one time */
		USB_BUS_LOCK(&sc->sc_dci.sc_bus);
		at91_vbus_poll(sc);
		USB_BUS_UNLOCK(&sc->sc_dci.sc_bus);
	}
	return (0);

error:
	at91_udp_detach(dev);
	return (ENXIO);
}
Example 12
static int
kr_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct kr_softc		*sc;
	int			error = 0, rid;
	int			unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid, 
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_drv_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	error = mii_attach(dev, &sc->kr_miibus, ifp, kr_ifmedia_upd,
	    kr_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq, 
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc, 
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq, 
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc, 
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error) 
		kr_detach(dev);

	return (error);
}
Example 13
static int
jz4780_mmc_attach(device_t dev)
{
	struct jz4780_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	device_t child;
	ssize_t len;
	pcell_t prop;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_req = NULL;
	if (bus_alloc_resources(dev, jz4780_mmc_res_spec, sc->sc_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	sc->sc_bst = rman_get_bustag(sc->sc_res[JZ_MSC_MEMRES]);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res[JZ_MSC_MEMRES]);
	if (bus_setup_intr(dev, sc->sc_res[JZ_MSC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, jz4780_mmc_intr, sc,
	    &sc->sc_intrhand)) {
		bus_release_resources(dev, jz4780_mmc_res_spec, sc->sc_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	sc->sc_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->sc_timeout, 0, "Request timeout in seconds");
	mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), "jz4780_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->sc_timeoutc, &sc->sc_mtx, 0);

	/* Reset controller. */
	if (jz4780_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}
	if (jz4780_mmc_pio_mode == 0 && jz4780_mmc_setup_dma(sc) != 0) {
		device_printf(sc->sc_dev, "Couldn't setup DMA!\n");
		jz4780_mmc_pio_mode = 1;
	}
	if (bootverbose)
		device_printf(sc->sc_dev, "DMA status: %s\n",
		    jz4780_mmc_pio_mode ? "disabled" : "enabled");

	node = ofw_bus_get_node(dev);
	/* Determine max operating frequency */
	sc->sc_host.f_max = 24000000;
	len = OF_getencprop(node, "max-frequency", &prop, sizeof(prop));
	if (len / sizeof(prop) == 1)
		sc->sc_host.f_max = prop;
	sc->sc_host.f_min = sc->sc_host.f_max / 128;

	sc->sc_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->sc_host.caps = MMC_CAP_HSPEED;
	sc->sc_host.mode = mode_sd;
	/*
	 * Check for bus-width property, default to both 4 and 8 bit
	 * if no bus width is specified.
	 */
	len = OF_getencprop(node, "bus-width", &prop, sizeof(prop));
	if (len / sizeof(prop) != 1)
		sc->sc_host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
	else if (prop == 8)
		sc->sc_host.caps |= MMC_CAP_8_BIT_DATA;
	else if (prop == 4)
		sc->sc_host.caps |= MMC_CAP_4_BIT_DATA;
	/* Activate the module clock. */
	if (jz4780_mmc_enable_clock(sc) != 0) {
		device_printf(dev, "cannot activate mmc clock\n");
		goto fail;
	}

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->sc_timeoutc);
	mtx_destroy(&sc->sc_mtx);
	bus_teardown_intr(dev, sc->sc_res[JZ_MSC_IRQRES], sc->sc_intrhand);
	bus_release_resources(dev, jz4780_mmc_res_spec, sc->sc_res);
	if (sc->sc_clk != NULL)
		clk_release(sc->sc_clk);
	return (ENXIO);
}
Example 14
static void
nmdm_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	struct nmdmsoftc *ns;
	struct tty *tp;
	char *end;
	int error;
	char endc;

	if (*dev != NULL)
		return;
	if (strncmp(name, "nmdm", 4) != 0)
		return;
	if (strlen(name) <= strlen("nmdmX"))
		return;

	/* Device name must be "nmdm%s%c", where %c is 'A' or 'B'. */
	end = name + strlen(name) - 1;
	endc = *end;
	if (endc != 'A' && endc != 'B')
		return;

	ns = malloc(sizeof(*ns), M_NMDM, M_WAITOK | M_ZERO);
	mtx_init(&ns->ns_mtx, "nmdm", NULL, MTX_DEF);

	/* Hook the pairs together. */
	ns->ns_part1.np_pair = ns;
	ns->ns_part1.np_other = &ns->ns_part2;
	TASK_INIT(&ns->ns_part1.np_task, 0, nmdm_task_tty, &ns->ns_part1);
	callout_init_mtx(&ns->ns_part1.np_callout, &ns->ns_mtx, 0);

	ns->ns_part2.np_pair = ns;
	ns->ns_part2.np_other = &ns->ns_part1;
	TASK_INIT(&ns->ns_part2.np_task, 0, nmdm_task_tty, &ns->ns_part2);
	callout_init_mtx(&ns->ns_part2.np_callout, &ns->ns_mtx, 0);

	/* Create device nodes. */
	tp = ns->ns_part1.np_tty = tty_alloc_mutex(&nmdm_class, &ns->ns_part1,
	    &ns->ns_mtx);
	*end = 'A';
	error = tty_makedevf(tp, NULL, endc == 'A' ? TTYMK_CLONING : 0,
	    "%s", name);
	if (error) {
		*end = endc;
		mtx_destroy(&ns->ns_mtx);
		free(ns, M_NMDM);
		return;
	}

	tp = ns->ns_part2.np_tty = tty_alloc_mutex(&nmdm_class, &ns->ns_part2,
	    &ns->ns_mtx);
	*end = 'B';
	error = tty_makedevf(tp, NULL, endc == 'B' ? TTYMK_CLONING : 0,
	    "%s", name);
	if (error) {
		*end = endc;
		mtx_lock(&ns->ns_mtx);
		/* see nmdm_free() */
		ns->ns_part1.np_other = NULL;
		atomic_add_int(&nmdm_count, 1);
		tty_rel_gone(ns->ns_part1.np_tty);
		return;
	}

	if (endc == 'A')
		*dev = ns->ns_part1.np_tty->t_dev;
	else
		*dev = ns->ns_part2.np_tty->t_dev;

	*end = endc;
	atomic_add_int(&nmdm_count, 1);
}
Example 15
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI | ATH10K_DBG_MAC |
	    ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT | ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK,
	    MTX_DEF);

	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);

	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);

	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);

	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);

	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);

	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET, "%s pipe taskq",
	    device_get_nameunit(dev));
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__,
	    pci_msi_count(dev),
	    pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effectively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n", __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n", __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */

	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__,
		    ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__, ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid, ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:

	ath10k_htt_rx_free_desc(ar, &ar->htt);

	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);

	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	ath10k_core_destroy(ar);
bad0:
	return (err);
}
Example 16
/*
 * Install interface into kernel networking data structures
 */
int
ed_attach(device_t dev)
{
	struct ed_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	sc->dev = dev;
	ED_LOCK_INIT(sc);
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		ED_LOCK_DESTROY(sc);
		return (ENOSPC);
	}

	if (sc->readmem == NULL) {
		if (sc->mem_shared) {
			if (sc->isa16bit)
				sc->readmem = ed_shmem_readmem16;
			else
				sc->readmem = ed_shmem_readmem8;
		} else {
			sc->readmem = ed_pio_readmem;
		}
	}
	if (sc->sc_write_mbufs == NULL) {
		device_printf(dev, "No write mbufs routine set\n");
		return (ENXIO);
	}

	callout_init_mtx(&sc->tick_ch, ED_MUTEX(sc), 0);
	/*
	 * Set interface to stopped condition (reset)
	 */
	ed_stop_hw(sc);

	/*
	 * Initialize ifnet structure
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_start = ed_start;
	ifp->if_ioctl = ed_ioctl;
	ifp->if_init = ed_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof sc->mibdata;
	/*
	 * XXX - should do a better job.
	 */
	if (sc->chip_type == ED_CHIP_TYPE_WD790)
		sc->mibdata.dot3StatsEtherChipSet =
			DOT3CHIPSET(dot3VendorWesternDigital,
				    dot3ChipSetWesternDigital83C790);
	else
		sc->mibdata.dot3StatsEtherChipSet =
			DOT3CHIPSET(dot3VendorNational, 
				    dot3ChipSetNational8390);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * Set default state for LINK2 flag (used to disable the 
	 * transceiver for AUI operation), based on config option.
	 * We only set this flag before we attach the device, so there's
	 * no race.  It is convenient to allow users to turn this off
	 * by default in the kernel config, but given our more advanced
	 * boot time configuration options, this might no longer be needed.
	 */
	if (device_get_flags(dev) & ED_FLAGS_DISABLE_TRANCEIVER)
		ifp->if_flags |= IFF_LINK2;

	/*
	 * Attach the interface
	 */
	ether_ifattach(ifp, sc->enaddr);
	/* device attach does transition from UNCONFIGURED to IDLE state */

	sc->tx_mem = sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE;
	sc->rx_mem = (sc->rec_page_stop - sc->rec_page_start) * ED_PAGE_SIZE;
	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    0, "type", CTLFLAG_RD, sc->type_str, 0,
	    "Type of chip in card");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    1, "TxMem", CTLFLAG_RD, &sc->tx_mem, 0,
	    "Memory set aside for transmitting packets");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    2, "RxMem", CTLFLAG_RD, &sc->rx_mem, 0,
	    "Memory  set aside for receiving packets");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    3, "Mem", CTLFLAG_RD, &sc->mem_size, 0,
	    "Total Card Memory");
	if (bootverbose) {
		if (sc->type_str && (*sc->type_str != 0))
			device_printf(dev, "type %s ", sc->type_str);
		else
			device_printf(dev, "type unknown (0x%x) ", sc->type);

#ifdef ED_HPP
		if (sc->vendor == ED_VENDOR_HP)
			printf("(%s %s IO)",
			    (sc->hpp_id & ED_HPP_ID_16_BIT_ACCESS) ?
			    "16-bit" : "32-bit",
			    sc->hpp_mem_start ? "memory mapped" : "regular");
		else
#endif
			printf("%s", sc->isa16bit ? "(16 bit)" : "(8 bit)");

#if defined(ED_HPP) || defined(ED_3C503)
		printf("%s", (((sc->vendor == ED_VENDOR_3COM) ||
				    (sc->vendor == ED_VENDOR_HP)) &&
			   (ifp->if_flags & IFF_LINK2)) ?
		    " tranceiver disabled" : "");
#endif
		printf("\n");
	}
	return (0);
}
Example 17
static int
ida_eisa_attach(device_t dev)
{
    struct ida_softc	*ida;
    struct ida_board	*board;
    int			error;
    int			rid;

    ida = device_get_softc(dev);
    ida->dev = dev;

    board = ida_eisa_match(eisa_get_id(dev));
    ida->cmd = *board->accessor;
    ida->flags = board->flags;
    mtx_init(&ida->lock, "ida", NULL, MTX_DEF);
    callout_init_mtx(&ida->ch, &ida->lock, 0);

    ida->regs_res_type = SYS_RES_IOPORT;
    ida->regs_res_id = 0;
    ida->regs = bus_alloc_resource_any(dev, ida->regs_res_type,
                                       &ida->regs_res_id, RF_ACTIVE);
    if (ida->regs == NULL) {
        device_printf(dev, "can't allocate register resources\n");
        return (ENOMEM);
    }

    error = bus_dma_tag_create(
                /* parent	*/	bus_get_dma_tag(dev),
                /* alignment	*/	0,
                /* boundary	*/	0,
                /* lowaddr	*/	BUS_SPACE_MAXADDR_32BIT,
                /* highaddr	*/	BUS_SPACE_MAXADDR,
                /* filter	*/	NULL,
                /* filterarg	*/	NULL,
                /* maxsize	*/	MAXBSIZE,
                /* nsegments	*/	IDA_NSEG,
                /* maxsegsize	*/	BUS_SPACE_MAXSIZE_32BIT,
                /* flags	*/	BUS_DMA_ALLOCNOW,
                /* lockfunc	*/	NULL,
                /* lockarg	*/	NULL,
                &ida->parent_dmat);

    if (error != 0) {
        device_printf(dev, "can't allocate DMA tag\n");
        ida_free(ida);
        return (ENOMEM);
    }

    rid = 0;
    ida->irq_res_type = SYS_RES_IRQ;
    ida->irq = bus_alloc_resource_any(dev, ida->irq_res_type, &rid,
                                      RF_ACTIVE | RF_SHAREABLE);
    if (ida->irq == NULL) {
        ida_free(ida);
        return (ENOMEM);
    }

    error = bus_setup_intr(dev, ida->irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE,
                           NULL, ida_intr, ida, &ida->ih);
    if (error) {
        device_printf(dev, "can't setup interrupt\n");
        ida_free(ida);
        return (ENOMEM);
    }

    error = ida_init(ida);
    if (error) {
        ida_free(ida);
        return (error);
    }

    return (0);
}
Example 18
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make sense
		 * for a runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
Example 19
static int
mlx_pci_attach(device_t dev)
{
    struct mlx_softc	*sc;
    struct mlx_ident	*m;
    int			error;

    debug_called(1);

    pci_enable_busmaster(dev);

    sc = device_get_softc(dev);
    sc->mlx_dev = dev;

    /*
     * Work out what sort of adapter this is (we need to know this in order
     * to map the appropriate interface resources).
     */
    m = mlx_pci_match(dev);
    if (m == NULL)		/* shouldn't happen */
	return(ENXIO);
    sc->mlx_iftype = m->iftype;

    mtx_init(&sc->mlx_io_lock, "mlx I/O", NULL, MTX_DEF);
    sx_init(&sc->mlx_config_lock, "mlx config");
    callout_init_mtx(&sc->mlx_timeout, &sc->mlx_io_lock, 0);

    /*
     * Allocate the PCI register window.
     */
    
    /* type 2/3 adapters have an I/O region we don't prefer at base 0 */
    switch(sc->mlx_iftype) {
    case MLX_IFTYPE_2:
    case MLX_IFTYPE_3:
	sc->mlx_mem_type = SYS_RES_MEMORY;
	sc->mlx_mem_rid = MLX_CFG_BASE1;
	sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	if (sc->mlx_mem == NULL) {
	    sc->mlx_mem_type = SYS_RES_IOPORT;
	    sc->mlx_mem_rid = MLX_CFG_BASE0;
	    sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	}
	break;
    case MLX_IFTYPE_4:
    case MLX_IFTYPE_5:
	sc->mlx_mem_type = SYS_RES_MEMORY;
	sc->mlx_mem_rid = MLX_CFG_BASE0;
	sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	break;
    }
    if (sc->mlx_mem == NULL) {
	device_printf(sc->mlx_dev, "couldn't allocate mailbox window\n");
	mlx_free(sc);
	return(ENXIO);
    }

    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			       1, 0, 			/* alignment, boundary */
			       BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       MAXBSIZE, MLX_NSEG,	/* maxsize, nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->mlx_parent_dmat);
    if (error != 0) {
	device_printf(dev, "can't allocate parent DMA tag\n");
	mlx_free(sc);
	return(ENOMEM);
    }

    /*
     * Do bus-independent initialisation.
     */
    error = mlx_attach(sc);
    if (error != 0) {
	mlx_free(sc);
	return(error);
    }
    
    /*
     * Start the controller.
     */
    mlx_startup(sc);
    return(0);
}
Example 20
static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi  *vsi;
	enum i40e_status_code status;
	int             error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;
	vsi->back = pf;

	/* Save tunable values */
	error = ixl_save_pf_tunables(pf);
	if (error)
		return (error);

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/*
	 * Allocate interrupts and figure out number of queues to use
	 * for PF interface
	 */
	pf->msix = ixl_init_msix(pf);

	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	/* Reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	if ((hw->mac.type != I40E_MAC_X722)
	    && ((pf->hw.aq.api_maj_ver > 1)
	    || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
		i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up SW VSI and allocate queue memory and rings */
	if (ixl_setup_stations(pf)) { 
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err_late;
	}

	/* Get the bus configuration and set the shared code's config */
	ixl_get_bus_info(pf);

	/*
	 * In MSI-X mode, initialize the Admin Queue interrupt,
	 * so userland tools can communicate with the adapter regardless of
	 * the ifnet interface's status.
	 */
	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
			    error);
			goto err_late;
		}
		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);

		error = ixl_setup_queue_msix(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	} else {
		error = ixl_setup_legacy(pf);
		if (error) {
			device_printf(dev, "ixl_setup_legacy() error: %d\n",
			    error);
			goto err_late;
		}

		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}

		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	}

	if (error) {
		device_printf(dev, "interrupt setup error: %d\n", error);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef DEV_NETMAP
	if (vsi->num_rx_desc == vsi->num_tx_desc) {
		vsi->queues[0].num_desc = vsi->num_rx_desc;
		ixl_netmap_attach(vsi);
	} else
		device_printf(dev,
		    "Netmap is not supported when RX and TX descriptor ring sizes differ\n");

#endif /* DEV_NETMAP */

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iwarp driver failed: %d\n",
				    error);
				goto err_late;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev,
			    "iwarp disabled on this device (no msix vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL) {
		ether_ifdetach(vsi->ifp);
		if_free(vsi->ifp);
	}
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
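After attach, the pf->timer callout initialized above is normally armed from the init path and rescheduled from its own callback; because it was created with callout_init_mtx(), the PF mutex is held automatically around the callback. A minimal sketch of that pattern follows; the ixl_local_timer name and body are assumptions for illustration, not code taken from this listing:

/* Sketch: periodic-timer idiom for a callout bound to the PF mutex.
 * The callback name and body are illustrative assumptions. */
static void
ixl_local_timer(void *arg)
{
	struct ixl_pf *pf = arg;

	/* The PF mutex is already held here: callout_init_mtx()
	 * bound this callout to it. */
	/* ... update counters, check for hung queues ... */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
}

/* Armed from the init path, with the PF lock held: */
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Stopped from the stop/detach path, same lock held: */
callout_stop(&pf->timer);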
Example n. 21
static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);
	/* Enable HW TSO on Pass2 */
	if (!pass1_silicon(dev))
		nic->hw_tso = TRUE;

	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;

	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		/* nic->ifp is not allocated yet at this point */
		goto err_release_intr;
	}

	/* If no MAC address was obtained, generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	/* Configure receive side scaling */
	if (nic->qs->rq_cnt > 1)
		nicvf_rss_init(nic);
	NICVF_CORE_UNLOCK(nic);

	err = nicvf_setup_ifnet(nic);
	if (err != 0) {
		device_printf(dev, "Could not set-up ifnet\n");
		goto err_release_intr;
	}

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set-up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}
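The stats callout created near the end of this example follows the same idiom. Below is a minimal sketch of how it would be armed and torn down, assuming a hypothetical nicvf_stats_tick callback (the real driver's callback name may differ). Note that callout_drain() must be called without the associated mutex held, since it can sleep waiting for a running callback to finish:

/* Sketch; nicvf_stats_tick is a hypothetical callback name. */
static void
nicvf_stats_tick(void *arg)
{
	struct nicvf *nic = arg;

	/* nic->stats_mtx is held: the callout was bound to it. */
	/* ... read hardware counters into the softc ... */
	callout_reset(&nic->stats_callout, hz, nicvf_stats_tick, nic);
}

/* Arm once the interface is brought up: */
mtx_lock(&nic->stats_mtx);
callout_reset(&nic->stats_callout, hz, nicvf_stats_tick, nic);
mtx_unlock(&nic->stats_mtx);

/* On detach, drain without the mutex held, then destroy it: */
callout_drain(&nic->stats_callout);
mtx_destroy(&nic->stats_mtx);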