Example #1
static int au1k_irda_init(void)
{
	static unsigned version_printed = 0;
	struct au1k_private *aup;
	struct net_device *dev;
	int err;

	if (version_printed++ == 0) printk(version);

	dev = alloc_irdadev(sizeof(struct au1k_private));
	if (!dev)
		return -ENOMEM;

	dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
	err = au1k_irda_net_init(dev);
	if (err)
		goto out;
	err = register_netdev(dev);
	if (err)
		goto out1;
	ir_devs[0] = dev;
	printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
	return 0;

out1:
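	/* undo the allocations made in au1k_irda_net_init(); the sizes must
	 * match the ones passed to dma_alloc() there */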
	aup = netdev_priv(dev);
	dma_free((void *)aup->db[0].vaddr,
		MAX_BUF_SIZE * 2*NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
	kfree(aup->rx_buff.head);
out:
	free_netdev(dev);
	return err;
}
Example #2
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	/* reallocate with room for the whole list of supported pages */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	if (score < 2)
		rv = EOPNOTSUPP;

 done:
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
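
The OpenBSD examples share one pattern: dma_alloc() takes a size plus pool flags (PR_NOWAIT or PR_WAITOK depending on SCSI_NOSLEEP), and dma_free() must later be given exactly the same size. Below is a minimal sketch of that pattern in the same kernel context as sd_thin_pages(); example_vpd_fetch is a made-up name for illustration:

int
example_vpd_fetch(struct scsi_link *link, int flags)
{
	struct scsi_vpd_hdr *pg;
	int rv;

	/* sleep for memory only when the caller is allowed to sleep */
	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(link, pg, sizeof(*pg), SI_PG_SUPPORTED, flags);

	/* dma_free() must be given the size that was passed to dma_alloc() */
	dma_free(pg, sizeof(*pg));
	return (rv);
}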
Example #3
static void __exit au1k_irda_exit(void)
{
	struct net_device *dev = ir_devs[0];
	struct au1k_private *aup = netdev_priv(dev);

	unregister_netdev(dev);

	dma_free((void *)aup->db[0].vaddr,
		MAX_BUF_SIZE * 2*NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
	kfree(aup->rx_buff.head);
	free_netdev(dev);
}
Example #4
void IODMARegion::free()
{
	if(_wrapped)
		dma_free(_wrapped);

	super::free();
}
Example #5
int
safte_detach(struct device *self, int flags)
{
	struct safte_softc		*sc = (struct safte_softc *)self;
	int				i;

	rw_enter_write(&sc->sc_lock);

#if NBIO > 0
	if (sc->sc_nslots > 0)
		bio_unregister(self);
#endif

	if (sc->sc_nsensors > 0) {
		sensordev_deinstall(&sc->sc_sensordev);
		sensor_task_unregister(sc->sc_sensortask);

		for (i = 0; i < sc->sc_nsensors; i++)
			sensor_detach(&sc->sc_sensordev,
			    &sc->sc_sensors[i].se_sensor);
		free(sc->sc_sensors, M_DEVBUF, 0);
	}

	/* sc_encbuf and its length were set up in safte_read_config() */
	if (sc->sc_encbuf != NULL)
		dma_free(sc->sc_encbuf, sc->sc_encbuflen);

	rw_exit_write(&sc->sc_lock);

	return (0);
}
Example #6
int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

#ifdef notyet
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif

 done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
Example #7
static void __exit au1000_cleanup_module(void)
{
	int i, j;
	struct net_device *dev;
	struct au1000_private *aup;

	for (i = 0; i < num_ifs; i++) {
		dev = iflist[i].dev;
		if (dev) {
			aup = (struct au1000_private *) dev->priv;
			unregister_netdev(dev);
			if (aup->mii)
				kfree(aup->mii);
			for (j = 0; j < NUM_RX_DMA; j++) {
				if (aup->rx_db_inuse[j])
					ReleaseDB(aup, aup->rx_db_inuse[j]);
			}
			for (j = 0; j < NUM_TX_DMA; j++) {
				if (aup->tx_db_inuse[j])
					ReleaseDB(aup, aup->tx_db_inuse[j]);
			}
			dma_free((void *)aup->vaddr, MAX_BUF_SIZE * 
					(NUM_TX_BUFFS+NUM_RX_BUFFS));
			kfree(dev);
			release_region(iflist[i].base_addr, MAC_IOSIZE);
		}
	}
}
Example #8
int
sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
{
	struct scsi_vpd_serial *vpd;

	vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
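	/* PR_WAITOK allocations sleep until memory is available, so there is
	 * no NULL check here */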

	bzero(di, sizeof(struct dk_inquiry));
	scsi_strvis(di->vendor, sc->sc_link->inqdata.vendor,
	    sizeof(sc->sc_link->inqdata.vendor));
	scsi_strvis(di->product, sc->sc_link->inqdata.product,
	    sizeof(sc->sc_link->inqdata.product));
	scsi_strvis(di->revision, sc->sc_link->inqdata.revision,
	    sizeof(sc->sc_link->inqdata.revision));

	/* the serial vpd page is optional */
	if (scsi_inquire_vpd(sc->sc_link, vpd, sizeof(*vpd),
	    SI_PG_SERIAL, 0) == 0)
		scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
	else
		strlcpy(di->serial, "(unknown)", sizeof(di->serial));

	dma_free(vpd, sizeof(*vpd));
	return (0);
}
Example #9
int
sd_vpd_block_limits(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_limits *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_LIMITS, flags);
	if (rv != 0)
		goto done;

	if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
		sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
		sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
	} else
		rv = EOPNOTSUPP;

 done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
Example #10
int
emc_inquiry(struct emc_softc *sc, char *model, char *serial)
{
	u_int8_t *buffer;
	struct scsi_inquiry *cdb;
	struct scsi_xfer *xs;
	size_t length;
	int error;
	u_int8_t slen, mlen;

	length = MIN(sc->sc_path.p_link->inqdata.additional_length + 5, 255);
	if (length < 160) {
		printf("%s: FC (Legacy)\n");
		return (0);
	}

	buffer = dma_alloc(length, PR_WAITOK);

	xs = scsi_xs_get(sc->sc_path.p_link, scsi_autoconf);
	if (xs == NULL) {
		error = EBUSY;
		goto done;
	}

	cdb = (struct scsi_inquiry *)xs->cmd;
	cdb->opcode = INQUIRY;
	_lto2b(length, cdb->length);

	xs->cmdlen = sizeof(*cdb);
	xs->flags |= SCSI_DATA_IN;
	xs->data = buffer;
	xs->datalen = length;

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error != 0)
		goto done;

	/* byte 160 holds the serial number length; the string follows at 161 */
	slen = buffer[160];
	if (slen == 0 || slen + 161 > length) {
		error = EIO;
		goto done;
	}

	/* byte 99 holds the model string length; the model follows the serial */
	mlen = buffer[99];
	if (mlen == 0 || slen + mlen + 161 > length) {
		error = EIO;
		goto done;
	}

	scsi_strvis(serial, buffer + 161, slen);
	scsi_strvis(model, buffer + 161 + slen, mlen);

	error = 0;
done:
	dma_free(buffer, length);
	return (error);
}
Example #11
/**
* \brief Stop the XPRO_LCD automated character display mode.
*
* \note This function also disables the DMA channel associated with the
* XPRO_LCD module.
*/
static inline void xpro_lcd_automated_char_stop(void)
{
    /* Disable automated character mode */
    slcd_disable_automated_character();
    is_scrolling = false;

    /* Disable DMA channel */
    dma_free(&example_resource);
}
Example #12
int
emc_detach(struct device *self, int flags)
{
	struct emc_softc *sc = (struct emc_softc *)self;

	dma_free(sc->sc_pg, sizeof(*sc->sc_pg));

	return (0);
}
Example #13
/**
* \brief Stop the XPRO_LCD automated bit mapping display mode.
*
* \note This function also disables the DMA channel associated with the
* XPRO_LCD module.
*/
static inline void xpro_lcd_automated_bit_stop(void)
{
    /* Disable automated bit mapping mode */
    slcd_disable_automated_bit();
    is_bitmapping = false;

    /* Disable DMA channel */
    dma_free(&example_resource);
}
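
In the two XPRO_LCD examples, dma_free() takes a resource handle rather than a pointer/size pair. A rough counterpart showing how example_resource could be set up is sketched below; it assumes the ASF DMAC driver's dma_get_config_defaults()/dma_allocate() calls and leaves out the SLCD-specific trigger configuration, so treat it as an illustration only:

static inline void xpro_lcd_dma_setup(void)
{
    struct dma_resource_config config;

    /* start from the driver defaults; the SLCD peripheral trigger would be
     * configured here */
    dma_get_config_defaults(&config);

    /* claim a DMA channel; dma_free(&example_resource) releases it again */
    dma_allocate(&example_resource, &config);
}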
Example #14
void
safte_attach(struct device *parent, struct device *self, void *aux)
{
	struct safte_softc		*sc = (struct safte_softc *)self;
	struct scsi_attach_args		*sa = aux;
	int				i = 0;

	sc->sc_link = sa->sa_sc_link;
	sa->sa_sc_link->device_softc = sc;
	rw_init(&sc->sc_lock, DEVNAME(sc));

	printf("\n");

	sc->sc_encbuf = NULL;
	sc->sc_nsensors = 0;
#if NBIO > 0
	sc->sc_nslots = 0;
#endif

	if (safte_read_config(sc) != 0) {
		printf("%s: unable to read enclosure configuration\n",
		    DEVNAME(sc));
		return;
	}

	if (sc->sc_nsensors > 0) {
		sc->sc_sensortask = sensor_task_register(sc,
		    safte_read_encstat, 10);
		if (sc->sc_sensortask == NULL) {
			printf("%s: unable to register update task\n",
			    DEVNAME(sc));
			sc->sc_nsensors = sc->sc_ntemps = 0;
			free(sc->sc_sensors, M_DEVBUF, 0);
		} else {
			for (i = 0; i < sc->sc_nsensors; i++)
				sensor_attach(&sc->sc_sensordev,
				    &sc->sc_sensors[i].se_sensor);
			sensordev_install(&sc->sc_sensordev);
		}
	}

#if NBIO > 0
	if (sc->sc_nslots > 0 &&
	    bio_register(self, safte_ioctl) != 0) {
		printf("%s: unable to register ioctl with bio\n", DEVNAME(sc));
		sc->sc_nslots = 0;
	} else
		i++;
#endif

	if (i) /* if we're doing something, then preinit encbuf and sensors */
		safte_read_encstat(sc);
	else {
		dma_free(sc->sc_encbuf, sc->sc_encbuflen);
		sc->sc_encbuf = NULL;
	}
}
Example #15
void fb_destroy(struct fb_t * fb, struct render_t * render)
{
	if(render)
	{
		sw_render_destroy_data(render);
		dma_free(render->pixels);
		free(render);
	}
}
Example #16
static void dos_extended_partition(struct block_device *blk, struct partition_desc *pd,
		struct partition *partition, uint32_t signature)
{
	uint8_t *buf = dma_alloc(SECTOR_SIZE);
	uint32_t ebr_sector = partition->first_sec;
	struct partition_entry *table = (struct partition_entry *)&buf[0x1be];
	unsigned partno = 5;

	while (pd->used_entries < ARRAY_SIZE(pd->parts)) {
		int rc, i;
		int n = pd->used_entries;

		dev_dbg(blk->dev, "expect EBR in sector %x\n", ebr_sector);

		rc = block_read(blk, buf, ebr_sector, 1);
		if (rc != 0) {
			dev_err(blk->dev, "Cannot read EBR partition table\n");
			goto out;
		}

		/* sanity checks */
		if (buf[0x1fe] != 0x55 || buf[0x1ff] != 0xaa) {
			dev_err(blk->dev, "sector %x doesn't contain an EBR signature\n", ebr_sector);
			goto out;
		}

		for (i = 0x1de; i < 0x1fe; ++i)
			if (buf[i]) {
				dev_err(blk->dev, "EBR's third or fourth partition non-empty\n");
				goto out;
			}
		/* /sanity checks */

		/* the first entry defines the extended partition */
		pd->parts[n].first_sec = ebr_sector +
			get_unaligned_le32(&table[0].partition_start);
		pd->parts[n].size = get_unaligned_le32(&table[0].partition_size);
		pd->parts[n].dos_partition_type = table[0].type;
		if (signature)
			sprintf(pd->parts[n].partuuid, "%08x-%02u",
				signature, partno);
		pd->used_entries++;
		partno++;

		/* the second entry defines the start of the next ebr if != 0 */
		if (get_unaligned_le32(&table[1].partition_start))
			ebr_sector = partition->first_sec +
				get_unaligned_le32(&table[1].partition_start);
		else
			break;
	}

out:
	dma_free(buf);
	return;
}
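
Unlike the OpenBSD pool-flag variant, the barebox dma_alloc() above takes only a size, and the matching dma_free() takes only the pointer. A minimal sketch of that pairing, reusing the block_read() call from dos_extended_partition(); read_one_sector is a made-up helper name:

static int read_one_sector(struct block_device *blk, uint32_t sector)
{
	uint8_t *buf = dma_alloc(SECTOR_SIZE);
	int rc;

	if (!buf)
		return -ENOMEM;

	/* bounce the sector through the DMA-capable buffer */
	rc = block_read(blk, buf, sector, 1);

	/* no size argument here, unlike the OpenBSD dma_free() examples */
	dma_free(buf);
	return rc;
}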
Example #17
static int dm_test_dma_rx(struct unit_test_state *uts)
{
	struct udevice *dev;
	struct dma dma_tx, dma_rx;
	u8 src_buf[512];
	u8 dst_buf[512];
	void *dst_ptr;
	size_t len = 512;
	u32 meta1, meta2;
	int i;

	ut_assertok(uclass_get_device_by_name(UCLASS_DMA, "dma", &dev));

	ut_assertok(dma_get_by_name(dev, "tx0", &dma_tx));
	ut_assertok(dma_get_by_name(dev, "rx0", &dma_rx));

	ut_assertok(dma_enable(&dma_tx));
	ut_assertok(dma_enable(&dma_rx));

	memset(dst_buf, 0, len);
	for (i = 0; i < len; i++)
		src_buf[i] = i;
	meta1 = 0xADADDEAD;
	meta2 = 0;
	dst_ptr = NULL;

	ut_assertok(dma_prepare_rcv_buf(&dma_tx, dst_buf, len));

	ut_assertok(dma_send(&dma_tx, src_buf, len, &meta1));

	ut_asserteq(len, dma_receive(&dma_rx, &dst_ptr, &meta2));
	ut_asserteq(0xADADDEAD, meta2);
	ut_asserteq_ptr(dst_buf, dst_ptr);

	ut_assertok(dma_disable(&dma_tx));
	ut_assertok(dma_disable(&dma_rx));

	ut_assertok(dma_free(&dma_tx));
	ut_assertok(dma_free(&dma_rx));
	ut_assertok(memcmp(src_buf, dst_buf, len));

	return 0;
}
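
The U-Boot test above exercises the DMA uclass API end to end. Below is a reduced sketch of just the transmit-side channel lifecycle it relies on (dma_get_by_name(), dma_enable(), dma_send(), dma_disable(), dma_free()); dma_tx_once is a hypothetical helper and no metadata is passed:

static int dma_tx_once(struct udevice *dev, void *buf, size_t len)
{
	struct dma ch;
	int ret;

	/* look up the "tx0" channel on the given DMA controller */
	ret = dma_get_by_name(dev, "tx0", &ch);
	if (ret)
		return ret;

	ret = dma_enable(&ch);
	if (ret)
		return ret;

	/* queue one transfer; NULL metadata since none is needed here */
	ret = dma_send(&ch, buf, len, NULL);

	dma_disable(&ch);
	dma_free(&ch);
	return ret;
}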
Example #18
static void __exit au1k_irda_exit(void)
{
	struct net_device *dev = ir_devs[0];
	struct au1k_private *aup;

	if (!dev) {
		printk(KERN_ERR "au1k_ircc no dev found\n");
		return;
	}
	aup = (struct au1k_private *) dev->priv;
	if (aup->db[0].vaddr)  {
		dma_free((void *)aup->db[0].vaddr, 
				MAX_BUF_SIZE * 2*NUM_IR_DESC);
		aup->db[0].vaddr = 0;
	}
	if (aup->rx_ring[0]) {
		dma_free((void *)aup->rx_ring[0], 
				2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
		aup->rx_ring[0] = 0;
	}
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
	ir_devs[0] = 0;
}
Example #19
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity_16 cdb;
	struct scsi_read_cap_data_16 *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;

	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY_16;
	cdb.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(*rdcap), cdb.length);

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		sc->params.disksize = _8btol(rdcap->addr) + 1;
		sc->params.secsize = _4btol(rdcap->length);
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

 done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
Example #20
void optimsoc_dma_transfer(void *local, uint32_t remote_tile, void *remote,
                           size_t size, dma_direction_t dir)
{
    dma_transfer_handle_t dma_handle;

    /* allocate transfer handle */
    while(dma_alloc(&dma_handle) != DMA_SUCCESS) {
        optimsoc_thread_yield(optimsoc_thread_current());
    }

    /* the transfer length is counted in 32-bit words, so the byte count
     * must be word aligned */
    assert(size % 4 == 0);
    dma_transfer(local, remote_tile, remote, size/4, dir, dma_handle);

    dma_wait(dma_handle);

    dma_free(dma_handle);
}
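
A possible call site for the helper above; the buffer is word aligned as the assert requires, the remote address is arbitrary, and LOCAL2REMOTE is assumed to be one of the dma_direction_t values (check the optimsoc DMA header for the exact name):

void copy_to_tile(void)
{
    static uint32_t words[16];

    /* 64 bytes, a multiple of 4, copied from this tile into tile 1 */
    optimsoc_dma_transfer(words, 1, (void *)0x00100000, sizeof(words),
                          LOCAL2REMOTE);
}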
Example #21
//#undef printf
int dma_wait(dma_chain_t * chain)
{
  if (chain == NULL)
    return -1;

  if (irq_inside_int()) {
    panic("dma_wait called inside an irq !\n");
    dma_free(chain);
    return -1;
  }

  printf("\n !!!!!!!!!! waiting ... \n");
  sem_wait(chain->sema);
/*   if (sem_wait_timed(chain->sema, 1000)) { */
/*     printf("dma timeout...\n"); */
/*     dma_free(chain); */
/*     return -1; */
/*   } */
  printf("\n !!!!!!!!!! done !\n");
  free_chain(chain);
  return 0;
}
Example #22
int
emc_sp_info(struct emc_softc *sc)
{
	struct emc_vpd_sp_info *pg;
	int error;

	pg = dma_alloc(sizeof(*pg), PR_WAITOK);

	error = scsi_inquire_vpd(sc->sc_path.p_link, pg, sizeof(*pg),
	    EMC_VPD_SP_INFO, scsi_autoconf);
	if (error != 0)
		goto done;

	sc->sc_sp = pg->current_sp;
	sc->sc_port = pg->port;
	sc->sc_lun_state = pg->lun_state;

	error = 0;
done:
	dma_free(pg, sizeof(*pg));
	return (error);
}
Example #23
static int __init
au1000_probe1(struct net_device *dev, long ioaddr, int irq, int port_num)
{
	static unsigned version_printed = 0;
	struct au1000_private *aup = NULL;
	int i, retval = 0;
	db_dest_t *pDB, *pDBfree;
	char *pmac, *argptr;
	char ethaddr[6];

	if (!request_region(ioaddr, MAC_IOSIZE, "Au1000 ENET")) {
		 return -ENODEV;
	}

	if (version_printed++ == 0) printk(version);

	if (!dev) {
		dev = init_etherdev(0, sizeof(struct au1000_private));
	}
	if (!dev) {
		 printk (KERN_ERR "au1000 eth: init_etherdev failed\n");  
		 return -ENODEV;
	}

	printk("%s: Au1xxx ethernet found at 0x%lx, irq %d\n", 
			dev->name, ioaddr, irq);

	/* Initialize our private structure */
	if (dev->priv == NULL) {
		aup = (struct au1000_private *) 
			kmalloc(sizeof(*aup), GFP_KERNEL);
		if (aup == NULL) {
			retval = -ENOMEM;
			goto free_region;
		}
		dev->priv = aup;
	}

	aup = dev->priv;
	memset(aup, 0, sizeof(*aup));


	/* Allocate the data buffers */
	aup->vaddr = (u32)dma_alloc(MAX_BUF_SIZE * 
			(NUM_TX_BUFFS+NUM_RX_BUFFS), &aup->dma_addr);
	if (!aup->vaddr) {
		retval = -ENOMEM;
		goto free_region;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
	/* Setup some variables for quick register address access */
	switch (ioaddr) {
	case AU1000_ETH0_BASE:
	case AU1500_ETH0_BASE:
		/* check env variables first */
		if (!get_ethernet_addr(ethaddr)) { 
			memcpy(au1000_mac_addr, ethaddr, sizeof(dev->dev_addr));
		} else {
			/* Check command line */
			argptr = prom_getcmdline();
			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
				printk(KERN_INFO "%s: No mac address found\n", 
						dev->name);
				/* use the hard coded mac addresses */
			} else {
				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
				memcpy(au1000_mac_addr, ethaddr, 
						sizeof(dev->dev_addr));
			}
		}
		if (ioaddr == AU1000_ETH0_BASE)
			aup->enable = (volatile u32 *) 
				((unsigned long)AU1000_MAC0_ENABLE);
		else
			aup->enable = (volatile u32 *) 
				((unsigned long)AU1500_MAC0_ENABLE);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
			break;
	case AU1000_ETH1_BASE:
	case AU1500_ETH1_BASE:
		if (ioaddr == AU1000_ETH1_BASE)
			aup->enable = (volatile u32 *) 
				((unsigned long)AU1000_MAC1_ENABLE);
		else
			aup->enable = (volatile u32 *) 
				((unsigned long)AU1500_MAC1_ENABLE);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		dev->dev_addr[4] += 0x10;
		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
			break;
	default:
		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
		break;

	}

	aup->phy_addr = PHY_ADDRESS;

	/* bring the device out of reset, otherwise probing the mii
	 * will hang */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | 
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);

	if (mii_probe(dev) != 0) {
		retval = -ENODEV;
		goto free_region;
	}

	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i=0; i<(NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	for (i=0; i<NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			retval = -ENOMEM;
			goto free_region;
		}
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i=0; i<NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			retval = -ENOMEM;
			goto free_region;
		}
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	spin_lock_init(&aup->lock);
	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->get_stats = au1000_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	dev->set_config = &au1000_set_config;
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;


	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	/* 
	 * The boot code uses the ethernet controller, so reset it to start 
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);
	return 0;

free_region:
	release_region(ioaddr, MAC_IOSIZE);
	unregister_netdev(dev);
	if (aup && aup->vaddr)
		dma_free((void *)aup->vaddr, 
				MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS));
	if (dev->priv != NULL)
		kfree(dev->priv);
	printk(KERN_ERR "%s: au1000_probe1 failed.  Returns %d\n",
	       dev->name, retval);
	kfree(dev);
	return retval;
}
Example #24
static int au1k_irda_net_init(struct net_device *dev)
{
	struct au1k_private *aup = NULL;
	int i, retval = 0, err;
	db_dest_t *pDB, *pDBfree;
	dma_addr_t temp;

	dev->priv = kmalloc(sizeof(struct au1k_private), GFP_KERNEL);
	if (dev->priv == NULL) {
		retval = -ENOMEM;
		goto out;
	}
	memset(dev->priv, 0, sizeof(struct au1k_private));
	aup = dev->priv;

	err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
	if (err) {
		retval = err;
		goto out;
	}

	dev->open = au1k_irda_start;
	dev->hard_start_xmit = au1k_irda_hard_xmit;
	dev->stop = au1k_irda_stop;
	dev->get_stats = au1k_irda_stats;
	dev->do_ioctl = au1k_irda_ioctl;
	dev->tx_timeout = au1k_tx_timeout;

	irda_device_setup(dev);
	irda_init_max_qos_capabilies(&aup->qos);

	/* The only value we must override is the baud rate */
	aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000 |(IR_4000000 << 8);
	
	aup->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&aup->qos);


	/* Tx ring follows rx ring + 512 bytes */
	/* we need a 1k aligned buffer */
	aup->rx_ring[0] = (ring_dest_t *)
		dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);

	/* allocate the data buffers */
	aup->db[0].vaddr = 
		(void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
	if (!aup->db[0].vaddr || !aup->rx_ring[0]) {
		retval = -ENOMEM;
		goto out;
	}

	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);

	pDBfree = NULL;
	pDB = aup->db;
	for (i=0; i<(2*NUM_IR_DESC); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = 
			(u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	/* attach a data buffer to each descriptor */
	for (i=0; i<NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			retval = -ENOMEM;
			goto out;
		}
		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
		aup->rx_db_inuse[i] = pDB;
	}
	for (i=0; i<NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			retval = -ENOMEM;
			goto out;
		}
		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
		aup->tx_ring[i]->count_0 = 0;
		aup->tx_ring[i]->count_1 = 0;
		aup->tx_ring[i]->flags = 0;
		aup->tx_db_inuse[i] = pDB;
	}

#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
	/* power on */
	bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
	bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
	au_sync();
#endif

	return 0;

out:
	if (aup) {
		if (aup->db[0].vaddr)
			dma_free((void *)aup->db[0].vaddr,
					MAX_BUF_SIZE * 2*NUM_IR_DESC);
		if (aup->rx_ring[0])
			dma_free((void *)aup->rx_ring[0],
					2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
		if (aup->rx_buff.head)
			kfree(aup->rx_buff.head);
	}
	if (dev->priv != NULL)
		kfree(dev->priv);
	unregister_netdevice(dev);
	printk(KERN_ERR "%s: au1k_init_module failed.  Returns %d\n",
	       dev->name, retval);
	return retval;
}
Example #25
static struct net_device *
au1000_probe(u32 ioaddr, int irq, int port_num)
{
	static unsigned version_printed = 0;
	struct au1000_private *aup = NULL;
	struct net_device *dev = NULL;
	db_dest_t *pDB, *pDBfree;
	char *pmac, *argptr;
	char ethaddr[6];
	int i, err;

	if (!request_region(ioaddr, MAC_IOSIZE, "Au1x00 ENET"))
		return NULL;

	if (version_printed++ == 0) 
		printk(version);

	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");  
		return NULL;
	}

	if ((err = register_netdev(dev))) {
		printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
				err);
		kfree(dev);
		return NULL;
	}

	printk("%s: Au1x Ethernet found at 0x%x, irq %d\n", 
			dev->name, ioaddr, irq);

	aup = dev->priv;

	/* Allocate the data buffers */
	aup->vaddr = (u32)dma_alloc(MAX_BUF_SIZE * 
			(NUM_TX_BUFFS+NUM_RX_BUFFS), &aup->dma_addr);
	if (!aup->vaddr) {
		kfree(dev);
		release_region(ioaddr, MAC_IOSIZE);
		return NULL;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
	/* Setup some variables for quick register address access */
	if (ioaddr == iflist[0].base_addr)
	{
		/* check env variables first */
		if (!get_ethernet_addr(ethaddr)) { 
			memcpy(au1000_mac_addr, ethaddr, sizeof(dev->dev_addr));
		} else {
			/* Check command line */
			argptr = prom_getcmdline();
			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
				printk(KERN_INFO "%s: No mac address found\n", 
						dev->name);
				/* use the hard coded mac addresses */
			} else {
				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
				memcpy(au1000_mac_addr, ethaddr, 
						sizeof(dev->dev_addr));
			}
		}
			aup->enable = (volatile u32 *) 
				((unsigned long)iflist[0].macen_addr);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
		aup->mac_id = 0;
		au_macs[0] = aup;
	} else if (ioaddr == iflist[1].base_addr) {
		aup->enable = (volatile u32 *)
			((unsigned long)iflist[1].macen_addr);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		dev->dev_addr[4] += 0x10;
		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
		aup->mac_id = 1;
		au_macs[1] = aup;
	} else {
		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
	}

	/* bring the device out of reset, otherwise probing the mii
	 * will hang */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | 
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);

	aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
	if (!aup->mii) {
		printk(KERN_ERR "%s: out of memory\n", dev->name);
		goto err_out;
	}
	aup->mii->mii_control_reg = 0;
	aup->mii->mii_data_reg = 0;

	if (mii_probe(dev) != 0) {
		goto err_out;
	}

	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	spin_lock_init(&aup->lock);
	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->get_stats = au1000_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	dev->set_config = &au1000_set_config;
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/* 
	 * The boot code uses the ethernet controller, so reset it to start 
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);

	return dev;

err_out:
	/* here we should have a valid dev plus aup-> register addresses
	 * so we can reset the mac properly.*/
	reset_mac(dev);
	if (aup->mii)
		kfree(aup->mii);
	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
	dma_free((void *)aup->vaddr, MAX_BUF_SIZE * 
			(NUM_TX_BUFFS+NUM_RX_BUFFS));
	unregister_netdev(dev);
	kfree(dev);
	release_region(ioaddr, MAC_IOSIZE);
	return NULL;
}
Example #26
void
viornd_attach(struct device *parent, struct device *self, void *aux)
{
	struct viornd_softc *sc = (struct viornd_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	unsigned int shift;

	vsc->sc_vqs = &sc->sc_vq;
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child != NULL)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_NET;
	vsc->sc_intrhand = virtio_vq_intr;
	sc->sc_virtio = vsc;

	virtio_negotiate_features(vsc, 0, NULL);

	if (sc->sc_dev.dv_cfdata->cf_flags & VIORND_ONESHOT) {
		sc->sc_interval = 0;
	} else {
		shift = VIORND_INTERVAL_SHIFT(sc->sc_dev.dv_cfdata->cf_flags);
		if (shift == 0)
			shift = VIORND_INTERVAL_SHIFT_DEFAULT;
		sc->sc_interval = 15 * (1 << shift);
	}
#if VIORND_DEBUG
	printf(": request interval: %us\n", sc->sc_interval);
#endif

	sc->sc_buf = dma_alloc(VIORND_BUFSIZE, PR_NOWAIT|PR_ZERO);
	if (sc->sc_buf == NULL) {
		printf(": Can't alloc dma buffer\n");
		goto err;
	}
	if (bus_dmamap_create(sc->sc_virtio->sc_dmat, VIORND_BUFSIZE, 1,
	    VIORND_BUFSIZE, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
	    &sc->sc_dmamap)) {
		printf(": Can't alloc dmamap\n");
		goto err;
	}
	if (bus_dmamap_load(sc->sc_virtio->sc_dmat, sc->sc_dmamap,
	    sc->sc_buf, VIORND_BUFSIZE, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ)) {
		printf(": Can't load dmamap\n");
		goto err2;
	}

	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1,
	    "Entropy request") != 0) {
		printf(": Can't alloc virtqueue\n");
		goto err2;
	}

	sc->sc_vq.vq_done = viornd_vq_done;
	virtio_start_vq_intr(vsc, &sc->sc_vq);
	timeout_set(&sc->sc_tick, viornd_tick, sc);
	timeout_add(&sc->sc_tick, 1);

	printf("\n");
	return;
err2:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap);
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	if (sc->sc_buf != NULL) {
		dma_free(sc->sc_buf, VIORND_BUFSIZE);
		sc->sc_buf = NULL;
	}
	return;
}
Example #27
int
safte_bio_blink(struct safte_softc *sc, struct bioc_blink *blink)
{
	struct safte_writebuf_cmd *cmd;
	struct safte_slotop *op;
	struct scsi_xfer *xs;
	int error, slot, flags = 0, wantblink;

	switch (blink->bb_status) {
	case BIOC_SBBLINK:
		wantblink = 1;
		break;
	case BIOC_SBUNBLINK:
		wantblink = 0;
		break;
	default:
		return (EINVAL);
	}

	rw_enter_read(&sc->sc_lock);
	for (slot = 0; slot < sc->sc_nslots; slot++) {
		if (sc->sc_slots[slot] == blink->bb_target)
			break;
	}
	rw_exit_read(&sc->sc_lock);

	if (slot >= sc->sc_nslots)
		return (ENODEV);

	op = dma_alloc(sizeof(*op), PR_WAITOK | PR_ZERO);

	op->opcode = SAFTE_WRITE_SLOTOP;
	op->slot = slot;
	op->flags |= wantblink ? SAFTE_SLOTOP_IDENTIFY : 0;

	if (cold)
		flags |= SCSI_AUTOCONF;
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_OUT | SCSI_SILENT);
	if (xs == NULL) {
		dma_free(op, sizeof(*op));
		return (ENOMEM);
	}
	xs->cmdlen = sizeof(*cmd);
	xs->data = (void *)op;
	xs->datalen = sizeof(*op);
	xs->retries = 2;
	xs->timeout = 30000;

	cmd = (struct safte_writebuf_cmd *)xs->cmd;
	cmd->opcode = WRITE_BUFFER;
	cmd->flags |= SAFTE_WR_MODE;
	cmd->length = htobe16(sizeof(struct safte_slotop));

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error != 0) {
		error = EIO;
	}
	dma_free(op, sizeof(*op));

	return (error);
}
Example #28
int
safte_read_config(struct safte_softc *sc)
{
	struct safte_config *config = NULL;
	struct safte_readbuf_cmd *cmd;
	struct safte_sensor *s;
	struct scsi_xfer *xs;
	int error = 0, flags = 0, i, j;

	config = dma_alloc(sizeof(*config), PR_NOWAIT);
	if (config == NULL)
		return (1);

	if (cold)
		flags |= SCSI_AUTOCONF;
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL) {
		error = 1;
		goto done;
	}
	xs->cmdlen = sizeof(*cmd);
	xs->data = (void *)config;
	xs->datalen = sizeof(*config);
	xs->retries = 2;
	xs->timeout = 30000;

	cmd = (struct safte_readbuf_cmd *)xs->cmd;
	cmd->opcode = READ_BUFFER;
	cmd->flags |= SAFTE_RD_MODE;
	cmd->bufferid = SAFTE_RD_CONFIG;
	cmd->length = htobe16(sizeof(*config));

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error != 0) {
		error = 1;
		goto done;
	}

	DPRINTF(("%s: nfans: %d npwrsup: %d nslots: %d doorlock: %d ntemps: %d"
	    " alarm: %d celsius: %d ntherm: %d\n", DEVNAME(sc), config->nfans,
	    config->npwrsup, config->nslots, config->doorlock, config->ntemps,
	    config->alarm, SAFTE_CFG_CELSIUS(config->therm),
	    SAFTE_CFG_NTHERM(config->therm)));

	sc->sc_encbuflen = config->nfans * sizeof(u_int8_t) + /* fan status */
	    config->npwrsup * sizeof(u_int8_t) + /* power supply status */
	    config->nslots * sizeof(u_int8_t) + /* device scsi id (lun) */
	    sizeof(u_int8_t) + /* door lock status */
	    sizeof(u_int8_t) + /* speaker status */
	    config->ntemps * sizeof(u_int8_t) + /* temp sensors */
	    sizeof(u_int16_t); /* temp out of range sensors */

	sc->sc_encbuf = dma_alloc(sc->sc_encbuflen, PR_NOWAIT);
	if (sc->sc_encbuf == NULL) {
		error = 1;
		goto done;
	}

	sc->sc_nsensors = config->nfans + config->npwrsup + config->ntemps +
		(config->doorlock ? 1 : 0) + (config->alarm ? 1 : 0);

	sc->sc_sensors = mallocarray(sc->sc_nsensors, sizeof(struct safte_sensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL) {
		dma_free(sc->sc_encbuf, sc->sc_encbuflen);
		sc->sc_encbuf = NULL;
		sc->sc_nsensors = 0;
		error = 1;
		goto done;
	}

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	s = sc->sc_sensors;

	for (i = 0; i < config->nfans; i++) {
		s->se_type = SAFTE_T_FAN;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + i);
		s->se_sensor.type = SENSOR_INDICATOR;
		snprintf(s->se_sensor.desc, sizeof(s->se_sensor.desc),
		    "Fan%d", i);

		s++;
	}
	j = config->nfans;

	for (i = 0; i < config->npwrsup; i++) {
		s->se_type = SAFTE_T_PWRSUP;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j + i);
		s->se_sensor.type = SENSOR_INDICATOR;
		snprintf(s->se_sensor.desc, sizeof(s->se_sensor.desc),
		    "PSU%d", i);

		s++;
	}
	j += config->npwrsup;

#if NBIO > 0
	sc->sc_nslots = config->nslots;
	sc->sc_slots = (u_int8_t *)(sc->sc_encbuf + j);
#endif
	j += config->nslots;

	if (config->doorlock) {
		s->se_type = SAFTE_T_DOORLOCK;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j);
		s->se_sensor.type = SENSOR_INDICATOR;
		strlcpy(s->se_sensor.desc, "doorlock",
		    sizeof(s->se_sensor.desc));

		s++;
	}
	j++;

	if (config->alarm) {
		s->se_type = SAFTE_T_ALARM;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j);
		s->se_sensor.type = SENSOR_INDICATOR;
		strlcpy(s->se_sensor.desc, "alarm", sizeof(s->se_sensor.desc));

		s++;
	}
	j++;

	/*
	 * Stash the temp info so we can get out-of-range status. Limit the
	 * number so the out-of-temp checks can't go into memory they don't own
	 */
	sc->sc_ntemps = (config->ntemps > 15) ? 15 : config->ntemps;
	sc->sc_temps = s;
	sc->sc_celsius = SAFTE_CFG_CELSIUS(config->therm);
	for (i = 0; i < config->ntemps; i++) {
		s->se_type = SAFTE_T_TEMP;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j + i);
		s->se_sensor.type = SENSOR_TEMP;

		s++;
	}
	j += config->ntemps;

	sc->sc_temperrs = (u_int8_t *)(sc->sc_encbuf + j);
done:
	dma_free(config, sizeof(*config));
	return (error);
}
Example #29
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
	struct viomb_softc *sc = (struct viomb_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	u_int32_t features;
	int i;

	if (vsc->sc_child != NULL) {
		printf("child already attached for %s; something wrong...\n",
		    parent->dv_xname);
		return;
	}

	/* fail on non-4K page size archs */
	if (VIRTIO_PAGE_SIZE != PAGE_SIZE){
		printf("non-4K page size arch found, needs %d, got %d\n",
		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
		return;
	}

	sc->sc_virtio = vsc;
	vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
	vsc->sc_nvqs = 0;
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_config_change = viomb_config_change;
	vsc->sc_intrhand = virtio_vq_intr;

	/* negotiate features */
	features = VIRTIO_F_RING_INDIRECT_DESC;
	features = virtio_negotiate_features(vsc, features,
					     viomb_feature_names);

	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0))
		goto err;
	vsc->sc_nvqs++;
	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0))
		goto err;
	vsc->sc_nvqs++;

	sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
	sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

	viomb_read_config(sc);
	TAILQ_INIT(&sc->sc_balloon_pages);

	if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
	    PR_NOWAIT|PR_ZERO)) == NULL) {
		printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
			      1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
		printf("%s: dmamap creation failed.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
			    &sc->sc_req.bl_pages[0],
			    sizeof(uint32_t) * PGS_PER_REQ,
			    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: dmamap load failed.\n", DEVNAME(sc));
		goto err_dmamap;
	}

	sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO);
	if (sc->sc_taskq == NULL)
		goto err_dmamap;
	task_set(&sc->sc_task, viomb_worker, sc, NULL);

	printf("\n");
	return;
err_dmamap:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
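	/* bl_pages was allocated with sizeof(u_int32_t) * PGS_PER_REQ, so it
	 * is released with that same size */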
	if (sc->sc_req.bl_pages)
		dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
	for (i = 0; i < vsc->sc_nvqs; i++)
		virtio_free_vq(vsc, &sc->sc_vq[i]);
	vsc->sc_nvqs = 0;
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}
Example #30
int
safte_match(struct device *parent, void *match, void *aux)
{
	struct scsi_inquiry_data *inqbuf;
	struct scsi_attach_args	*sa = aux;
	struct scsi_inquiry_data *inq = sa->sa_inqbuf;
	struct scsi_xfer *xs;
	struct safte_inq *si;
	int error, flags = 0, length;

	if (inq == NULL)
		return (0);

	/* match on dell enclosures */
	if ((inq->device & SID_TYPE) == T_PROCESSOR &&
	    SCSISPC(inq->version) == 3)
		return (2);

	if ((inq->device & SID_TYPE) != T_PROCESSOR ||
	    SCSISPC(inq->version) != 2 ||
	    (inq->response_format & SID_ANSII) != 2)
		return (0);

	length = inq->additional_length + SAFTE_EXTRA_OFFSET;
	if (length < SAFTE_INQ_LEN)
		return (0);
	if (length > sizeof(*inqbuf))
		length = sizeof(*inqbuf);

	inqbuf = dma_alloc(sizeof(*inqbuf), PR_NOWAIT | PR_ZERO);
	if (inqbuf == NULL)
		return (0);

	memset(inqbuf->extra, ' ', sizeof(inqbuf->extra));

	if (cold)
		flags |= SCSI_AUTOCONF;
	xs = scsi_xs_get(sa->sa_sc_link, flags | SCSI_DATA_IN);
	if (xs == NULL)
		goto fail;

	xs->retries = 2;
	xs->timeout = 10000;

	scsi_init_inquiry(xs, 0, 0, inqbuf, length);

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error)
		goto fail;

	si = (struct safte_inq *)&inqbuf->extra;
	if (memcmp(si->ident, SAFTE_IDENT, sizeof(si->ident)) == 0) {
		dma_free(inqbuf, sizeof(*inqbuf));
		return (2);
	}

fail:
	dma_free(inqbuf, sizeof(*inqbuf));
	return (0);
}