Code Example #1
static void __devexit cs5520_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
Code Example #2
static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
{
	struct ata_probe_ent *probe_ent;
	unsigned int i;

	probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return NULL;

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	probe_ent->sht		= &svia_sht;
	probe_ent->port_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
	probe_ent->port_ops	= &vt6421_sata_ops;
	probe_ent->n_ports	= N_PORTS;
	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= IRQF_SHARED;
	probe_ent->pio_mask	= 0x1f;
	probe_ent->mwdma_mask	= 0x07;
	probe_ent->udma_mask	= 0x7f;

	for (i = 0; i < 6; i++)
		if (!pcim_iomap(pdev, i, 0)) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "failed to iomap PCI BAR %d\n", i);
			return NULL;
		}

	for (i = 0; i < N_PORTS; i++)
		vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i);

	return probe_ent;
}
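Every example on this page passes a struct pci_dev through pci_dev_to_dev() to obtain the embedded struct device that the driver-model, DMA, and devres APIs expect. As a minimal sketch, the helper is commonly provided by the compat headers of out-of-tree drivers roughly as below; the guard and the exact header it lives in are assumptions, not taken from any of the projects shown here:

#ifndef pci_dev_to_dev
/* assumed compat definition: return the generic device embedded in a pci_dev */
#define pci_dev_to_dev(pdev) (&(pdev)->dev)
#endif

On kernels that already provide an equivalent helper the guard keeps this definition out of the way; either way the call reduces to taking the address of pdev->dev.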
Code Example #3
static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
{
	struct ata_probe_ent *probe_ent;
	unsigned int i;

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return NULL;

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	probe_ent->sht		= &svia_sht;
	probe_ent->host_flags	= ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
				  ATA_FLAG_NO_LEGACY;
	probe_ent->port_ops	= &svia_sata_ops;
	probe_ent->n_ports	= N_PORTS;
	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= SA_SHIRQ;
	probe_ent->pio_mask	= 0x1f;
	probe_ent->mwdma_mask	= 0x07;
	probe_ent->udma_mask	= 0x7f;

	for (i = 0; i < N_PORTS; i++)
		vt6421_init_addrs(probe_ent, pdev, i);

	return probe_ent;
}
Code Example #4
File: ixgbe_fcoe.c  Project: GreggIndustries/PF_RING
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid > netdev->fcoe_ddp_xid)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force invalidation of the ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		/* guaranteed to be invalidated after 100us */
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
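The kernel-doc above says this routine is the net_device_ops.ndo_fcoe_ddp_done implementation called by the ULD. A hedged sketch of how such a caller would reach it through the ops table; the wrapper name is hypothetical, and only the ndo_fcoe_ddp_done hook itself is taken from the comment above:

/* hypothetical ULD-side wrapper: returns the number of bytes already DDPed */
static int example_fcoe_ddp_done(struct net_device *netdev, u16 xid)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	if (ops->ndo_fcoe_ddp_done)
		return ops->ndo_fcoe_ddp_done(netdev, xid);

	return 0;	/* no DDP offload: nothing was placed by hardware */
}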
Code Example #5
File: ixgbe_sysfs.c  Project: AlexanderKlishin/ixgbe
/**
 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
 * @adapter: pointer to the adapter structure
 * @offset: offset in the eeprom sensor data table
 * @type: type of sensor data to display
 *
 * For each file we want in hwmon's sysfs interface we need a device_attribute.
 * This is included in our hwmon_attr struct that contains the references to
 * the data structures we need to get the data to display.
 */
static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
				unsigned int offset, int type) {
	int rc;
	unsigned int n_attr;
	struct hwmon_attr *ixgbe_attr;

	n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
	ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];

	switch (type) {
	case IXGBE_HWMON_TYPE_LOC:
		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
			 "temp%u_label", offset);
		break;
	case IXGBE_HWMON_TYPE_TEMP:
		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
			 "temp%u_input", offset);
		break;
	case IXGBE_HWMON_TYPE_CAUTION:
		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
			 "temp%u_max", offset);
		break;
	case IXGBE_HWMON_TYPE_MAX:
		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
			 "temp%u_crit", offset);
		break;
	default:
		rc = -EPERM;
		return rc;
	}

	/* These are always the same regardless of type */
	ixgbe_attr->sensor =
		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
	ixgbe_attr->hw = &adapter->hw;
	ixgbe_attr->dev_attr.store = NULL;
	ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
	ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;

	rc = device_create_file(pci_dev_to_dev(adapter->pdev),
				&ixgbe_attr->dev_attr);

	if (rc == 0)
		++adapter->ixgbe_hwmon_buff.n_hwmon;

	return rc;
}
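One caveat: the device_attribute filled in above lives in dynamically allocated memory, and mainline versions of this helper also run sysfs_attr_init() on it before device_create_file() so that lockdep-enabled kernels get a valid lockdep key. A hedged fragment showing where that extra call would sit, assuming this out-of-tree copy wants the same behaviour:

	/* assumed extra step just before device_create_file() */
	sysfs_attr_init(&ixgbe_attr->dev_attr.attr);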
Code Example #6
/**
 * e1000e_ptp_init - initialize PTP for devices which support it
 * @adapter: board private structure
 *
 * This function performs the required steps for enabling PTP support.
 * If PTP support has already been loaded it simply calls the cyclecounter
 * init routine and exits.
 **/
void e1000e_ptp_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->ptp_clock = NULL;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	adapter->ptp_clock_info = e1000e_ptp_clock_info;

	snprintf(adapter->ptp_clock_info.name,
		 sizeof(adapter->ptp_clock_info.name), "%pm",
		 adapter->netdev->perm_addr);

	switch (hw->mac.type) {
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (((hw->mac.type != e1000_pch_lpt) &&
		     (hw->mac.type != e1000_pch_spt)) ||
		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
			adapter->ptp_clock_info.max_adj = 24000000 - 1;
			break;
		}
		/* fall-through */
	case e1000_82574:
	case e1000_82583:
		adapter->ptp_clock_info.max_adj = 600000000 - 1;
		break;
	default:
		break;
	}

	INIT_DELAYED_WORK(&adapter->systim_overflow_work,
			  e1000e_systim_overflow_work);

	schedule_delayed_work(&adapter->systim_overflow_work,
			      E1000_SYSTIM_OVERFLOW_PERIOD);

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
						pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		e_err("ptp_clock_register failed\n");
	} else {
		e_info("registered PHC clock\n");
	}
}
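The registration above has a matching teardown path. A hedged sketch of what it usually looks like; the function name is illustrative and the real driver's remove routine may differ in detail:

/* hypothetical teardown mirroring e1000e_ptp_init(): stop the overflow
 * worker and drop the PHC registration if one was created */
static void example_e1000e_ptp_remove(struct e1000_adapter *adapter)
{
	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	cancel_delayed_work_sync(&adapter->systim_overflow_work);

	if (adapter->ptp_clock) {
		ptp_clock_unregister(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
	}
}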
Code Example #7
File: ixgbe_sysfs.c  Project: AlexanderKlishin/ixgbe
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter __maybe_unused *adapter)
{
#ifdef IXGBE_HWMON
	int i;

	if (adapter == NULL)
		return;

	for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
		device_remove_file(pci_dev_to_dev(adapter->pdev),
			   &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
	}

	kfree(adapter->ixgbe_hwmon_buff.hwmon_list);

	if (adapter->ixgbe_hwmon_buff.device)
		hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
#endif /* IXGBE_HWMON */
}
Code Example #8
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u8 pcicfg;
	void __iomem *iomap[5];
	static struct ata_probe_ent probe[2];
	int ports = 0;

	/* IDE port enable bits */
	pci_read_config_byte(dev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	if ((pcicfg & 0x40) == 0) {
		printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
		pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
	}

	/* Perform set up for DMA */
	if (pci_enable_device_bars(dev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports */
	iomap[0] = devm_ioport_map(&dev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&dev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&dev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&dev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(dev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	/* We have to do our own plumbing as the PCI setup for this
	   chipset is non-standard so we can't punt to the libata code */

	INIT_LIST_HEAD(&probe[0].node);
	probe[0].dev = pci_dev_to_dev(dev);
	probe[0].port_ops = &cs5520_port_ops;
	probe[0].sht = &cs5520_sht;
	probe[0].pio_mask = 0x1F;
	probe[0].mwdma_mask = id->driver_data;
	probe[0].irq = 14;
	probe[0].irq_flags = 0;
	probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
	probe[0].n_ports = 1;
	probe[0].port[0].cmd_addr = iomap[0];
	probe[0].port[0].ctl_addr = iomap[1];
	probe[0].port[0].altstatus_addr = iomap[1];
	probe[0].port[0].bmdma_addr = iomap[4];

	/* The secondary lurks at different addresses but is otherwise
	   the same beastie */

	probe[1] = probe[0];
	INIT_LIST_HEAD(&probe[1].node);
	probe[1].irq = 15;
	probe[1].port[0].cmd_addr = iomap[2];
	probe[1].port[0].ctl_addr = iomap[3];
	probe[1].port[0].altstatus_addr = iomap[3];
	probe[1].port[0].bmdma_addr = iomap[4] + 8;

	/* Let libata fill in the port details */
	ata_std_ports(&probe[0].port[0]);
	ata_std_ports(&probe[1].port[0]);

	/* Now add the ports that are active */
	if (pcicfg & 1)
		ports += ata_device_add(&probe[0]);
	if (pcicfg & 2)
		ports += ata_device_add(&probe[1]);
	if (ports)
		return 0;
	return -ENODEV;
}
Code Example #9
File: igb_uio.c  Project: Cppowboy/mtcp
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	struct msix_entry msix_entry;
	int err;

	/* essential vars for configuring the device with net_device */
	struct net_device *netdev;
	struct net_adapter *adapter = NULL;
	struct ixgbe_hw *hw_i = NULL;
	struct e1000_hw *hw_e = NULL;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	err = pci_request_regions(dev, "igb_uio");
	if (err != 0) {
		dev_err(&dev->dev, "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev,  DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver run on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
		msix_entry.entry = 0;
		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(dev)) {
			dev_dbg(&dev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED;
			udev->info.irq = dev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&dev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = 0;
		break;

	default:
		dev_err(&dev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		err = -EINVAL;
		goto fail_release_iomem;
	}

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* initialize the corresponding netdev */
	netdev = alloc_etherdev(sizeof(struct net_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto fail_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, pci_dev_to_dev(dev));
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = dev;
	udev->adapter = adapter;
	adapter->type = retrieve_dev_specs(id);
	/* recover device-specific mac address */
	switch (adapter->type) {
	case IXGBE:
		hw_i = &adapter->hw._ixgbe_hw;
		hw_i->back = adapter;
		hw_i->hw_addr = ioremap(pci_resource_start(dev, 0),
				       pci_resource_len(dev, 0));
		if (!hw_i->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	case IGB:
		hw_e = &adapter->hw._e1000_hw;
		hw_e->back = adapter;
		hw_e->hw_addr = ioremap(pci_resource_start(dev, 0),
				      pci_resource_len(dev, 0));
		if (!hw_e->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	}

	netdev_assign_netdev_ops(netdev);
	strncpy(netdev->name, pci_name(dev), sizeof(netdev->name) - 1);
	retrieve_dev_addr(netdev, adapter);
	strcpy(netdev->name, "dpdk%d");
	err = register_netdev(netdev);
	if (err)
		goto fail_ioremap;
	adapter->netdev_registered = true;
	
	if (sscanf(netdev->name, "dpdk%hu", &adapter->bd_number) <= 0)
		goto fail_bdnumber;

	//printk(KERN_DEBUG "ifindex picked: %hu\n", adapter->bd_number);
	dev_info(&dev->dev, "ifindex picked: %hu\n", adapter->bd_number);

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	dev_info(&dev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	/* reset nstats */
	memset(&adapter->nstats, 0, sizeof(struct net_device_stats));

	return 0;

 fail_bdnumber:
 fail_ioremap:
	free_netdev(netdev);
 fail_alloc_etherdev:
	pci_release_selected_regions(dev,
                                     pci_select_bars(dev, IORESOURCE_MEM));
fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
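For completeness, a hedged sketch of the mirror-image remove callback for this probe; the unwind order and the function name are assumptions, and the project's real igbuio_pci_remove() may differ. Everything it touches (udev, dev_attr_grp, igbuio_pci_release_iomem, the netdev fields) is reused from the probe above.

/* hypothetical remove path undoing the probe above in reverse order */
static void example_igbuio_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	uio_unregister_device(&udev->info);
	if (udev->adapter && udev->adapter->netdev_registered) {
		unregister_netdev(udev->adapter->netdev);
		free_netdev(udev->adapter->netdev);
	}
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(dev);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}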
Code Example #10
static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	long err;

	/* do nothing if we already have a clock device */
	if (!IS_ERR_OR_NULL(adapter->ptp_clock))
		return 0;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
		snprintf(adapter->ptp_caps.name,
			 sizeof(adapter->ptp_caps.name),
			 "%s", netdev->name);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 1;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_82599;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime_82599;
		adapter->ptp_caps.settime = ixgbe_ptp_settime_82599;
		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
		adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540;
		break;
	case ixgbe_mac_82599EB:
		snprintf(adapter->ptp_caps.name,
			 sizeof(adapter->ptp_caps.name),
			 "%s", netdev->name);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_82599;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime_82599;
		adapter->ptp_caps.settime = ixgbe_ptp_settime_82599;
		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
		break;
	default:
		adapter->ptp_clock = NULL;
		adapter->ptp_setup_sdp = NULL;
		return -EOPNOTSUPP;
	}

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(adapter->ptp_clock)) {
		err = PTR_ERR(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_dev_err("ptp_clock_register failed\n");
		return err;
	} else
		e_dev_info("registered PHC device on %s\n", netdev->name);

	/* Set the default timestamp mode to disabled here. We do this in
	 * create_clock instead of initialization, because we don't want to
	 * override the previous settings during a suspend/resume cycle.
	 */
	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

	return 0;
}
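Once registered, the PHC is visible to userspace through its clock index. A hedged sketch of how a driver's ethtool get_ts_info handler typically reports it; the handler name is illustrative, and a real implementation would also fill in the so_timestamping and filter fields:

static int example_get_ts_info(struct net_device *netdev,
			       struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* report the PHC index so tools like ptp4l can find /dev/ptpN */
	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	return 0;
}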
Code Example #11
File: param.c  Project: StevenLeRoux/PF_RING
/**
 * e1000e_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 **/
void __devinit e1000e_check_options(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int bd = adapter->bd_number;

	if (bd >= E1000_MAX_NIC) {
		e_notice("Warning: no configuration for board #%i\n", bd);
		e_notice("Using defaults for all values\n");
	}

	{			/* Transmit Interrupt Delay */
/* *INDENT-OFF* */
		static const struct e1000_option opt = {
			.type = range_option,
			.name = "Transmit Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_TIDV),
			.def  = DEFAULT_TIDV,
			.arg  = { .r = { .min = MIN_TXDELAY,
					 .max = MAX_TXDELAY } }
		};
/* *INDENT-ON* */

		if (num_TxIntDelay > bd) {
			adapter->tx_int_delay = TxIntDelay[bd];
			e1000_validate_option(&adapter->tx_int_delay, &opt,
					      adapter);
		} else {
			adapter->tx_int_delay = opt.def;
		}
	}
	{			/* Transmit Absolute Interrupt Delay */
/* *INDENT-OFF* */
		static const struct e1000_option opt = {
			.type = range_option,
			.name = "Transmit Absolute Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_TADV),
			.def  = DEFAULT_TADV,
			.arg  = { .r = { .min = MIN_TXABSDELAY,
					 .max = MAX_TXABSDELAY } }
		};
/* *INDENT-ON* */

		if (num_TxAbsIntDelay > bd) {
			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
					      adapter);
		} else {
			adapter->tx_abs_int_delay = opt.def;
		}
	}
	{			/* Receive Interrupt Delay */
/* *INDENT-OFF* */
		static struct e1000_option opt = {
			.type = range_option,
			.name = "Receive Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_RDTR),
			.def  = DEFAULT_RDTR,
			.arg  = { .r = { .min = MIN_RXDELAY,
					 .max = MAX_RXDELAY } }
		};
/* *INDENT-ON* */

		if (num_RxIntDelay > bd) {
			adapter->rx_int_delay = RxIntDelay[bd];
			e1000_validate_option(&adapter->rx_int_delay, &opt,
					      adapter);
		} else {
			adapter->rx_int_delay = opt.def;
		}
	}
	{			/* Receive Absolute Interrupt Delay */
/* *INDENT-OFF* */
		static const struct e1000_option opt = {
			.type = range_option,
			.name = "Receive Absolute Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_RADV),
			.def  = DEFAULT_RADV,
			.arg  = { .r = { .min = MIN_RXABSDELAY,
					 .max = MAX_RXABSDELAY } }
		};
/* *INDENT-ON* */

		if (num_RxAbsIntDelay > bd) {
			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
					      adapter);
		} else {
			adapter->rx_abs_int_delay = opt.def;
		}
	}
	{			/* Interrupt Throttling Rate */
/* *INDENT-OFF* */
		static const struct e1000_option opt = {
			.type = range_option,
			.name = "Interrupt Throttling Rate (ints/sec)",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_ITR),
			.def  = DEFAULT_ITR,
			.arg  = { .r = { .min = MIN_ITR,
					 .max = MAX_ITR } }
		};
/* *INDENT-ON* */

		if (num_InterruptThrottleRate > bd) {
			adapter->itr = InterruptThrottleRate[bd];
			switch (adapter->itr) {
			case 0:
				e_info("%s turned off\n", opt.name);
				break;
			case 1:
				e_info("%s set to dynamic mode\n", opt.name);
				adapter->itr_setting = adapter->itr;
				adapter->itr = 20000;
				break;
			case 3:
				e_info("%s set to dynamic conservative mode\n",
				       opt.name);
				adapter->itr_setting = adapter->itr;
				adapter->itr = 20000;
				break;
			case 4:
				e_info("%s set to simplified (2000-8000 ints) mode\n",
				     opt.name);
				adapter->itr_setting = 4;
				break;
			default:
				/*
				 * Save the setting, because the dynamic bits
				 * change itr.
				 */
				if (e1000_validate_option(&adapter->itr, &opt,
							  adapter) &&
				    (adapter->itr == 3)) {
					/*
					 * In case of invalid user value,
					 * default to conservative mode.
					 */
					adapter->itr_setting = adapter->itr;
					adapter->itr = 20000;
				} else {
					/*
					 * Clear the lower two bits because
					 * they are used as control.
					 */
					adapter->itr_setting =
					    adapter->itr & ~3;
				}
				break;
			}
		} else {
			adapter->itr_setting = opt.def;
			adapter->itr = 20000;
		}
	}
	{			/* Interrupt Mode */
		static struct e1000_option opt = {
			.type = range_option,
			.name = "Interrupt Mode",
#ifndef CONFIG_PCI_MSI
/* *INDENT-OFF* */
			.err  = "defaulting to 0 (legacy)",
			.def  = E1000E_INT_MODE_LEGACY,
			.arg  = { .r = { .min = 0,
					 .max = 0 } }
/* *INDENT-ON* */
#endif
		};

#ifdef CONFIG_PCI_MSI
		if (adapter->flags & FLAG_HAS_MSIX) {
			opt.err = kstrdup("defaulting to 2 (MSI-X)",
					  GFP_KERNEL);
			opt.def = E1000E_INT_MODE_MSIX;
			opt.arg.r.max = E1000E_INT_MODE_MSIX;
		} else {
			opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL);
			opt.def = E1000E_INT_MODE_MSI;
			opt.arg.r.max = E1000E_INT_MODE_MSI;
		}

		if (!opt.err) {
			dev_err(pci_dev_to_dev(adapter->pdev),
				"Failed to allocate memory\n");
			return;
		}
#endif

		if (num_IntMode > bd) {
			unsigned int int_mode = IntMode[bd];
			e1000_validate_option(&int_mode, &opt, adapter);
			adapter->int_mode = int_mode;
		} else {
			adapter->int_mode = opt.def;
		}

#ifdef CONFIG_PCI_MSI
		kfree(opt.err);
#endif
	}
	{			/* Smart Power Down */
		static const struct e1000_option opt = {
			.type = enable_option,
			.name = "PHY Smart Power Down",
			.err = "defaulting to Disabled",
			.def = OPTION_DISABLED
		};

		if (num_SmartPowerDownEnable > bd) {
			unsigned int spd = SmartPowerDownEnable[bd];
			e1000_validate_option(&spd, &opt, adapter);
			if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
			    && spd)
				adapter->flags |= FLAG_SMART_POWER_DOWN;
		}
	}
	{			/* CRC Stripping */
		static const struct e1000_option opt = {
			.type = enable_option,
			.name = "CRC Stripping",
			.err = "defaulting to Enabled",
			.def = OPTION_ENABLED
		};

		if (num_CrcStripping > bd) {
			unsigned int crc_stripping = CrcStripping[bd];
			e1000_validate_option(&crc_stripping, &opt, adapter);
			if (crc_stripping == OPTION_ENABLED) {
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
				adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
			}
		} else {
			adapter->flags2 |= FLAG2_CRC_STRIPPING;
			adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
		}
	}
	{			/* Kumeran Lock Loss Workaround */
		static const struct e1000_option opt = {
			.type = enable_option,
			.name = "Kumeran Lock Loss Workaround",
			.err = "defaulting to Enabled",
			.def = OPTION_ENABLED
		};

		if (num_KumeranLockLoss > bd) {
			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
			if (hw->mac.type == e1000_ich8lan)
				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
									     kmrn_lock_loss);
		} else {
			if (hw->mac.type == e1000_ich8lan)
				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
									     opt.def);
		}
	}
	{			/* EEE for parts supporting the feature */
		static const struct e1000_option opt = {
			.type = enable_option,
			.name = "EEE Support",
			.err = "defaulting to Enabled",
			.def = OPTION_ENABLED
		};

		if (adapter->flags2 & FLAG2_HAS_EEE) {
			/* Currently only supported on 82579 */
			if (num_EEE > bd) {
				unsigned int eee = EEE[bd];
				e1000_validate_option(&eee, &opt, adapter);
				hw->dev_spec.ich8lan.eee_disable = !eee;
			} else {
				hw->dev_spec.ich8lan.eee_disable = !opt.def;
			}
		}
	}
	{			/* configure node specific allocation */
/* *INDENT-OFF* */
		static struct e1000_option opt = {
			.type = range_option,
			.name = "Node used to allocate memory",
			.err  = "defaulting to -1 (disabled)",
#ifdef HAVE_EARLY_VMALLOC_NODE
			.def  = 0,
#else
			.def  = -1,
#endif
			.arg  = { .r = { .min = 0,
					 .max = MAX_NUMNODES - 1 } }
		};
/* *INDENT-ON* */
		int node = opt.def;

		/* if the default was zero then we need to set the
		 * default value to an online node, which is not
		 * necessarily zero, and the constant initializer
		 * above can't take first_online_node */
		if (node == 0)
			/* must set opt.def for validate */
			opt.def = node = first_online_node;

		if (num_Node > bd) {
			node = Node[bd];
			e1000_validate_option((unsigned int *)&node, &opt,
					      adapter);
			if (node != OPTION_UNSET)
				e_info("node used for allocation: %d\n", node);
		}

		/* check sanity of the value */
		if ((node != -1) && !node_online(node)) {
			e_info("ignoring node set to invalid value %d\n", node);
			node = opt.def;
		}

		adapter->node = node;
	}
}
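The per-board arrays consulted above (TxIntDelay, RxIntDelay, IntMode, Node, and so on) are module parameters indexed by adapter->bd_number. A hedged sketch of how one such array and its element counter are typically declared; the driver's param.c wraps declarations like this in a helper macro, so treat the exact spelling as an assumption:

/* assumed per-board option declaration, one slot per supported NIC */
static int TxIntDelay[E1000_MAX_NIC + 1] = { [0 ... E1000_MAX_NIC] = OPTION_UNSET };
static unsigned int num_TxIntDelay;
module_param_array_named(TxIntDelay, TxIntDelay, int, &num_TxIntDelay, 0);
MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");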
Code Example #12
File: pata_cs5520.c  Project: cilynx/dd-wrt
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_std_ports(ioaddr);

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_std_ports(ioaddr);

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		if (i == 0)
			host->irq = irq[0];
		else
			host->irq2 = irq[1];
	}

	return ata_host_register(host, &cs5520_sht);
}

/**
 *	cs5520_remove_one	-	device unload
 *	@pdev: PCI device being removed
 *
 *	Handle an unplug/unload event for a PCI device. Unload the
 *	PCI driver but do not use the default handler as we manage
 *	resources ourselves and *MUST NOT* disable the device as it has
 *	other functions.
 */

static void __devexit cs5520_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */

static int cs5520_reinit_one(struct pci_dev *pdev)
{
	u8 pcicfg;
	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	return ata_pci_device_resume(pdev);
}
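A hedged sketch of how the probe, remove, and resume handlers from this example would be wired into the driver's struct pci_driver; the ID-table symbol is hypothetical, and the suspend hook named here is an assumption based on common libata practice:

static struct pci_driver cs5520_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cs5520_ids,		/* hypothetical PCI ID table */
	.probe		= cs5520_init_one,
	.remove		= __devexit_p(cs5520_remove_one),
	.suspend	= ata_pci_device_suspend,	/* assumed companion to the resume hook */
	.resume		= cs5520_reinit_one,
};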
Code Example #13
File: ixgbe_sysfs.c  Project: AlexanderKlishin/ixgbe
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
	int rc = 0;
#ifdef IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
	unsigned int i;
	int n_attrs;

#endif /* IXGBE_HWMON */
	if (adapter == NULL)
		goto err;

#ifdef IXGBE_HWMON
	/* If this method isn't defined we don't support thermals */
	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
		goto no_thermal;
	}

	/* Don't create thermal hwmon interface if no sensors present */
	if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
		goto no_thermal;

	/*
	 * Allocate space for the maximum number of attributes:
	 * max num sensors * values (loc, temp, max, caution)
	 */
	n_attrs = IXGBE_MAX_SENSORS * 4;
	ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
					  GFP_KERNEL);
	if (!ixgbe_hwmon->hwmon_list) {
		rc = -ENOMEM;
		goto err;
	}

	ixgbe_hwmon->device = hwmon_device_register(pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(ixgbe_hwmon->device)) {
		rc = PTR_ERR(ixgbe_hwmon->device);
		goto err;
	}

	for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
		/*
		 * Only create hwmon sysfs entries for sensors that have
		 * meaningful data.
		 */
		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
			continue;

		/* Bail if any hwmon attr struct fails to initialize */
		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
		if (rc)
			goto err;
	}

no_thermal:
#endif /* IXGBE_HWMON */
	goto exit;

err:
	ixgbe_sysfs_del_adapter(adapter);
exit:
	return rc;
}
Code Example #14
File: ixgbe_fcoe.c  Project: GreggIndustries/PF_RING
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl || !sgc)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid > netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}