/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed: events still queued between ntc and ntu,
	 * accounting for ring wrap-around; then unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
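
For context, a caller normally drains the admin receive queue in a loop until no events remain. A minimal sketch of such a caller, assuming a pre-allocated message buffer (the example_ wrapper itself is not part of this driver):

/* Hypothetical caller: drain all pending ARQ events. */
static void example_drain_arq(struct i40e_hw *hw, u8 *buf, u16 buf_len)
{
	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.buf_len = buf_len;	/* capacity of the caller's buffer */
	event.msg_buf = buf;

	do {
		/* non-zero return means the queue is empty or errored */
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;
		/* event.desc and event.msg_buf now describe one event */
	} while (pending);
}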
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	u32 eecd;
	s32 ret_val;
	u16 size;
	u32 ctrl_ext = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
		mac->type = e1000_82580;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
	}

	/* Set media type */
	/*
	 * The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	phy->media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		dev_spec->sgmii_active = true;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
		break;
	default:
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
		break;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);

	/*
	 * if using i2c make certain the MDICNFG register is cleared to prevent
	 * communications from being misrouted to the mdic registers
	 */
	if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
		wr32(E1000_MDICNFG, 0);

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
	if (mac->type == e1000_82576)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
	if (mac->type == e1000_82580)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
	/* reset */
	if (mac->type == e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;

	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	/* NVM initialization */
	eecd = rd32(E1000_EECD);

	nvm->opcode_bits        = 8;
	nvm->delay_usec         = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size    = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size    = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* EEPROM access above 16k is unsupported */
	if (size > 14)
		size = 14;
	nvm->word_size = 1 << size;

	/* if 82576 then initialize mailbox parameters */
	if (mac->type == e1000_82576)
		igb_init_mbx_params_pf(hw);

	/* setup PHY parameters */
	if (phy->media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		return 0;
	}

	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us      = 100;

	/* PHY function pointers */
	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset              = igb_phy_hw_reset_sgmii_82575;
		phy->ops.read_reg           = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg          = igb_write_phy_reg_sgmii_82575;
	} else if (hw->mac.type == e1000_82580) {
		phy->ops.reset              = igb_phy_hw_reset;
		phy->ops.read_reg           = igb_read_phy_reg_82580;
		phy->ops.write_reg          = igb_write_phy_reg_82580;
	} else {
		phy->ops.reset              = igb_phy_hw_reset;
		phy->ops.read_reg           = igb_read_phy_reg_igp;
		phy->ops.write_reg          = igb_write_phy_reg_igp;
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
	               E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1111_I_PHY_ID:
		phy->type                   = e1000_phy_m88;
		phy->ops.get_phy_info       = igb_get_phy_info_m88;
		phy->ops.get_cable_length   = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type                   = e1000_phy_igp_3;
		phy->ops.get_phy_info       = igb_get_phy_info_igp;
		phy->ops.get_cable_length   = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
		phy->type                   = e1000_phy_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length   = igb_get_cable_length_82580;
		phy->ops.get_phy_info       = igb_get_phy_info_82580;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}
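
As a worked example of the word-size math above, assuming NVM_WORD_SIZE_BASE_SHIFT is 6 (its value in the e1000 headers) and an EECD size field of 4:

/* size = 4 + 6 = 10  ->  word_size = 1 << 10 = 1024 sixteen-bit words.
 * The "size > 14" clamp caps word_size at 1 << 14 = 16384 words, so
 * EEPROMs above 16K words are treated as 16K here. */
u16 field = 4;					/* example EECD size bits */
u16 size_example = field + NVM_WORD_SIZE_BASE_SHIFT;
u16 word_size_example = 1 << size_example;	/* 1024 words */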
Example #3
void rlc_shutdown(struct pci_dev *dev)
{
	wr32(dev, 0, RLC_CTL);
}
/**
 *  igb_setup_serdes_link_82575 - Setup link for serdes
 *  @hw: pointer to the HW structure
 *
 *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
 *  used on copper connections where the serialized gigabit media independent
 *  interface (sgmii), or serdes fiber is being used.  Configures the link
 *  for auto-negotiation or forces speed/duplex.
 **/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg;
	bool pcs_autoneg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return 0;

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
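		/* fall through */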
	default:
		/*
		 * non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be default set by most drivers. This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	/*
	 * We force flow control to prevent the CTRL register values from being
	 * overwritten by the autonegotiated flow control values
	 */
	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	if (!igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return 0;
}
/**
 *  igb_reset_hw_82580 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets the function or the entire device (all ports, etc.)
 *  to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;


	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
			global_device_reset = false;

	if (global_device_reset &&
		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}
void mddi_init(void)
{
    unsigned n;

//    dprintf("mddi_init()\n");

    rev_pkt_buf = alloc(256);
    
    mddi_do_cmd(CMD_RESET);

        /* disable periodic rev encap */
    mddi_do_cmd(CMD_PERIODIC_REV_ENC | 0);

    writel(0x0001, MDDI_VERSION);
    writel(0x3C00, MDDI_BPS);
    writel(0x0003, MDDI_SPM);

    writel(0x0005, MDDI_TA1_LEN);
    writel(0x000C, MDDI_TA2_LEN);
    writel(0x0096, MDDI_DRIVE_HI);
    writel(0x0050, MDDI_DRIVE_LO);
    writel(0x003C, MDDI_DISP_WAKE);
    writel(0x0002, MDDI_REV_RATE_DIV);

        /* needs to settle for 5uS */
    if (readl(MDDI_PAD_CTL) == 0) {
        writel(0x08000, MDDI_PAD_CTL);
        udelay(5);
    }

    writel(0xA850F, MDDI_PAD_CTL);
    writel(0x60006, MDDI_DRIVER_START_CNT);

    writel((unsigned) rev_pkt_buf, MDDI_REV_PTR);
    writel(256, MDDI_REV_SIZE);
    writel(256, MDDI_REV_ENCAP_SZ);

    mddi_do_cmd(CMD_FORCE_NEW_REV_PTR);
    
        /* disable hibernate */
    mddi_do_cmd(CMD_HIBERNATE | 0);

    panel_backlight(0);
    
    panel_poweron();
    
    mddi_do_cmd(CMD_LINK_ACTIVE);

    do {
        n = readl(MDDI_STAT);
    } while(!(n & MDDI_STAT_LINK_ACTIVE));

        /* v > 8?  v > 8 && < 0x19 ? */
    writel(2, MDDI_TEST);

//    writel(CMD_PERIODIC_REV_ENC | 0, MDDI_CMD); /* disable */
        
    mddi_get_caps();

#if 0
    writel(0x5666, MDDI_MDP_VID_FMT_DES);
    writel(0x00C3, MDDI_MDP_VID_PIX_ATTR);
    writel(0x0000, MDDI_MDP_CLIENTID);
#endif

    dprintf("panel is %d x %d\n", fb_width, fb_height);

    FB = alloc(2 * fb_width * fb_height);
    mlist = alloc(sizeof(mddi_llentry) * (fb_height / 8));

//    dprintf("FB @ %x  mlist @ %x\n", (unsigned) FB, (unsigned) mlist);
    
    for(n = 0; n < (fb_height / 8); n++) {
        unsigned y = n * 8;
        unsigned pixels = fb_width * 8;
        mddi_video_stream *vs = &(mlist[n].u.v);

        vs->length = sizeof(mddi_video_stream) - 2 + (pixels * 2);
        vs->type = TYPE_VIDEO_STREAM;
        vs->client_id = 0;
        vs->format = 0x5565; // FORMAT_16BPP;
        vs->pixattr = PIXATTR_BOTH_EYES | PIXATTR_TO_ALL;
        
        vs->left = 0;
        vs->right = fb_width - 1;
        vs->top = y;
        vs->bottom = y + 7;
        
        vs->start_x = 0;
        vs->start_y = y;
        
        vs->pixels = pixels;
        vs->crc = 0;
        vs->reserved = 0;
        
        mlist[n].header_count = sizeof(mddi_video_stream) - 2;
        mlist[n].data_count = pixels * 2;
        mlist[n].reserved = 0;
        wr32(&mlist[n].data, ((unsigned) FB) + (y * fb_width * 2));

        mlist[n].flags = 0;
        wr32(&mlist[n].next, (unsigned) (mlist + n + 1));
    }

    mlist[n-1].flags = 1;
    wr32(&mlist[n-1].next, 0);

    writel(CMD_HIBERNATE, MDDI_CMD);
    writel(CMD_LINK_ACTIVE, MDDI_CMD);

    for(n = 0; n < (fb_width * fb_height); n++) FB[n] = 0;

    mddi_start_update();

    panel_backlight(1);
}
Example #7
/**
 * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Control hardware registers to enter the specific mode requested by the
 * user. Also used during reset path to ensure that timestamp settings are
 * maintained.
 *
 * Note: modifies config in place, and may update the requested mode to be
 * more broad if the specific filter is not directly supported.
 **/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
				       struct hwtstamp_config *config)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tsyntype, regval;

	/* Reserved for future extensions. */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		pf->ptp_tx = false;
		break;
	case HWTSTAMP_TX_ON:
		pf->ptp_tx = true;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		pf->ptp_rx = false;
		/* We set the type to V1, but do not enable UDP packet
		 * recognition. In this way, we should be as close to
		 * disabling PTP Rx timestamps as possible since V1 packets
		 * are always UDP, since L2 packets are a V2 feature.
		 */
		tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		pf->ptp_rx = true;
		tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
			   I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		pf->ptp_rx = true;
		tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
			   I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	default:
		return -ERANGE;
	}

	/* Clear out all 1588-related registers to clear and unlatch them. */
	rd32(hw, I40E_PRTTSYN_STAT_0);
	rd32(hw, I40E_PRTTSYN_TXTIME_H);
	rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(3));

	/* Enable/disable the Tx timestamp interrupt based on user input. */
	regval = rd32(hw, I40E_PRTTSYN_CTL0);
	if (pf->ptp_tx)
		regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
	else
		regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
	wr32(hw, I40E_PRTTSYN_CTL0, regval);

	regval = rd32(hw, I40E_PFINT_ICR0_ENA);
	if (pf->ptp_tx)
		regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
	else
		regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, regval);

	/* Although there is no simple on/off switch for Rx, we "disable" Rx
	 * timestamps by setting to V1 only mode and clear the UDP
	 * recognition. This ought to disable all PTP Rx timestamps as V1
	 * packets are always over UDP. Note that software is configured to
	 * ignore Rx timestamps via the pf->ptp_rx flag.
	 */
	regval = rd32(hw, I40E_PRTTSYN_CTL1);
	/* clear everything but the enable bit */
	regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
	/* now enable bits for desired Rx timestamps */
	regval |= tsyntype;
	wr32(hw, I40E_PRTTSYN_CTL1, regval);

	return 0;
}
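
This function is designed to be driven from the SIOCSHWTSTAMP ioctl path; a hedged sketch of such a caller, following the usual round-trip pattern for this ioctl (the example_ wrapper is illustrative, not this driver's exact code):

/* Illustrative ioctl-style wrapper around the mode setter above. */
static int example_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = i40e_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* hand the possibly-broadened mode back to user space */
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}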
Example #8
static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                                       struct ptp_clock_request *rq, int on)
{
    struct igb_adapter *igb =
        container_of(ptp, struct igb_adapter, ptp_caps);
    struct e1000_hw *hw = &igb->hw;
    u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
    unsigned long flags;
    struct timespec ts;
    int pin = -1;
    s64 ns;

    switch (rq->type) {
    case PTP_CLK_REQ_EXTTS:
        if (on) {
            pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
                               rq->extts.index);
            if (pin < 0)
                return -EBUSY;
        }
        if (rq->extts.index == 1) {
            tsauxc_mask = TSAUXC_EN_TS1;
            tsim_mask = TSINTR_AUTT1;
        } else {
            tsauxc_mask = TSAUXC_EN_TS0;
            tsim_mask = TSINTR_AUTT0;
        }
        spin_lock_irqsave(&igb->tmreg_lock, flags);
        tsauxc = rd32(E1000_TSAUXC);
        tsim = rd32(E1000_TSIM);
        if (on) {
            igb_pin_extts(igb, rq->extts.index, pin);
            tsauxc |= tsauxc_mask;
            tsim |= tsim_mask;
        } else {
            tsauxc &= ~tsauxc_mask;
            tsim &= ~tsim_mask;
        }
        wr32(E1000_TSAUXC, tsauxc);
        wr32(E1000_TSIM, tsim);
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
        return 0;

    case PTP_CLK_REQ_PEROUT:
        if (on) {
            pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
                               rq->perout.index);
            if (pin < 0)
                return -EBUSY;
        }
        ts.tv_sec = rq->perout.period.sec;
        ts.tv_nsec = rq->perout.period.nsec;
        ns = timespec_to_ns(&ts);
        ns = ns >> 1;
        if (on && ns < 500000LL) {
            /* 2k interrupts per second is an awful lot. */
            return -EINVAL;
        }
        ts = ns_to_timespec(ns);
        if (rq->perout.index == 1) {
            tsauxc_mask = TSAUXC_EN_TT1;
            tsim_mask = TSINTR_TT1;
            trgttiml = E1000_TRGTTIML1;
            trgttimh = E1000_TRGTTIMH1;
        } else {
            tsauxc_mask = TSAUXC_EN_TT0;
            tsim_mask = TSINTR_TT0;
            trgttiml = E1000_TRGTTIML0;
            trgttimh = E1000_TRGTTIMH0;
        }
        spin_lock_irqsave(&igb->tmreg_lock, flags);
        tsauxc = rd32(E1000_TSAUXC);
        tsim = rd32(E1000_TSIM);
        if (on) {
            int i = rq->perout.index;

            igb_pin_perout(igb, i, pin);
            igb->perout[i].start.tv_sec = rq->perout.start.sec;
            igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
            igb->perout[i].period.tv_sec = ts.tv_sec;
            igb->perout[i].period.tv_nsec = ts.tv_nsec;
            wr32(trgttimh, rq->perout.start.sec);
            wr32(trgttiml, rq->perout.start.nsec);
            tsauxc |= tsauxc_mask;
            tsim |= tsim_mask;
        } else {
            tsauxc &= ~tsauxc_mask;
            tsim &= ~tsim_mask;
        }
        wr32(E1000_TSAUXC, tsauxc);
        wr32(E1000_TSIM, tsim);
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
        return 0;

    case PTP_CLK_REQ_PPS:
        spin_lock_irqsave(&igb->tmreg_lock, flags);
        tsim = rd32(E1000_TSIM);
        if (on)
            tsim |= TSINTR_SYS_WRAP;
        else
            tsim &= ~TSINTR_SYS_WRAP;
        wr32(E1000_TSIM, tsim);
        spin_unlock_irqrestore(&igb->tmreg_lock, flags);
        return 0;
    }

    return -EOPNOTSUPP;
}
Example #9
void igb_ptp_init(struct igb_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;
    int i;

    switch (hw->mac.type) {
    case e1000_82576:
        snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
        adapter->ptp_caps.owner = THIS_MODULE;
        adapter->ptp_caps.max_adj = 999999881;
        adapter->ptp_caps.n_ext_ts = 0;
        adapter->ptp_caps.pps = 0;
        adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
        adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
        adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
        adapter->ptp_caps.enable = igb_ptp_feature_enable;
        adapter->cc.read = igb_ptp_read_82576;
        adapter->cc.mask = CYCLECOUNTER_MASK(64);
        adapter->cc.mult = 1;
        adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
        /* Dial the nominal frequency. */
        wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
        break;
    case e1000_82580:
    case e1000_i354:
    case e1000_i350:
        snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
        adapter->ptp_caps.owner = THIS_MODULE;
        adapter->ptp_caps.max_adj = 62499999;
        adapter->ptp_caps.n_ext_ts = 0;
        adapter->ptp_caps.pps = 0;
        adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
        adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
        adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
        adapter->ptp_caps.enable = igb_ptp_feature_enable;
        adapter->cc.read = igb_ptp_read_82580;
        adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
        adapter->cc.mult = 1;
        adapter->cc.shift = 0;
        /* Enable the timer functions by clearing bit 31. */
        wr32(E1000_TSAUXC, 0x0);
        break;
    case e1000_i210:
    case e1000_i211:
        for (i = 0; i < IGB_N_SDP; i++) {
            struct ptp_pin_desc *ppd = &adapter->sdp_config[i];

            snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
            ppd->index = i;
            ppd->func = PTP_PF_NONE;
        }
        snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
        adapter->ptp_caps.owner = THIS_MODULE;
        adapter->ptp_caps.max_adj = 62499999;
        adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
        adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
        adapter->ptp_caps.n_pins = IGB_N_SDP;
        adapter->ptp_caps.pps = 1;
        adapter->ptp_caps.pin_config = adapter->sdp_config;
        adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
        adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
        adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
        adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
        adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
        adapter->ptp_caps.verify = igb_ptp_verify_pin;
        /* Enable the timer functions by clearing bit 31. */
        wr32(E1000_TSAUXC, 0x0);
        break;
    default:
        adapter->ptp_clock = NULL;
        return;
    }

    wrfl();

    spin_lock_init(&adapter->tmreg_lock);
    INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);

    /* Initialize the clock and overflow work for devices that need it. */
    if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
        struct timespec64 ts = ktime_to_timespec64(ktime_get_real());

        igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
    } else {
        timecounter_init(&adapter->tc, &adapter->cc,
                         ktime_to_ns(ktime_get_real()));

        INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
                          igb_ptp_overflow_check);

        schedule_delayed_work(&adapter->ptp_overflow_work,
                              IGB_SYSTIM_OVERFLOW_PERIOD);
    }

    /* Initialize the time sync interrupts for devices that support it. */
    if (hw->mac.type >= e1000_82580) {
        wr32(E1000_TSIM, TSYNC_INTERRUPTS);
        wr32(E1000_IMS, E1000_IMS_TS);
    }

    adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
    adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

    adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                            &adapter->pdev->dev);
    if (IS_ERR(adapter->ptp_clock)) {
        adapter->ptp_clock = NULL;
        dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
    } else {
        dev_info(&adapter->pdev->dev, "added PHC on %s\n",
                 adapter->netdev->name);
        adapter->flags |= IGB_FLAG_PTP;
    }
}
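
The cc.mult/cc.shift values programmed above feed the kernel's cyclecounter layer, which converts a raw counter delta to nanoseconds as ns = (delta * mult) >> shift. A standalone sketch of that conversion (the helper name is illustrative):

/* cyclecounter-style conversion: ns = (delta * mult) >> shift.
 * The 82580 branch above uses mult = 1 and shift = 0, i.e. SYSTIM
 * already counts in nanoseconds. */
static u64 example_cycles_to_ns(u64 delta, u32 mult, u32 shift)
{
	return (delta * mult) >> shift;
}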
Example #10
void gpu_lb_pw_dis(struct pci_dev *dev)
{
	se_sh_select(dev, 0xffffffff, 0xffffffff);
	wr32(dev, 0x00ff, SPI_LB_CU_MASK);
}
Example #11
void gpu_grdbm_int_reset(struct pci_dev *dev)
{
	wr32(dev, 0, GRBM_INT_CTL);
}
Example #12
/*
 * gpu register defaults which need to be inited only once, most of them
 * are in the cfg space (PA_SC_RASTER_CFG is in the ctx space though)
 */
void gpu_defaults(struct pci_dev *dev, u32 addr_cfg, u32 mem_row_sz_kb)
{
	struct dev_drv_data *dd;
	u32 sx_debug_1;

	dd = pci_get_drvdata(dev);

	wr32(dev, set(GC_RD_TIMEOUT, 0xff), GRBM_CTL);
	wr32(dev, addr_cfg, GB_ADDR_CFG);

	tiling_modes_tbl_init(dev, mem_row_sz_kb);
	rbs_setup(dev);
	spi_setup(dev);

	sx_debug_1 = rr32(dev, SX_DEBUG_1);
	wr32(dev, sx_debug_1, SX_DEBUG_1);

	/* cfg space */
	wr32(dev, set(PSFS_SC_FRONTEND_PRIM_FIFO_SZ,
					dd->cfg.gpu.sc_prim_fifo_sz_frontend)
		| set(PSFS_SC_BACKEND_PRIM_FIFO_SZ,
					dd->cfg.gpu.sc_prim_fifo_sz_backend)
		| set(PSFS_SC_HIZ_TILE_FIFO_SZ, dd->cfg.gpu.sc_hiz_tile_fifo_sz)
		| set(PSFS_SC_EARLYZ_TILE_FIFO_SZ,
					dd->cfg.gpu.sc_earlyz_tile_fifo_sz),
								PA_SC_FIFO_SZ);
	wr32(dev, 1, VGT_INSTS_N);

	wr32(dev, 0, SQ_CFG);

	wr32(dev, set(PSFEMC_FORCE_EOV_MAX_CLK_CNT, 4095)
				| set(PSFEMC_FORCE_EOV_MAX_REZ_CNT, 255),
						PA_SC_FORCE_EOV_MAX_CNTS);


	wr32(dev, set(VCI_CACHE_INVALIDATION, VCI_VC_AND_TC)
				| set(VCI_AUTO_INVLD_ENA, VCI_ES_AND_GS_AUTO),
							VGT_CACHE_INVALIDATION);

	wr32(dev, 16, VGT_GS_VTX_REUSE);
	wr32(dev, 0, PA_SC_LINE_STIPPLE_STATE);

	wr32(dev, 0, CB_PERF_CTR_0_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_0_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_1);

	wr32(dev, PCE_CLIP_VTX_REORDER_ENA | set(PCE_CLIP_SEQ_N, 3),
								PA_CL_ENHANCE);

	udelay(50);
}
Example #13
static void rbs_setup(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 se;
	u32 sh;
	u32 sh_rbs_dis;
	u32 rbs_dis;
	u32 mask;
	u32 rbs_ena;
	u32 pa_sc_raster_cfg;

	dd = pci_get_drvdata(dev);

	/* build the mask of dis rbs from regs */
	rbs_dis = 0;
	for (se = 0; se < dd->cfg.gpu.ses_n; ++se) {
		for (sh = 0; sh <  dd->cfg.gpu.se_shs_n; ++sh) {
			se_sh_select(dev, se, sh);
			sh_rbs_dis = sh_rbs_dis_get(dev);
			rbs_dis |= sh_rbs_dis << ((se * dd->cfg.gpu.se_shs_n
					+ sh) * CRBD_TAHITI_RB_BITMAP_W_PER_SH);
		}
	}

	se_sh_select(dev, 0xffffffff, 0xffffffff);

	/* build the mask of ena rbs from the mask of dis rbs */
	rbs_ena = 0;
	mask = 1;
	for (se = 0; se < dd->cfg.gpu.se_rbs_n * dd->cfg.gpu.ses_n; ++se) {
		if(!(rbs_dis & mask))
			rbs_ena |= mask;
		mask <<= 1;
	}

	/* configure the raster blk for each sh */
	for (se = 0; se < dd->cfg.gpu.ses_n; ++se) {
		pa_sc_raster_cfg = 0;
		for (sh = 0; sh < dd->cfg.gpu.se_shs_n; ++sh) {
			switch (rbs_ena & 3) {
			case 1:
				pa_sc_raster_cfg |= (PSRC_RB_MAP_0
						<< (se * dd->cfg.gpu.se_shs_n
								+ sh) * 2);
				break;
			case 2:
				pa_sc_raster_cfg |= (PSRC_RB_MAP_3
						<< (se * dd->cfg.gpu.se_shs_n
								+ sh) * 2);
				break;
			case 3:
			default:
				pa_sc_raster_cfg |= (PSRC_RB_MAP_2
				<< (se * dd->cfg.gpu.se_shs_n + sh) * 2);
				break;
			}
			rbs_ena >>= 2;/* ena rbs of next sh */
		}
		se_sh_select(dev, se, 0xffffffff);
		dev_info(&dev->dev, "se=%u pa_sc_raster_cfg=0x%08x\n", se,
							pa_sc_raster_cfg);
		wr32(dev, pa_sc_raster_cfg, PA_SC_RASTER_CFG);
	}
	se_sh_select(dev, 0xffffffff, 0xffffffff);
}
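
As a worked example of the mask inversion above, with assumed sizes of 2 SEs x 2 RBs per SE: rbs_dis = 0b0101 yields rbs_ena = 0b1010. A standalone sketch of that derivation (helper name illustrative):

/* Build the enable mask from the disable mask, as rbs_setup() does. */
static u32 example_ena_from_dis(u32 rbs_dis, u32 total_rbs)
{
	u32 rbs_ena = 0;
	u32 mask = 1;
	u32 i;

	for (i = 0; i < total_rbs; ++i) {
		if (!(rbs_dis & mask))
			rbs_ena |= mask;
		mask <<= 1;
	}
	return rbs_ena;
}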
Example #14
void conn_track(packet_t *pkt)
{
    conn_t *c;
    unsigned int h;

    if (!pkt->istcp)
        return ;

    h = hash(pkt->sip, pkt->dip, pkt->sport, pkt->dport);
    c = conn_hash_find(h);

    if (pkt->tcpf == 0x012) {
        // SYN, ACK
        if (!c)
            return ;
        if (!c->h) {
            c->h = h;
            c->ack = pkt->ack;
            c->seq = pkt->seq;
            c->sip = pkt->sip;
            c->dip = pkt->dip;
            c->sport = pkt->sport;
            c->dport = pkt->dport;
            nr_conn++;
            {
                int i;
                char path[256];
                sprintf(path, "cap/%d.cap", tot_conn_cap);
                c->fp = fopen(path, "wb+");
                if (!c->fp) {
                    printf("fopen failed\n");
                    exit(0);
                }
                wr8(c->fp, 1);
                wr8(c->fp, 8);
                wr32(c->fp, c->sport);
                wr32(c->fp, c->dport);
                wr8(c->fp, 0);
                tot_conn_cap++;
            }
        }
        return ;
    }

    if (!c || !c->h)
        return ;

    /*
    int tx, rx;
    if (c->dip == pkt->dip && c->dport == pkt->dport &&
    		c->sip == pkt->sip && c->sport == pkt->sport) {
    	tx = pkt->ack - c->ack;
    	rx = pkt->seq - c->seq;
    } else
    if (c->dip == pkt->sip && c->dport == pkt->sport &&
    	  c->sip == pkt->dip && c->sport == pkt->dport) {
    	tx = pkt->ack - c->seq;
    	rx = pkt->seq - c->ack;
    } else {
    //		printf("hash collision: %u\n", c->h);
    //		exit(0);
    }
    */
    s_io += pkt->len;

    c->totsiz += pkt->len;
    if (c->fp)
        fwrite(pkt->pay, pkt->paylen, 1, c->fp);

    if (pkt->tcpf & 5) {
        // FIN, ACK
        //printf("tcp tx:%d rx:%d tot:%d nr_conn:%d nr_conn_cap:%d\n",
        //		tx, rx, c->totsiz, nr_conn, nr_conn_cap);
        if (c->fp)
            fclose(c->fp);
        nr_conn--;
        conn_hash_free(c);
        s_nrconn++;
    }
}
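
conn_track() depends on an external 4-tuple hash() and hash-table helpers that are not shown in this example. A hypothetical sketch of such a hash (the mixing constants and table size are invented; it is symmetric in source/destination so both directions of a flow land in the same bucket, which the direction checks in the commented-out block assume):

/* Hypothetical symmetric 4-tuple hash; NOT the original hash(). */
static unsigned int example_hash(unsigned int sip, unsigned int dip,
                                 unsigned short sport, unsigned short dport)
{
    unsigned int h = (sip ^ dip) * 2654435761u;   /* Knuth-style mix */
    h ^= (unsigned int)(sport ^ dport) * 40503u;
    return h & 0xffff;                            /* assumed 65536 buckets */
}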
Example #15
static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
				       struct ptp_clock_request *rq, int on)
{
	struct igb_adapter *igb =
		container_of(ptp, struct igb_adapter, ptp_caps);
	struct e1000_hw *hw = &igb->hw;
	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
	unsigned long flags;
	struct timespec64 ts;
	int use_freq = 0, pin = -1;
	s64 ns;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		if (on) {
			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
					   rq->extts.index);
			if (pin < 0)
				return -EBUSY;
		}
		if (rq->extts.index == 1) {
			tsauxc_mask = TSAUXC_EN_TS1;
			tsim_mask = TSINTR_AUTT1;
		} else {
			tsauxc_mask = TSAUXC_EN_TS0;
			tsim_mask = TSINTR_AUTT0;
		}
		spin_lock_irqsave(&igb->tmreg_lock, flags);
		tsauxc = rd32(E1000_TSAUXC);
		tsim = rd32(E1000_TSIM);
		if (on) {
			igb_pin_extts(igb, rq->extts.index, pin);
			tsauxc |= tsauxc_mask;
			tsim |= tsim_mask;
		} else {
			tsauxc &= ~tsauxc_mask;
			tsim &= ~tsim_mask;
		}
		wr32(E1000_TSAUXC, tsauxc);
		wr32(E1000_TSIM, tsim);
		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
		return 0;

	case PTP_CLK_REQ_PEROUT:
		if (on) {
			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
					   rq->perout.index);
			if (pin < 0)
				return -EBUSY;
		}
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);
		ns = ns >> 1;
		if (on && ((ns <= 70000000LL) || (ns == 125000000LL) ||
			   (ns == 250000000LL) || (ns == 500000000LL))) {
			if (ns < 8LL)
				return -EINVAL;
			use_freq = 1;
		}
		ts = ns_to_timespec64(ns);
		if (rq->perout.index == 1) {
			if (use_freq) {
				tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
				tsim_mask = 0;
			} else {
				tsauxc_mask = TSAUXC_EN_TT1;
				tsim_mask = TSINTR_TT1;
			}
			trgttiml = E1000_TRGTTIML1;
			trgttimh = E1000_TRGTTIMH1;
			freqout = E1000_FREQOUT1;
		} else {
			if (use_freq) {
				tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
				tsim_mask = 0;
			} else {
				tsauxc_mask = TSAUXC_EN_TT0;
				tsim_mask = TSINTR_TT0;
			}
			trgttiml = E1000_TRGTTIML0;
			trgttimh = E1000_TRGTTIMH0;
			freqout = E1000_FREQOUT0;
		}
		spin_lock_irqsave(&igb->tmreg_lock, flags);
		tsauxc = rd32(E1000_TSAUXC);
		tsim = rd32(E1000_TSIM);
		if (rq->perout.index == 1) {
			tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
			tsim &= ~TSINTR_TT1;
		} else {
			tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
			tsim &= ~TSINTR_TT0;
		}
		if (on) {
			int i = rq->perout.index;
			igb_pin_perout(igb, i, pin, use_freq);
			igb->perout[i].start.tv_sec = rq->perout.start.sec;
			igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
			igb->perout[i].period.tv_sec = ts.tv_sec;
			igb->perout[i].period.tv_nsec = ts.tv_nsec;
			wr32(trgttimh, rq->perout.start.sec);
			wr32(trgttiml, rq->perout.start.nsec);
			if (use_freq)
				wr32(freqout, ns);
			tsauxc |= tsauxc_mask;
			tsim |= tsim_mask;
		}
		wr32(E1000_TSAUXC, tsauxc);
		wr32(E1000_TSIM, tsim);
		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
		return 0;

	case PTP_CLK_REQ_PPS:
		spin_lock_irqsave(&igb->tmreg_lock, flags);
		tsim = rd32(E1000_TSIM);
		if (on)
			tsim |= TSINTR_SYS_WRAP;
		else
			tsim &= ~TSINTR_SYS_WRAP;
		igb->pps_sys_wrap_on = !!on;
		wr32(E1000_TSIM, tsim);
		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
		return 0;
	}

	return -EOPNOTSUPP;
}
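
For reference, the request reaching this handler comes from the PTP core. An example of a 1 Hz periodic-output request (channel choice arbitrary): half the period is 500000000 ns, which matches one of the clock-output special cases above, so the i210 would drive the pin in frequency (FREQOUT) mode rather than target-time mode:

/* Example request: 1 Hz periodic output on channel 0. */
struct ptp_clock_request example_rq = {
	.type = PTP_CLK_REQ_PEROUT,
	.perout = {
		.index = 0,
		.start = { .sec = 0, .nsec = 0 },
		.period = { .sec = 1, .nsec = 0 },
	},
};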
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
			global_device_reset = false;

	if (global_device_reset &&
		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to ensure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}
Example #17
/**
 * igb_ptp_set_timestamp_mode - setup hardware for timestamping
 * @adapter: networking device structure
 * @config: hwtstamp configuration
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 */
static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
				      struct hwtstamp_config *config)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	/* reserved for future extensions */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
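		/* fall through */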
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/* 82576 cannot timestamp all packets, which it needs to do to
		 * support both V1 Sync and Delay_Req messages
		 */
		if (hw->mac.type != e1000_82576) {
			tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
			config->rx_filter = HWTSTAMP_FILTER_ALL;
			break;
		}
		/* fall through */
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one Rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		is_l2 = true;
		is_l4 = true;

		if ((hw->mac.type == e1000_i210) ||
		    (hw->mac.type == e1000_i211)) {
			regval = rd32(E1000_RXPBS);
			regval |= E1000_RXPBS_CFG_TS_EN;
			wr32(E1000_RXPBS, regval);
		}
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(IGB_ETQF_FILTER_1588),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0);

	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPL);
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPL);
	regval = rd32(E1000_RXSTMPH);

	return 0;
}
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32  ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on the SGMII PHY so the address probe below can work */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY address was not found among the 3-bit addresses 1-7 */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore the previous SFP cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
Example #19
/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
 *
 * Returns Success if the flow input set is supported.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
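
The get side of this interface is the mirror image: read HENA back and translate the PCTYPE bits into RXH_* flags. A hedged sketch for the TCP_V4_FLOW case only (the real driver's get path handles every flow type):

/* Illustrative read-back for one flow type; not the complete driver code. */
static int example_get_rss_hash_opt(struct i40e_pf *pf,
				    struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	nfc->data = 0;
	if (nfc->flow_type == TCP_V4_FLOW &&
	    (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)))
		nfc->data = RXH_IP_SRC | RXH_IP_DST |
			    RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return 0;
}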
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg;
	bool pcs_autoneg;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return ret_val;

	/* On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
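		/* fall through */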
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				printk(KERN_DEBUG "NVM Read Error\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = false;
		}

		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	/*
	 * We force flow control to prevent the CTRL register values from being
	 * overwritten by the autonegotiated flow control values
	 */
	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	if (!igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return ret_val;
}
Example #21
static int
i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
{
	struct i40e_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct i40e_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int             	i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[I40E_MAX_TSO_SEGS];


	cmd = off = 0;
	m_head = *m_headp;

        /*
         * Important to capture the first descriptor
         * used because it will contain the index of
         * the one we tell the hardware to report back
         */
        first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = I40E_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = I40E_MAX_TSO_SEGS;
		if (i40e_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				/* defrag failed: drop the packet */
				que->mbuf_defrag_failed++;
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = i40e_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag  << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

        /* Set the index of the descriptor that will be marked done */
        buf = &txr->buffers[first];
	buf->eop_index = last;

        bus_dmamap_sync(txr->dma.tag, txr->dma.map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	i40e_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	u32 eecd;
	s32 ret_val;
	u16 size;
	u32 ctrl_ext = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
	}

	/* Set media type */
	phy->media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		dev_spec->sgmii_active = true;
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	default:
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
	if (mac->type == e1000_82576)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
	if (mac->type == e1000_82580)
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
	if (mac->type == e1000_i350)
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
	/* reset routine */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts */
	if (mac->type == e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	/* NVM initialization */
	eecd = rd32(E1000_EECD);

	nvm->opcode_bits        = 8;
	nvm->delay_usec         = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size    = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size    = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	size += NVM_WORD_SIZE_BASE_SHIFT;

	if ((hw->mac.type == e1000_82576) && (size > 15)) {
		pr_notice("The NVM size is not valid, defaulting to 32K\n");
		size = 15;
	}
	nvm->word_size = 1 << size;
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	/* NVM function pointers */
	nvm->ops.acquire = igb_acquire_nvm_82575;
	if (nvm->word_size < (1 << 15))
		nvm->ops.read = igb_read_nvm_eerd;
	else
		nvm->ops.read = igb_read_nvm_spi;

	nvm->ops.release = igb_release_nvm_82575;
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		break;
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		break;
	default:
		nvm->ops.validate = igb_validate_nvm_checksum;
		nvm->ops.update = igb_update_nvm_checksum;
	}
	nvm->ops.write = igb_write_nvm_spi;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	if (phy->media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		return 0;
	}

	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us      = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	/* PHY function pointers */
	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset      = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset      = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
	} else if (hw->mac.type >= e1000_82580) {
		phy->ops.read_reg   = igb_read_phy_reg_82580;
		phy->ops.write_reg  = igb_write_phy_reg_82580;
	} else {
		phy->ops.read_reg   = igb_read_phy_reg_igp;
		phy->ops.write_reg  = igb_write_phy_reg_igp;
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
	               E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type                   = e1000_phy_m88;
		phy->ops.get_phy_info       = igb_get_phy_info_m88;

		if (phy->id == I347AT4_E_PHY_ID ||
		    phy->id == M88E1112_E_PHY_ID)
			phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;

		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type                   = e1000_phy_igp_3;
		phy->ops.get_phy_info       = igb_get_phy_info_igp;
		phy->ops.get_cable_length   = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type                   = e1000_phy_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length   = igb_get_cable_length_82580;
		phy->ops.get_phy_info       = igb_get_phy_info_82580;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}
/**
 *  igb_rx_fifo_flush_82575 - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in while
 *  Rx was being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Then enable the receiver and wait
	 * 2ms so that any packets that arrived while RCTL.EN was set are
	 * flushed.
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}
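/*
 * Usage sketch (assumed call site, not from this file): the flush is only
 * meaningful immediately after the receive unit has been (re)enabled, so
 * a typical caller invokes it at the end of the Rx configure path.
 */
static void example_configure_rx_82575(struct igb_adapter *adapter)
{
	/* ... RCTL/RXDCTL ring setup elided ... */
	igb_rx_fifo_flush_82575(&adapter->hw);
}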
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command routine for the Admin Send Queue.  It
 *  posts the descriptor (and any indirect buffer) to the ring, bumps the
 *  tail, and, unless the command is asynchronous or postponed, waits for
 *  the firmware write-back before returning.
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			usleep_range(1000, 2000);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
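/*
 * Usage sketch, assuming the i40e_fill_default_direct_cmd_desc() helper
 * and the get_version opcode provided elsewhere by this driver.  A direct
 * command needs no indirect buffer, so buff is NULL and buff_size is 0.
 */
static i40e_status example_aq_get_version(struct i40e_hw *hw)
{
	struct i40e_aq_desc desc;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);

	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}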
/**
 *  igb_get_phy_id_82575 - Retrieve PHY addr and id
 *  @hw: pointer to the HW structure
 *
 *  Retrieves the PHY address and ID for both PHYs that do and do not use
 *  the SGMII interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32  ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;

	/*
	 * For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/*
	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/*
			 * At the time of this writing, the M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

	return ret_code;
}
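/*
 * Usage sketch of the ARQ drain loop, assuming the caller has allocated an
 * event buffer of hw->aq.arq_buf_size bytes.  This mirrors the pattern of a
 * service-task handler; the per-opcode event dispatch is elided.
 */
static void example_drain_arq(struct i40e_hw *hw, u8 *event_buf)
{
	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.buf_len = hw->aq.arq_buf_size;
	event.msg_buf = event_buf;

	do {
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;	/* queue empty or error: nothing more to do */
		/* ... dispatch on le16_to_cpu(event.desc.opcode) ... */
	} while (pending);
}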
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
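/*
 * Usage sketch, assuming i40e_init_lan_hmc() takes the object counts shown
 * (its usual Tx/Rx/FCoE ordering); the counts themselves are placeholders.
 */
static enum i40e_status_code example_setup_hmc(struct i40e_hw *hw)
{
	enum i40e_status_code ret;

	/* size the HMC backing store for the expected object counts */
	ret = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
	if (ret != I40E_SUCCESS)
		return ret;

	/* prefer one big direct SD; the function falls back to paged */
	return i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
}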
static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
{
	static const u32 aux0_sel_sdp[IGB_N_SDP] = {
		AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
	};
	static const u32 aux1_sel_sdp[IGB_N_SDP] = {
		AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
	};
	static const u32 ts_sdp_en[IGB_N_SDP] = {
		TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
	};
	static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
		TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
		TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
	};
	static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
		TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
		TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
	};
	static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
		TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
		TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
	};
	static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
	};
	static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
	};
	struct e1000_hw *hw = &igb->hw;
	u32 ctrl, ctrl_ext, tssdp = 0;

	ctrl = rd32(E1000_CTRL);
	ctrl_ext = rd32(E1000_CTRL_EXT);
	tssdp = rd32(E1000_TSSDP);

	igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);

	/* Make sure this pin is not enabled as an input. */
	if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
		tssdp &= ~AUX0_TS_SDP_EN;

	if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
		tssdp &= ~AUX1_TS_SDP_EN;

	tssdp &= ~ts_sdp_sel_clr[pin];
	if (freq) {
		if (chan == 1)
			tssdp |= ts_sdp_sel_fc1[pin];
		else
			tssdp |= ts_sdp_sel_fc0[pin];
	} else {
		if (chan == 1)
			tssdp |= ts_sdp_sel_tt1[pin];
		else
			tssdp |= ts_sdp_sel_tt0[pin];
	}
	tssdp |= ts_sdp_en[pin];

	wr32(E1000_TSSDP, tssdp);
	wr32(E1000_CTRL, ctrl);
	wr32(E1000_CTRL_EXT, ctrl_ext);
}
void rlc_ena(struct pci_dev *dev)
{
	wr32(dev, RC_RLC_ENA, RLC_CTL);
}
/**
 * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Control hardware registers to enter the specific mode requested by the
 * user. Also used during reset path to ensure that timestamp settings are
 * maintained.
 *
 * Note: modifies config in place, and may widen the requested mode to a
 * broader one if the specific filter is not directly supported.
 **/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
				       struct hwtstamp_config *config)
{
	struct i40e_hw *hw = &pf->hw;
	u32 pf_id, tsyntype, regval;

	/* Reserved for future extensions. */
	if (config->flags)
		return -EINVAL;

	/* Confirm that 1588 is supported on this PF. */
	pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
		I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
	if (hw->pf_id != pf_id) {
		dev_err(&pf->pdev->dev,
			"PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
			hw->pf_id, hw->port, pf_id);
		return -EPERM;
	}

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		pf->ptp_tx = false;
		break;
	case HWTSTAMP_TX_ON:
		pf->ptp_tx = true;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		pf->ptp_rx = false;
		tsyntype = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		pf->ptp_rx = true;
		tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
			   I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		pf->ptp_rx = true;
		tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
			   I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	default:
		return -ERANGE;
	}

	/* Clear out all 1588-related registers to clear and unlatch them. */
	rd32(hw, I40E_PRTTSYN_STAT_0);
	rd32(hw, I40E_PRTTSYN_TXTIME_H);
	rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
	rd32(hw, I40E_PRTTSYN_RXTIME_H(3));

	/* Enable/disable the Tx timestamp interrupt based on user input. */
	regval = rd32(hw, I40E_PRTTSYN_CTL0);
	if (pf->ptp_tx)
		regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
	else
		regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
	wr32(hw, I40E_PRTTSYN_CTL0, regval);

	regval = rd32(hw, I40E_PFINT_ICR0_ENA);
	if (pf->ptp_tx)
		regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
	else
		regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, regval);

	/* There is no simple on/off switch for Rx. To "disable" Rx support,
	 * ignore any received timestamps, rather than turn off the clock.
	 */
	if (pf->ptp_rx) {
		regval = rd32(hw, I40E_PRTTSYN_CTL1);
		/* clear everything but the enable bit */
		regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
		/* now enable bits for desired Rx timestamps */
		regval |= tsyntype;
		wr32(hw, I40E_PRTTSYN_CTL1, regval);
	}

	return 0;
}
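/*
 * Usage sketch of the ioctl path that would drive this function; the
 * SIOCSHWTSTAMP plumbing is assumed, and only the config round-trip is
 * shown.  Note that the mode actually programmed is written back to user
 * space, since the function may widen the requested rx_filter.
 */
static int example_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = i40e_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}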