Example 1
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int		ret;
    unsigned long   flags;
    scsi_qla_host_t	*ha;
    struct scsi_cmnd *cmd;
    uint32_t	*clr_ptr;
    uint32_t        index;
    uint32_t	handle;
    struct cmd_type_7 *cmd_pkt;
    struct scatterlist *sg;
    uint16_t	cnt;
    uint16_t	req_cnt;
    uint16_t	tot_dsds;
    struct device_reg_24xx __iomem *reg;

    /* Setup device pointers. */
    ret = 0;
    ha = sp->ha;
    reg = &ha->iobase->isp24;
    cmd = sp->cmd;
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (ha->marker_needed != 0) {
        if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            return QLA_FUNCTION_FAILED;
        }
        ha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = ha->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (ha->outstanding_cmds[handle] == 0)
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (cmd->use_sg) {
        sg = (struct scatterlist *) cmd->request_buffer;
        tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                              cmd->sc_data_direction);
        if (tot_dsds == 0)
            goto queuing_error;
    } else if (cmd->request_bufflen) {
        dma_addr_t      req_dma;

        req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                                 cmd->request_bufflen, cmd->sc_data_direction);
        if (dma_mapping_error(req_dma))
            goto queuing_error;

        sp->dma_handle = req_dma;
        tot_dsds = 1;
    }

    req_cnt = qla24xx_calc_iocbs(tot_dsds);
    if (ha->req_q_cnt < (req_cnt + 2)) {
        cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
        if (ha->req_ring_index < cnt)
            ha->req_q_cnt = cnt - ha->req_ring_index;
        else
            ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
    }
    if (ha->req_q_cnt < (req_cnt + 2))
        goto queuing_error;

    /* Build command packet. */
    ha->current_outstanding_cmd = handle;
    ha->outstanding_cmds[handle] = sp;
    sp->ha = ha;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    ha->req_q_cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
    cmd_pkt->handle = handle;

    /* Zero out remaining portion of packet. */
    /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    ha->req_ring_index++;
    if (ha->req_ring_index == ha->request_q_length) {
        ha->req_ring_index = 0;
        ha->request_ring_ptr = ha->request_ring;
    } else
        ha->request_ring_ptr++;

    sp->flags |= SRB_DMA_VALID;
    sp->state = SRB_ACTIVE_STATE;

    /* Set chip new ring index. */
    WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
    RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(ha);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error:
    if (cmd->use_sg && tot_dsds) {
        sg = (struct scatterlist *) cmd->request_buffer;
        pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                     cmd->sc_data_direction);
    } else if (tot_dsds) {
        pci_unmap_single(ha->pdev, sp->dma_handle,
                         cmd->request_bufflen, cmd->sc_data_direction);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
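
The handle-allocation loop near the top of qla24xx_start_scsi scans the outstanding-command table circularly, starting just past current_outstanding_cmd and never using slot 0. Below is a minimal user-space sketch of that circular free-slot search; the array, names, and sizes are illustrative stand-ins, not the qla2xxx data structures.

#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 8

/* Hypothetical stand-in for the driver's outstanding-command table. */
static void *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
static unsigned int current_outstanding_cmd = 1;

/* Return a free slot index in 1..MAX-1, or 0 if the table is full.
 * Slot 0 is never used, mirroring the driver's convention of wrapping to 1. */
static unsigned int alloc_handle(void)
{
    unsigned int handle = current_outstanding_cmd;
    unsigned int index;

    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (outstanding_cmds[handle] == NULL)
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        return 0;               /* no free slot: queuing error */

    current_outstanding_cmd = handle;
    return handle;
}

int main(void)
{
    int dummy;
    unsigned int h = alloc_handle();

    if (h) {
        outstanding_cmds[h] = &dummy;   /* mark the slot busy */
        printf("allocated handle %u\n", h);
    }
    return 0;
}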
Example 2
void mwl_rx_recv(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl_priv *priv;
	struct mwl_rx_desc *curr_desc;
	int work_done = 0;
	struct sk_buff *prx_skb = NULL;
	int pkt_len;
	struct ieee80211_rx_status status;
	struct mwl_vif *mwl_vif = NULL;
	struct ieee80211_hdr *wh;
	u32 status_mask;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!hw);
	priv = hw->priv;
	BUG_ON(!priv);

	curr_desc = priv->desc_data[0].pnext_rx_desc;

	if (curr_desc == NULL) {
		status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
		writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
		       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

		priv->is_rx_schedule = false;

		WLDBG_EXIT_INFO(DBG_LEVEL_4, "busy or no receiving packets");
		return;
	}

	while ((curr_desc->rx_control == EAGLE_RXD_CTRL_DMA_OWN)
		&& (work_done < priv->recv_limit)) {
		prx_skb = curr_desc->psk_buff;
		if (prx_skb == NULL)
			goto out;
		pci_unmap_single(priv->pdev,
				 ENDIAN_SWAP32(curr_desc->pphys_buff_data),
				 priv->desc_data[0].rx_buf_size,
				 PCI_DMA_FROMDEVICE);
		pkt_len = curr_desc->pkt_len;

		if (skb_tailroom(prx_skb) < pkt_len) {
			WLDBG_PRINT("Critical error: not enough tail room =%x pkt_len=%x, curr_desc=%x, curr_desc_data=%x",
				    skb_tailroom(prx_skb), pkt_len, curr_desc, curr_desc->pbuff_data);
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		if (curr_desc->channel != hw->conf.chandef.chan->hw_value) {
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		mwl_rx_prepare_status(curr_desc, &status);

		priv->noise = -curr_desc->noise_floor;

		wh = &((struct mwl_dma_data *)prx_skb->data)->wh;

		if (ieee80211_has_protected(wh->frame_control)) {
			/* Check if hw crypto has been enabled for
			 * this bss. If yes, set the status flags
			 * accordingly
			 */
			if (ieee80211_has_tods(wh->frame_control))
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr1);
			else
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr2);

			if (mwl_vif != NULL &&
			    mwl_vif->is_hw_crypto_enabled) {
				/*
				 * When MMIC ERROR is encountered
				 * by the firmware, payload is
				 * dropped and only 32 bytes of
				 * mwl8k Firmware header is sent
				 * to the host.
				 *
				 * We need to add four bytes of
				 * key information.  In it
				 * MAC80211 expects keyidx set to
				 * 0 for triggering Counter
				 * Measure of MMIC failure.
				 */
				if (status.flag & RX_FLAG_MMIC_ERROR) {
					struct mwl_dma_data *tr;

					tr = (struct mwl_dma_data *)prx_skb->data;
					memset((void *)&(tr->data), 0, 4);
					pkt_len += 4;
				}

				if (!ieee80211_is_auth(wh->frame_control))
					status.flag |= RX_FLAG_IV_STRIPPED |
						       RX_FLAG_DECRYPTED |
						       RX_FLAG_MMIC_STRIPPED;
			}
		}

		skb_put(prx_skb, pkt_len);
		mwl_rx_remove_dma_header(prx_skb, curr_desc->qos_ctrl);
		memcpy(IEEE80211_SKB_RXCB(prx_skb), &status, sizeof(status));
		ieee80211_rx(hw, prx_skb);
out:
		mwl_rx_refill(priv, curr_desc);
		curr_desc->rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
		curr_desc->qos_ctrl = 0;
		curr_desc = curr_desc->pnext;
		work_done++;
	}

	priv->desc_data[0].pnext_rx_desc = curr_desc;

	status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
	writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
	       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

	priv->is_rx_schedule = false;

	WLDBG_EXIT(DBG_LEVEL_4);
}
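
The receive loop above hands descriptors back and forth with the hardware through an ownership flag, bounded by recv_limit so the handler cannot monopolize the CPU. A minimal user-space sketch of that ownership-flag polling idea follows; the field names and flag polarity are illustrative, not the mwlwifi definitions.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical descriptor: the real mwl_rx_desc also carries DMA addresses
 * and packet metadata; only the handshake fields are kept here. */
struct rx_desc {
    bool ready_for_driver;      /* set by "hardware" when the buffer is filled */
    int payload;
    struct rx_desc *pnext;      /* circular list, as in the driver */
};

/* Consume filled descriptors, up to 'limit', handing each one back. */
static int rx_poll(struct rx_desc *curr, int limit)
{
    int work_done = 0;

    while (curr->ready_for_driver && work_done < limit) {
        printf("received packet %d\n", curr->payload);
        curr->ready_for_driver = false;     /* return it for refill */
        curr = curr->pnext;
        work_done++;
    }
    return work_done;
}

int main(void)
{
    struct rx_desc ring[3];
    int i;

    for (i = 0; i < 3; i++) {
        ring[i].ready_for_driver = (i < 2);
        ring[i].payload = i;
        ring[i].pnext = &ring[(i + 1) % 3];
    }
    printf("processed %d descriptors\n", rx_poll(&ring[0], 8));
    return 0;
}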
Example 3
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   rtdev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   rtdev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   rtdev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct /*RTnet*/rtskb *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   rtdev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#if 0 /*RTnet*/
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
				skb->rtdev = rtdev;
				/*RTnet*/rtskb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				//eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
				//		 pkt_len, 0);
				memcpy(rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#else
				memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
			{
				unsigned char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb, pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
					       rtdev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       temp);/*RTnet*/
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
			skb->time_stamp = *time_stamp;
			/*RTnet*/rtnetif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
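
The good-frame branch above recovers the payload length from the descriptor status word and drops the trailing CRC. A tiny sketch that replays the same shift-and-mask arithmetic on a made-up status value (no independent datasheet check, just the masks the driver uses):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t status = 0x03200300;               /* hypothetical "good" status */
    int frame_len = (status >> 16) & 0x7ff;     /* length field, CRC included */
    int pkt_len   = frame_len - 4;              /* omit the four CRC octets */

    printf("frame %d bytes, payload %d bytes\n", frame_len, pkt_len);
    return 0;
}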

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
	nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
	struct rtnet_device *rtdev =
	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
	long ioaddr = rtdev->base_addr;
	unsigned int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
		return RTDM_IRQ_NONE;
	}

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(rtdev, &time_stamp);
			tulip_refill_rx(rtdev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			rtdm_lock_get(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   rtdev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
				rtnetif_tx(rtdev);
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   rtdev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/*RTnet*/rtnetif_wake_queue(rtdev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			rtdm_lock_put(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
#if 0 /*RTnet*/
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(rtdev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				/*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					rtdev->name, tp->nir, error);
			}
#endif /*RTnet*/
			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example 4
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}
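
Each of the four loops in p54p_stop follows the same unmap-if-mapped, free, NULL-out idiom before the final ring_control wipe. A user-space sketch of that teardown shape, with a hypothetical slot type and no real PCI calls:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4

/* Hypothetical per-slot state: a host buffer plus a "mapped" marker that
 * stands in for the DMA address the driver checks. */
struct slot {
    void *buf;
    int   mapped;
};

static void ring_teardown(struct slot *ring, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (ring[i].mapped)
            ring[i].mapped = 0;     /* would be pci_unmap_single() */
        free(ring[i].buf);          /* would be kfree_skb()/p54_free_skb() */
        ring[i].buf = NULL;
    }
    memset(ring, 0, n * sizeof(*ring));  /* mirror the final ring_control wipe */
}

int main(void)
{
    struct slot rx[RING_SIZE] = { { malloc(16), 1 } };

    ring_teardown(rx, RING_SIZE);
    printf("ring cleared\n");
    return 0;
}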
Example 5
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				netif_tx_start_queue(tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
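
The unwind label restores the queue by walking insert_count back down to write_count, undoing exactly the descriptors filled before the mapping failure. A minimal sketch of that rollback, using illustrative constants in place of the sfc queue fields:

#include <stdio.h>

#define QUEUE_ENTRIES 8          /* power of two, like txq_entries */
#define PTR_MASK (QUEUE_ENTRIES - 1)

/* Hypothetical per-descriptor record of what would need unmapping. */
struct tx_buffer {
    unsigned int len;
};

static struct tx_buffer buffer[QUEUE_ENTRIES];
static unsigned int insert_count, write_count;

/* Roll the insert pointer back to the last value the hardware saw,
 * releasing everything pushed since then - the shape of the 'unwind'
 * label in efx_enqueue_skb. */
static void unwind(void)
{
    while (insert_count != write_count) {
        --insert_count;
        buffer[insert_count & PTR_MASK].len = 0;  /* would also unmap DMA */
    }
}

int main(void)
{
    /* Pretend three descriptors were filled before a mapping failure. */
    for (insert_count = 0; insert_count < 3; insert_count++)
        buffer[insert_count & PTR_MASK].len = 100;

    unwind();
    printf("insert_count restored to %u\n", insert_count);
    return 0;
}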
Example 6
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Limit not reached and the descriptor belongs to the CPU */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Read the descriptor status */
		err = descptr->status;
		/* Global error status set */
		if (err & DSC_RX_ERR) {
			/* RX dribble */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			/* Buffer length exceeded */
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			/* Packet too long */
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			/* Packet < 64 bytes */
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			/* CRC error */
			if (err & DSC_RX_ERR_CRC) {
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Packet successfully received */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* Do not count the CRC */
		skb_put(skb_ptr, descptr->len - 4);
		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Send to upper layer */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - 4;

		/* put new skb into descriptor */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
						descptr->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));

next_descr:
		/* put the descriptor back to the MAC */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}
Example 7
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int work_limit = max_interrupt_work;

	spin_lock(&np->lock);

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int tx_status = le32_to_cpu(np->tx_ring[entry].status);

			if (tx_status < 0)
				break;
			if (tx_status & 0x8000) { 		/* There was an error, log it. */
#ifndef final_version
				if (debug > 1)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						   dev->name, tx_status);
#endif
				np->stats.tx_errors++;
				if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
				if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
				if (tx_status & 0x0200) np->stats.tx_window_errors++;
				if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
				if ((tx_status & 0x0080) && np->full_duplex == 0)
					np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & 0x0100) np->stats.collisions16++;
#endif
			} else {
#ifdef ETHER_STATS
				if (tx_status & 0x0001) np->stats.tx_deferred++;
#endif
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
				np->stats.collisions += (tx_status >> 3) & 15;
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			pci_unmap_single(np->pdev,np->tx_addr[entry],
						np->tx_skbuff[entry]->len,
						PCI_DMA_TODEVICE);
			np->tx_q_bytes -= np->tx_skbuff[entry]->len;
			dev_kfree_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4
			&&  np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
						   TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
			writel(10, ioaddr + GPTimer);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));

	spin_unlock(&np->lock);
}
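
The work_limit countdown above caps how long the handler may loop before it re-arms a timer and bails out. A small sketch of that bounded-budget pattern, with a stubbed event source standing in for the interrupt status register:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical event source: returns true while events are pending. */
static int pending = 12;
static bool event_pending(void) { return pending-- > 0; }

/* Drain events but give up after 'budget' iterations, signalling that the
 * remainder should be handled later (the driver re-arms a timer instead). */
static bool service_events(int budget)
{
    while (event_pending()) {
        /* ... handle one event (Rx/Tx completion) ... */
        if (--budget < 0)
            return false;       /* too much work: defer the rest */
    }
    return true;                /* fully drained */
}

int main(void)
{
    if (!service_events(10))
        printf("deferred remaining work\n");
    return 0;
}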
Example 8
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
        int drop = 0, mit_sel = 0;

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        drop = atomic_read(&netdev_dropping);
                        if (drop)
                                goto throttle;
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        mit_sel =
#endif
			netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        switch (mit_sel) {
                        case NET_RX_SUCCESS:
                        case NET_RX_CN_LOW:
                        case NET_RX_CN_MOD:
                                break;

                        case NET_RX_CN_HIGH:
                                rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
                                break;
                        case NET_RX_DROP:
                                rx_work_limit = -1;
                                break;
                        default:
                                printk("unknown feedback return code %d\n", mit_sel);
                                break;
                        }

                        drop = atomic_read(&netdev_dropping);
                        if (drop) {
throttle:
                                rx_work_limit = -1;
                                mit_sel = NET_RX_DROP;

                                if (tp->fc_bit) {
                                        long ioaddr = dev->base_addr;

                                        /* disable Rx & RxNoBuf ints. */
                                        outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
                                        set_bit(tp->fc_bit, &netdev_fc_xoff);
                                }
                        }
#endif
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
#ifdef CONFIG_NET_HW_FLOWCONTROL

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We can have IM enabled
           continuously but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           This would turn on IM for devices that are not contributing
           to backlog congestion with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note. We only use min and max (0, 15) settings from mit_table */


        if( tp->flags &  HAS_INTR_MITIGATION) {
                if((received > 1 || mit_sel == NET_RX_DROP)
                   && tp->mit_sel != 15 ) {
                        tp->mit_sel = 15;
                        tp->mit_change = 1; /* Force IM change */
                }
                if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
                        tp->mit_sel = 0;
                        tp->mit_change = 1; /* Force IM change */
                }
        }

        return RX_RING_SIZE+1; /* maxrx+1 */
#else
	return received;
#endif
}
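
The interrupt-mitigation comment above describes a two-level hysteresis: jump to the maximum setting when a single interrupt delivered more than one packet (or packets were dropped), and fall back to zero otherwise. A sketch of that decision, assuming only the 0/15 endpoints the comment mentions:

#include <stdio.h>
#include <stdbool.h>

static int mit_sel;             /* 0 = mitigation off, 15 = maximum */

/* Return true when the hardware setting must be reprogrammed. */
static bool update_mitigation(int received, bool dropping)
{
    int new_sel = (received > 1 || dropping) ? 15 : 0;

    if (new_sel != mit_sel) {
        mit_sel = new_sel;
        return true;            /* force an IM change, as the driver does */
    }
    return false;
}

int main(void)
{
    printf("change=%d sel=%d\n", update_mitigation(3, false), mit_sel);
    printf("change=%d sel=%d\n", update_mitigation(1, false), mit_sel);
    return 0;
}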
Example 9
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ) 
	        handled = phy_interrupt (dev);
    
	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        if ((!tp->fc_bit) ||
			    (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
#endif
				rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
#ifdef CONFIG_NET_HW_FLOWCONTROL
				if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
					tp->stats.rx_errors++;
					tulip_start_rxtx(tp);
				}
#else
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
#endif
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					dev->name, tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
Example 10
void ep1_int_hndlr()
{
	dma_addr_t dma_addr;
	unsigned int len;
	int status = Ser0UDCCS1;

	PRINTKD( "[%lu]Ep1 int %d\n", (jiffies-start_time)*10, status);
	
	if ( naking )
		printk( "%sEh? in ISR but naking = %d\n", "usbrx: ", naking );

	// Receive packet complete
	if (status & UDCCS1_RPC) {
		if (!ep1_curdmalen) {
			printk("usb_recv: RPC for non-existent buffer\n");
			naking = 1;
			return;
		}

		sa1100_stop_dma(dmachn_rx);

		if (status & UDCCS1_SST) {
			printk("usb_recv: stall sent OMP=%d\n",Ser0UDCOMP);
			UDC_flip(Ser0UDCCS1, UDCCS1_SST);
			ep1_done(-EIO); // UDC aborted the current transfer, so we abort too
			return;
		}

		if (status & UDCCS1_RPE) {
		    printk("usb_recv: RPError %x\n", status);
			UDC_flip(Ser0UDCCS1, UDCCS1_RPC);
			ep1_done(-EIO);
			return;
		}

		dma_addr = sa1100_get_dma_pos(dmachn_rx);
		pci_unmap_single(NULL, ep1_curdmapos, ep1_curdmalen, PCI_DMA_FROMDEVICE);
		
		len = dma_addr - ep1_curdmapos;

		if (len < ep1_curdmalen) {
			char *buf = ep1_curdmabuf + len;
			while (Ser0UDCCS1 & UDCCS1_RNE) {
				if (len >= ep1_curdmalen) {
					printk("usb_recv: too much data in fifo\n");
					break;
				}
				*buf++ = Ser0UDCDR;
				len++;
			}
		} else if (Ser0UDCCS1 & UDCCS1_RNE) {
			printk("usb_recv: fifo screwed, shouldn't contain data\n");
			len = 0;
		}
		ep1_curdmalen = 0;  /* dma unmap already done */
		ep1_remain -= len;
		naking = 1;
		ep1_done((len) ? 0 : -EPIPE);
	}
	/* else, you can get here if we are holding NAK */
}
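
ep1_int_hndlr derives the received length from how far the DMA engine advanced and then drains any tail bytes still sitting in the UDC FIFO by hand. A user-space sketch of that length calculation; fifo_pop() is a hypothetical stand-in for reading Ser0UDCDR:

#include <stdio.h>
#include <stdint.h>

static int fifo_left = 3;
static int fifo_has_data(void) { return fifo_left > 0; }
static uint8_t fifo_pop(void)  { fifo_left--; return 0xAB; }

int main(void)
{
    uint8_t buf[64];
    uintptr_t dma_start = (uintptr_t)buf;
    uintptr_t dma_pos   = dma_start + 10;       /* where DMA stopped */
    unsigned int len    = dma_pos - dma_start;  /* bytes already in buf */

    while (fifo_has_data() && len < sizeof(buf))
        buf[len++] = fifo_pop();                /* tail bytes not DMA'd */

    printf("received %u bytes\n", len);
    return 0;
}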
Example 11
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		

		dev_kfree_skb_any(skb);
	}
}
Example 12
/****************************************************************
 *	Name:	Irq_Handler	:LOCAL
 *
 *	Description:	Interrupt handler.
 *
 *	Parameters:		irq		- Hardware IRQ number.
 *					dev_id	-
 *					regs	-
 *
 *	Returns:		TRUE if drive is not ready in time.
 *
 ****************************************************************/
static void Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
	{
	struct Scsi_Host   *shost = NULL;	// Pointer to host data block
	PADAPTER2000		padapter;		// Pointer to adapter control structure
	PDEV2000			pdev;
	Scsi_Cmnd		   *SCpnt;
	UCHAR				tag = 0;
	UCHAR				tag0;
	ULONG				error;
	int					pun;
	int					bus;
	int					z;
    unsigned long		flags;

    /*
     * Disable interrupts, if they aren't already disabled and acquire
     * the I/O spinlock.
     */
    spin_lock_irqsave (&io_request_lock, flags);

	DEB(printk ("\npci2000 received interrupt "));
	for ( z = 0; z < NumAdapters;  z++ )										// scan for interrupt to process
		{
		if ( PsiHost[z]->irq == (UCHAR)(irq & 0xFF) )
			{
			tag = inb_p (HOSTDATA(PsiHost[z])->tag);
			if (  tag )
				{
				shost = PsiHost[z];
				break;
				}
			}
		}

	if ( !shost )
		{
		DEB (printk ("\npci2000: not my interrupt"));
		goto irq_return;
		}

	padapter = HOSTDATA(shost);

	tag0 = tag & 0x7F;															// mask off the error bit
	for ( bus = 0;  bus < MAX_BUS;  bus++ )										// scan the busses
    	{
		for ( pun = 0;  pun < MAX_UNITS;  pun++ )								// scan the targets
    		{
			pdev = &padapter->dev[bus][pun];
			if ( !pdev->tag )
    			continue;
			if ( pdev->tag == tag0 )											// is this it?
				{
				pdev->tag = 0;
				SCpnt = pdev->SCpnt;
				goto unmapProceed;
    			}
			}
    	}

	outb_p (0xFF, padapter->tag);												// clear the op interrupt
	outb_p (CMD_DONE, padapter->cmd);											// complete the op
	goto irq_return;															// done, but with what?

unmapProceed:;
	if ( !bus )
		{
		switch ( SCpnt->cmnd[0] )
			{
			case SCSIOP_TEST_UNIT_READY:
				pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, sizeof (SCpnt->sense_buffer), PCI_DMA_FROMDEVICE);
				goto irqProceed;
			case SCSIOP_READ_CAPACITY:
				pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, 8, PCI_DMA_FROMDEVICE);
				goto irqProceed;
			case SCSIOP_VERIFY:
			case SCSIOP_START_STOP_UNIT:
			case SCSIOP_MEDIUM_REMOVAL:
				goto irqProceed;
			}
		}
	if ( SCpnt->SCp.have_data_in )
		pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
	else 
		{
		if ( SCpnt->use_sg )
			pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
		}

irqProceed:;
	if ( tag & ERR08_TAGGED )												// is there an error here?
		{
		if ( WaitReady (padapter) )
			{
			OpDone (SCpnt, DID_TIME_OUT << 16);
			goto irq_return;
			}

		outb_p (tag0, padapter->mb0);										// get real error code
		outb_p (CMD_ERROR, padapter->cmd);
		if ( WaitReady (padapter) )											// wait for controller to suck up the op
			{
			OpDone (SCpnt, DID_TIME_OUT << 16);
			goto irq_return;
			}

		error = inl (padapter->mb0);										// get error data
		outb_p (0xFF, padapter->tag);										// clear the op interrupt
		outb_p (CMD_DONE, padapter->cmd);									// complete the op

		DEB (printk ("status: %lX ", error));
		if ( error == 0x00020002 )											// is this error a check condition?
			{
			if ( bus )														// are we doing SCSI commands?
				{
				OpDone (SCpnt, (DID_OK << 16) | 2);
				goto irq_return;
				}
			if ( *SCpnt->cmnd == SCSIOP_TEST_UNIT_READY )
				OpDone (SCpnt, (DRIVER_SENSE << 24) | (DID_OK << 16) | 2);	// tell caller we have sense data too
			else
				OpDone (SCpnt, DID_ERROR << 16);
			goto irq_return;
			}
		OpDone (SCpnt, DID_ERROR << 16);
		goto irq_return;
		}

	outb_p (0xFF, padapter->tag);											// clear the op interrupt
	outb_p (CMD_DONE, padapter->cmd);										// complete the op
	OpDone (SCpnt, DID_OK << 16);

irq_return:;
    /*
     * Release the I/O spinlock and restore the original flags
     * which will enable interrupts if and only if they were
     * enabled on entry.
     */
    spin_unlock_irqrestore (&io_request_lock, flags);
	}
Example 13
static int __init vino_init(void)
{
	unsigned long rev;
	dma_addr_t dma;
	int i, ret = 0;
	
	/* VINO is an Indy-specific beast */
	if (ip22_is_fullhouse())
		return -ENODEV;

	/*
	 * VINO is in the EISA address space, so the sysid register will tell
	 * us if the EISA_PRESENT pin on MC has been pulled low.
	 * 
	 * If EISA_PRESENT is not set we definitely don't have a VINO-equipped
	 * system.
	 */
	if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
		printk(KERN_ERR "VINO not found\n");
		return -ENODEV;
	}

	vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
	if (!vino)
		return -EIO;

	/* Okay, once we know that VINO is present we'll read its revision
	 * the safe way. One never knows... */
	if (get_dbe(rev, &(vino->rev_id))) {
		printk(KERN_ERR "VINO: failed to read revision register\n");
		ret = -ENODEV;
		goto out_unmap;
	}
	if (VINO_ID_VALUE(rev) != VINO_CHIP_ID) {
		printk(KERN_ERR "VINO is not VINO (Rev/ID: 0x%04lx)\n", rev);
		ret = -ENODEV;
		goto out_unmap;
	}
	printk(KERN_INFO "VINO Rev: 0x%02lx\n", VINO_REV_NUM(rev));

	Vino = (struct vino_video *)
		kmalloc(sizeof(struct vino_video), GFP_KERNEL);
	if (!Vino) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	memset(Vino, 0, sizeof(struct vino_video));

	Vino->dummy_desc = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!Vino->dummy_desc) {
		ret = -ENOMEM;
		goto out_free_vino;
	}
	Vino->dummy_dma.cpu = pci_alloc_consistent(NULL, 4 * sizeof(dma_addr_t),
						   &Vino->dummy_dma.dma);
	if (!Vino->dummy_dma.cpu) {
		ret = -ENOMEM;
		goto out_free_dummy_desc;
	}
	dma = pci_map_single(NULL, (void *)Vino->dummy_desc, PAGE_SIZE,
			     PCI_DMA_FROMDEVICE);
	for (i = 0; i < 4; i++)
		Vino->dummy_dma.cpu[i] = dma;

	vino->control = 0;
	/* prevent VINO from throwing spurious interrupts */
	vino->a.next_4_desc = Vino->dummy_dma.dma;
	vino->b.next_4_desc = Vino->dummy_dma.dma;
	udelay(5);
	vino->intr_status = 0;
        /* set threshold level */
        vino->a.fifo_thres = threshold_a;
	vino->b.fifo_thres = threshold_b;

	spin_lock_init(&Vino->vino_lock);
	spin_lock_init(&Vino->input_lock);
	init_channel_data(&Vino->chA, VINO_CHAN_A);
	init_channel_data(&Vino->chB, VINO_CHAN_B);

	if (request_irq(SGI_VINO_IRQ, vino_interrupt, 0, vinostr, NULL)) {
		printk(KERN_ERR "VINO: request irq%02d failed\n",
		       SGI_VINO_IRQ);
		ret = -EAGAIN;
		goto out_unmap_dummy_desc;
	}

	ret = vino_i2c_add_bus();
	if (ret) {
		printk(KERN_ERR "VINO: I2C bus registration failed\n");
		goto out_free_irq;
	}

	if (video_register_device(&Vino->chA.vdev, VFL_TYPE_GRABBER, -1) < 0) {
		printk("%s, chnl %d: device registration failed.\n",
			Vino->chA.vdev.name, Vino->chA.chan);
		ret = -EINVAL;
		goto out_i2c_del_bus;
	}
	if (video_register_device(&Vino->chB.vdev, VFL_TYPE_GRABBER, -1) < 0) {
		printk("%s, chnl %d: device registration failed.\n",
			Vino->chB.vdev.name, Vino->chB.chan);
		ret = -EINVAL;
		goto out_unregister_vdev;
	}

#if defined(CONFIG_KMOD) && defined (MODULE)
	request_module("saa7191");
	request_module("indycam");
#endif
	return 0;

out_unregister_vdev:
	video_unregister_device(&Vino->chA.vdev);
out_i2c_del_bus:
	vino_i2c_del_bus();
out_free_irq:
	free_irq(SGI_VINO_IRQ, NULL);
out_unmap_dummy_desc:
	pci_unmap_single(NULL, Vino->dummy_dma.dma, PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);
	pci_free_consistent(NULL, 4 * sizeof(dma_addr_t),
			    (void *)Vino->dummy_dma.cpu, Vino->dummy_dma.dma);
out_free_dummy_desc:
	free_page(Vino->dummy_desc);
out_free_vino:
	kfree(Vino);
out_unmap:
	iounmap(vino);

	return ret;
}
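
vino_init acquires its resources in order and unwinds them in reverse through the chain of goto labels at the bottom. A minimal sketch of that error-unwind shape, using plain allocations instead of the driver's ioremap/IRQ/DMA objects:

#include <stdio.h>
#include <stdlib.h>

/* Each successful acquisition gets a matching label at the bottom; a failure
 * in a later step releases everything acquired so far, in reverse order. */
static int setup(void)
{
    void *a, *b, *c;
    int ret = 0;

    a = malloc(32);
    if (!a)
        return -1;

    b = malloc(32);
    if (!b) {
        ret = -1;
        goto out_free_a;
    }

    c = malloc(32);
    if (!c) {
        ret = -1;
        goto out_free_b;
    }

    printf("all resources acquired\n");
    free(c); free(b); free(a);   /* normally kept; freed here to keep the demo leak-free */
    return 0;

out_free_b:
    free(b);
out_free_a:
    free(a);
    return ret;
}

int main(void) { return setup() ? 1 : 0; }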
Example 14
void pcie_tx_done_ndp(struct ieee80211_hw *hw)
{
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct pcie_desc_data_ndp *desc = &pcie_priv->desc_data_ndp;
	u32 tx_done_head, tx_done_tail;
	struct tx_ring_done *ptx_ring_done;
	u32 index;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct pcie_tx_ctrl_ndp *tx_ctrl;
	struct pcie_dma_data *dma_data;
	u16 hdrlen;

	spin_lock_bh(&pcie_priv->tx_desc_lock);

	tx_done_head = readl(pcie_priv->iobase1 +
			     MACREG_REG_TXDONEHEAD);
	tx_done_tail = desc->tx_done_tail & (MAX_TX_RING_DONE_SIZE - 1);
	tx_done_head &= (MAX_TX_RING_DONE_SIZE - 1);

	while (tx_done_head != tx_done_tail) {
		ptx_ring_done = &desc->ptx_ring_done[tx_done_tail];

		index = le32_to_cpu(ptx_ring_done->user);
		ptx_ring_done->user = 0;
		if (index >= MAX_TX_RING_SEND_SIZE) {
			wiphy_err(hw->wiphy,
				  "corruption for index of buffer\n");
			break;
		}
		skb = desc->tx_vbuflist[index];
		if (!skb) {
			wiphy_err(hw->wiphy,
				  "buffer is NULL for tx done ring\n");
			break;
		}
		pci_unmap_single(pcie_priv->pdev,
				 desc->pphys_tx_buflist[index],
				 skb->len,
				 PCI_DMA_TODEVICE);
		desc->pphys_tx_buflist[index] = 0;
		desc->tx_vbuflist[index] = NULL;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_ctrl = (struct pcie_tx_ctrl_ndp *)
			tx_info->status.status_driver_data;

		if (tx_ctrl->flags & TX_CTRL_TYPE_DATA) {
			dev_kfree_skb_any(skb);
			goto bypass_ack;
		} else {
			/* Remove H/W dma header */
			dma_data = (struct pcie_dma_data *)skb->data;

			if (ieee80211_is_assoc_resp(
			    dma_data->wh.frame_control) ||
			    ieee80211_is_reassoc_resp(
			    dma_data->wh.frame_control)) {
				dev_kfree_skb_any(skb);
				goto bypass_ack;
			}
			hdrlen = ieee80211_hdrlen(
				dma_data->wh.frame_control);
			memmove(dma_data->data - hdrlen,
				&dma_data->wh, hdrlen);
			skb_pull(skb, sizeof(*dma_data) - hdrlen);
		}

		pcie_tx_prepare_info(priv, 0, tx_info);
		ieee80211_tx_status(hw, skb);

bypass_ack:
		if (++tx_done_tail >= MAX_TX_RING_DONE_SIZE)
			tx_done_tail = 0;
		desc->tx_desc_busy_cnt--;
	}

	writel(tx_done_tail, pcie_priv->iobase1 +
	       MACREG_REG_TXDONETAIL);
	desc->tx_done_tail = tx_done_tail;

	spin_unlock_bh(&pcie_priv->tx_desc_lock);
}
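
pcie_tx_done_ndp consumes the done ring between the hardware-advanced head and the driver-owned tail, masking both indices with a power-of-two size before writing the new tail back. A small sketch of that head/tail walk with illustrative sizes:

#include <stdio.h>

#define RING_DONE_SIZE 8        /* must be a power of two, as in the driver */

static unsigned int ring[RING_DONE_SIZE];

/* Consume entries from tail up to (but not including) head, wrapping as
 * the driver does, and return the new tail to be written to hardware. */
static unsigned int drain(unsigned int head, unsigned int tail)
{
    head &= RING_DONE_SIZE - 1;
    tail &= RING_DONE_SIZE - 1;

    while (head != tail) {
        printf("completed index %u\n", ring[tail]);
        if (++tail >= RING_DONE_SIZE)
            tail = 0;           /* wrap */
    }
    return tail;                /* would be written to TXDONETAIL */
}

int main(void)
{
    ring[0] = 5;
    ring[1] = 7;
    drain(2, 0);                /* hardware produced two completions */
    return 0;
}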
Example 15
bool
device_receive_frame(
	PSDevice pDevice,
	PSRxDesc pCurrRD
)
{
	PDEVICE_RD_INFO  pRDInfo = pCurrRD->pRDInfo;
	struct net_device_stats *pStats = &pDevice->stats;
	struct sk_buff *skb;
	PSMgmtObject    pMgmt = pDevice->pMgmt;
	PSRxMgmtPacket  pRxPacket = &(pDevice->pMgmt->sRxPacket);
	PS802_11Header  p802_11Header;
	unsigned char *pbyRsr;
	unsigned char *pbyNewRsr;
	unsigned char *pbyRSSI;
	PQWORD          pqwTSFTime;
	unsigned short *pwFrameSize;
	unsigned char *pbyFrame;
	bool bDeFragRx = false;
	bool bIsWEP = false;
	unsigned int cbHeaderOffset;
	unsigned int FrameSize;
	unsigned short wEtherType = 0;
	int             iSANodeIndex = -1;
	int             iDANodeIndex = -1;
	unsigned int ii;
	unsigned int cbIVOffset;
	bool bExtIV = false;
	unsigned char *pbyRxSts;
	unsigned char *pbyRxRate;
	unsigned char *pbySQ;
	unsigned int cbHeaderSize;
	PSKeyItem       pKey = NULL;
	unsigned short wRxTSC15_0 = 0;
	unsigned long dwRxTSC47_16 = 0;
	SKeyItem        STempKey;
	// 802.11h RPI
	unsigned long dwDuration = 0;
	long            ldBm = 0;
	long            ldBmThreshold = 0;
	PS802_11Header pMACHeader;
	bool bRxeapol_key = false;

	skb = pRDInfo->skb;

//PLICE_DEBUG->
	pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
			 pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
//PLICE_DEBUG<-
	pwFrameSize = (unsigned short *)(skb->data + 2);
	FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount);

	// Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
	// Min (ACK): 10HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
	if ((FrameSize > 2364) || (FrameSize <= 32)) {
		// Frame Size error drop this packet.
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 1\n");
		return false;
	}

	pbyRxSts = (unsigned char *)(skb->data);
	pbyRxRate = (unsigned char *)(skb->data + 1);
	pbyRsr = (unsigned char *)(skb->data + FrameSize - 1);
	pbyRSSI = (unsigned char *)(skb->data + FrameSize - 2);
	pbyNewRsr = (unsigned char *)(skb->data + FrameSize - 3);
	pbySQ = (unsigned char *)(skb->data + FrameSize - 4);
	pqwTSFTime = (PQWORD)(skb->data + FrameSize - 12);
	pbyFrame = (unsigned char *)(skb->data + 4);

	// get packet size
	FrameSize = cpu_to_le16(*pwFrameSize);

	if ((FrameSize > 2346) || (FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC
		// Min: 14 bytes ACK
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n");
		return false;
	}
//PLICE_DEBUG->
	// update receive statistic counter
	STAvUpdateRDStatCounter(&pDevice->scStatistic,
				*pbyRsr,
				*pbyNewRsr,
				*pbyRxRate,
				pbyFrame,
				FrameSize);

	pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8);
//PLICE_DEBUG<-
	if (pDevice->bMeasureInProgress) {
		if ((*pbyRsr & RSR_CRCOK) != 0)
			pDevice->byBasicMap |= 0x01;

		dwDuration = (FrameSize << 4);
		dwDuration /= acbyRxRate[*pbyRxRate%MAX_RATE];
		if (*pbyRxRate <= RATE_11M) {
			if (*pbyRxSts & 0x01) {
				// long preamble
				dwDuration += 192;
			} else {
				// short preamble
				dwDuration += 96;
			}
		} else {
			dwDuration += 16;
		}
		RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
		ldBmThreshold = -57;
		for (ii = 7; ii > 0;) {
			if (ldBm > ldBmThreshold)
				break;

			ldBmThreshold -= 5;
			ii--;
		}
		pDevice->dwRPIs[ii] += dwDuration;
		return false;
	}

	if (!is_multicast_ether_addr(pbyFrame)) {
		if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header)(skb->data + 4))) {
			pDevice->s802_11Counter.FrameDuplicateCount++;
			return false;
		}
	}

	// Use for TKIP MIC
	s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader);

	// filter packet send from myself
	if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr,
			     pDevice->abyCurrentNetAddr))
		return false;

	if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
		if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
			p802_11Header = (PS802_11Header)(pbyFrame);
			// get SA NodeIndex
			if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) {
				pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies;
				pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0;
			}
		}
	}

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
		if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex))
			return false;
	}

	if (IS_FC_WEP(pbyFrame)) {
		bool bRxDecryOK = false;

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx WEP pkt\n");
		bIsWEP = true;
		if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
			pKey = &STempKey;
			pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
			pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex;
			pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength;
			pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16;
			pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0;
			memcpy(pKey->abyKey,
			       &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0],
			       pKey->uKeyLength);

			bRxDecryOK = s_bHostWepRxEncryption(pDevice,
							    pbyFrame,
							    FrameSize,
							    pbyRsr,
							    pMgmt->sNodeDBTable[iSANodeIndex].bOnFly,
							    pKey,
							    pbyNewRsr,
							    &bExtIV,
							    &wRxTSC15_0,
							    &dwRxTSC47_16);
		} else {
			bRxDecryOK = s_bHandleRxEncryption(pDevice,
							   pbyFrame,
							   FrameSize,
							   pbyRsr,
							   pbyNewRsr,
							   &pKey,
							   &bExtIV,
							   &wRxTSC15_0,
							   &dwRxTSC47_16);
		}

		if (bRxDecryOK) {
			if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ICV Fail\n");
				if ((pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
				    (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
				    (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
				    (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
				    (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
					if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP))
						pDevice->s802_11Counter.TKIPICVErrors++;
					else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
						pDevice->s802_11Counter.CCMPDecryptErrors++;
				}
				return false;
			}
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP Func Fail\n");
			return false;
		}
		if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
			FrameSize -= 8;         // Message Integrity Code
		else
			FrameSize -= 4;         // 4 is ICV
	}

	//
	// RX OK
	//
	//remove the CRC length
	FrameSize -= ETH_FCS_LEN;

	if ((!(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address
	    (IS_FRAGMENT_PKT((skb->data+4)))) {
		// defragment
		bDeFragRx = WCTLbHandleFragment(pDevice, (PS802_11Header)(skb->data+4), FrameSize, bIsWEP, bExtIV);
		pDevice->s802_11Counter.ReceivedFragmentCount++;
		if (bDeFragRx) {
			// defrag complete
			skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb;
			FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength;

		} else {
			return false;
		}
	}

// Management & Control frame Handle
	if ((IS_TYPE_DATA((skb->data+4))) == false) {
		// Handle Control & Manage Frame

		if (IS_TYPE_MGMT((skb->data+4))) {
			unsigned char *pbyData1;
			unsigned char *pbyData2;

			pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4);
			pRxPacket->cbMPDULen = FrameSize;
			pRxPacket->uRSSI = *pbyRSSI;
			pRxPacket->bySQ = *pbySQ;
			HIDWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(HIDWORD(*pqwTSFTime));
			LODWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(LODWORD(*pqwTSFTime));
			if (bIsWEP) {
				// strip IV
				pbyData1 = WLAN_HDR_A3_DATA_PTR(skb->data+4);
				pbyData2 = WLAN_HDR_A3_DATA_PTR(skb->data+4) + 4;
				for (ii = 0; ii < (FrameSize - 4); ii++) {
					*pbyData1 = *pbyData2;
					pbyData1++;
					pbyData2++;
				}
			}
			pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate);
			pRxPacket->byRxChannel = (*pbyRxSts) >> 2;

			vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket);

			// hostap daemon handles 802.11 management frames
			if (pDevice->bEnableHostapd) {
				skb->dev = pDevice->apdev;
				skb->data += 4;
				skb->tail += 4;
				skb_put(skb, FrameSize);
				skb_reset_mac_header(skb);
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_802_2);
				memset(skb->cb, 0, sizeof(skb->cb));
				netif_rx(skb);
				return true;
			}
		}

		return false;
	} else {
Esempio n. 16
0
int islpci_eth_receive(islpci_private * private_config)
{
    struct net_device *nw_device = private_config->my_module;
    struct net_device *wds_dev = NULL;
    struct wds_net_local *wds_lp;
    isl38xx_control_block *control_block = private_config->control_block;
#ifdef WDS_LINKS
    struct wds_priv *wdsp = private_config->wdsp;
#endif    
    struct sk_buff *skb;
    u16 size;
    u32 index, offset;
    unsigned char *src;

#if VERBOSE > SHOW_ERROR_MESSAGES 
    DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
#endif

    // the device has written an Ethernet frame in the data area
    // of the sk_buff without updating the structure, do it now
	index = private_config->free_data_rx % ISL38XX_CB_RX_QSIZE;
    size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = private_config->data_low_rx[index];
    offset = ((u32) le32_to_cpu(control_block->rx_data_low[index].address) - 
        (u32) skb->data ) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES 
    DEBUG( SHOW_TRACING, "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ", 
        control_block->rx_data_low[private_config->free_data_rx].address,
		skb->data, skb->len, offset, skb->truesize );
#endif

    // delete the streaming DMA mapping before processing the skb
    pci_unmap_single( private_config->pci_device, 
        private_config->pci_map_rx_address[index],
        MAX_FRAGMENT_SIZE+2, PCI_DMA_FROMDEVICE );

	// update the skb structure and align the buffer
    skb_put( skb, size );
    if( offset )
    {
		// shift the buffer allocation offset bytes to get the right frame
        skb_pull( skb, 2 );
		skb_put( skb, 2 );
    }
    
#if VERBOSE > SHOW_ERROR_MESSAGES 
    // display the buffer contents for debugging
    DEBUG( SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data );
    display_buffer((char *) skb->data, skb->len );
#endif

    // check whether WDS is enabled and whether the data frame is a WDS frame

    if( init_wds )
    {
        // WDS enabled, check for the wds address on the first 6 bytes of the buffer
#ifdef WDS_LINKS
        wds_dev = wds_find_device( skb->data, wdsp ); 
#ifdef ISLPCI_ETH_DEBUG
        if ( wds_dev ) {
            printk("islpci_eth_receive:wds_find_device ! %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 
                   skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5] );
        } else {
            printk("islpci_eth_receive:wds_find_device %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 
                   skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5] );
        }
#endif
#endif
        src = skb->data+6;
        memmove( skb->data, src, skb->len-6 );
        skb_trim( skb, skb->len-6 );
    }

#if VERBOSE > SHOW_ERROR_MESSAGES 
    DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
    DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

    // display the buffer contents for debugging
    DEBUG( SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data );
    display_buffer((char *) skb->data, skb->len );
#endif

    // set some additional sk_buff and network layer parameters
    skb->dev = nw_device;
    skb->protocol = eth_type_trans(skb, nw_device);
    skb->ip_summed = CHECKSUM_NONE;
    private_config->statistics.rx_packets++;
    private_config->statistics.rx_bytes += size;

#ifdef WDS_LINKS
        if ( wds_dev != NULL )
        {
            wds_lp = (struct wds_net_local *) wds_dev->priv;
            wds_lp->stats.rx_packets++;
            wds_lp->stats.rx_bytes += skb->len;            
            skb->dev = wds_dev;
        }
#endif
    // deliver the skb to the network layer
#ifdef ISLPCI_ETH_DEBUG
    printk("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 
           skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5] );
#endif
    netif_rx(skb);

    // increment the read index for the rx data low queue
    private_config->free_data_rx++;

    // add one or more sk_buff structures
    while( index = le32_to_cpu( 
		control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] ), 
		index - private_config->free_data_rx < ISL38XX_CB_RX_QSIZE )
    {
        // allocate an sk_buff for received data frames storage
        // include any required alignment operations
        skb = dev_alloc_skb(MAX_FRAGMENT_SIZE+2);
        if (skb == NULL)
        {
            // error allocating an sk_buff structure
            DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
            break;
        }

		// store the new skb structure pointer
		index = index % ISL38XX_CB_RX_QSIZE;
		private_config->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES 
        DEBUG( SHOW_TRACING, "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ", 
            skb, skb->data, skb->len, index, skb->truesize );
#endif

        // set the streaming DMA mapping for proper PCI bus operation
		private_config->pci_map_rx_address[index] = pci_map_single(
			private_config->pci_device, (void *) skb->data, MAX_FRAGMENT_SIZE+2, 
			PCI_DMA_FROMDEVICE );
        if( private_config->pci_map_rx_address[index] == (dma_addr_t) NULL )
        {
            // error mapping the buffer to a device accessible memory address
            DEBUG(SHOW_ERROR_MESSAGES, "Error mapping DMA address\n");

            // free the skbuf structure before aborting
            dev_kfree_skb((struct sk_buff *) skb );
            break;
        }

        // update the fragment address
        control_block->rx_data_low[index].address = cpu_to_le32( (u32)
		 	private_config->pci_map_rx_address[index] );

        // increment the driver read pointer
        add_le32p(&control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1 );
    }

    // trigger the device
    isl38xx_trigger_device( &private_config->powerstate,
        private_config->remapped_device_base );

    return 0;
}
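The refill loop above compares the returned handle against NULL, which is not a reliable failure test; the usual check is pci_dma_mapping_error(). Below is a hedged sketch of a single refill step with illustrative names (my_rx_refill and the slot pointers), not the prism54 structures.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Allocate and map one receive buffer; on success the caller writes
 * *dma_slot into the descriptor's address field. */
static int my_rx_refill(struct pci_dev *pdev, struct sk_buff **skb_slot,
			dma_addr_t *dma_slot, unsigned int buf_len)
{
	struct sk_buff *skb = dev_alloc_skb(buf_len);
	dma_addr_t dma;

	if (!skb)
		return -ENOMEM;

	dma = pci_map_single(pdev, skb->data, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	*skb_slot = skb;
	*dma_slot = dma;
	return 0;
}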
Esempio n. 17
0
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
	struct sk_buff *skb = si->rxskb;
	dma_addr_t dma_addr;
	unsigned int len, stat, data;

	if (!skb) {
		printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
		return;
	}

	/*
	 * Get the current data position.
	 */
	sa1100_dma_get_current(si->rxdma, NULL, &dma_addr);
	len = dma_addr - si->rxbuf_dma;
	pci_unmap_single(NULL, si->rxbuf_dma, len, PCI_DMA_FROMDEVICE);

	do {
		/*
		 * Read Status, and then Data.
		 */
		stat = Ser2HSSR1;
		rmb();
		data = Ser2HSDR;

		if (stat & (HSSR1_CRE | HSSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & HSSR1_CRE)
				si->stats.rx_crc_errors++;
			if (stat & HSSR1_ROR)
				si->stats.rx_frame_errors++;
		} else
			skb->data[len++] = data;

		/*
		 * If we hit the end of frame, there's
		 * no point in continuing.
		 */
		if (stat & HSSR1_EOF)
			break;
	} while (Ser2HSSR0 & HSSR0_EIF);

	if (stat & HSSR1_EOF) {
		si->rxskb = NULL;

		skb_put(skb, len);
		skb->dev = dev;
		skb->mac.raw = skb->data;
		skb->protocol = htons(ETH_P_IRDA);
		si->stats.rx_packets++;
		si->stats.rx_bytes += len;

		/*
		 * Before we pass the buffer up, allocate a new one.
		 */
		sa1100_irda_rx_alloc(si);

		netif_rx(skb);
	} else {
		/*
		 * Remap the buffer.
		 */
		si->rxbuf_dma = pci_map_single(NULL, si->rxskb->data,
						HPSIR_MAX_RXLEN,
						PCI_DMA_FROMDEVICE);
	}
}
Esempio n. 18
0
/*
 * Process completed qtds for a qh, issuing completions if needed.
 * When freeing:  frees qtds, unmaps buf, returns URB to driver.
 * When not freeing (queued periodic qh):  retain qtds, mapping, and urb.
 * Races up to qh->hw_current; returns number of urb completions.
 */
static int
qh_completions (
	struct ehci_hcd		*ehci,
	struct ehci_qh		*qh,
	int			freeing
) {
	struct ehci_qtd		*qtd, *last;
	struct list_head	*next, *qtd_list = &qh->qtd_list;
	int			unlink = 0, halted = 0;
	unsigned long		flags;
	int			retval = 0;

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely (list_empty (qtd_list))) {
		spin_unlock_irqrestore (&ehci->lock, flags);
		return retval;
	}

	/* scan QTDs till end of list, or we reach an active one */
	for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list),
			    	last = 0, next = 0;
			next != qtd_list;
			last = qtd, qtd = list_entry (next,
						struct ehci_qtd, qtd_list)) {
		struct urb	*urb = qtd->urb;
		u32		token = 0;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				/* complete() can reenter this HCD */
				spin_unlock_irqrestore (&ehci->lock, flags);
				if (likely (freeing != 0))
					ehci_urb_done (ehci, last->buf_dma,
						last->urb);
				else
					ehci_urb_complete (ehci, last->buf_dma,
						last->urb);
				spin_lock_irqsave (&ehci->lock, flags);
				retval++;
			}

			/* qh overlays can have HC's old cached copies of
			 * next qtd ptrs, if an URB was queued afterwards.
			 */
			if (cpu_to_le32 (last->qtd_dma) == qh->hw_current
					&& last->hw_next != qh->hw_qtd_next) {
				qh->hw_alt_next = last->hw_alt_next;
				qh->hw_qtd_next = last->hw_next;
			}

			if (likely (freeing != 0))
				ehci_qtd_free (ehci, last);
			last = 0;
		}
		next = qtd->qtd_list.next;

		/* QTDs at tail may be active if QH+HC are running,
		 * or when unlinking some urbs queued to this QH
		 */
		token = le32_to_cpu (qtd->hw_token);
		halted = halted
			|| (__constant_cpu_to_le32 (QTD_STS_HALT)
				& qh->hw_token) != 0
			|| (ehci->hcd.state == USB_STATE_HALT)
			|| (qh->qh_state == QH_STATE_IDLE);

		/* fault: unlink the rest, since this qtd saw an error? */
		if (unlikely ((token & QTD_STS_HALT) != 0)) {
			freeing = unlink = 1;
			/* status copied below */

		/* QH halts only because of fault (above) or unlink (here). */
		} else if (unlikely (halted != 0)) {

			/* unlinking everything because of HC shutdown? */
			if (ehci->hcd.state == USB_STATE_HALT) {
				freeing = unlink = 1;

			/* explicit unlink, maybe starting here? */
			} else if (qh->qh_state == QH_STATE_IDLE
					&& (urb->status == -ECONNRESET
						|| urb->status == -ENOENT)) {
				freeing = unlink = 1;

			/* QH halted to unlink urbs _after_ this?  */
			} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
				qtd = 0;
				continue;
			}

			/* unlink the rest?  once we start unlinking, after
			 * a fault or explicit unlink, we unlink all later
			 * urbs.  usb spec requires that.
			 */
			if (unlink && urb->status == -EINPROGRESS)
				urb->status = -ECONNRESET;

		/* Else QH is active, so we must not modify QTDs
		 * that HC may be working on.  No more qtds to check.
		 */
		} else if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
			next = qtd_list;
			qtd = 0;
			continue;
		}

		spin_lock (&urb->lock);
		qtd_copy_status (urb, qtd->length, token);
		spin_unlock (&urb->lock);

		/*
		 * NOTE:  this won't work right with interrupt urbs that
		 * need multiple qtds ... only the first scan of qh->qtd_list
		 * starts at the right qtd, yet multiple scans could happen
		 * for transfers that are scheduled across multiple uframes. 
		 * (Such schedules are not currently allowed!)
		 */
		if (likely (freeing != 0))
			list_del (&qtd->qtd_list);
		else {
			/* restore everything the HC could change
			 * from an interrupt QTD
			 */
			qtd->hw_token = (qtd->hw_token
					& __constant_cpu_to_le32 (0x8300))
				| cpu_to_le32 (qtd->length << 16)
				| __constant_cpu_to_le32 (QTD_STS_ACTIVE
					| (EHCI_TUNE_CERR << 10));
			qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);

			/* this offset, and the length above,
			 * are likely wrong on QTDs #2..N
			 */
			qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
		}

#if 0
		if (urb->status == -EINPROGRESS)
			vdbg ("  qtd %p ok, urb %p, token %8x, len %d",
				qtd, urb, token, urb->actual_length);
		else
			vdbg ("urb %p status %d, qtd %p, token %8x, len %d",
				urb, urb->status, qtd, token,
				urb->actual_length);
#endif

		/* SETUP for control urb? */
		if (unlikely (QTD_PID (token) == 2))
			pci_unmap_single (ehci->hcd.pdev,
				qtd->buf_dma, sizeof (devrequest),
				PCI_DMA_TODEVICE);
	}

	/* patch up list head? */
	if (unlikely (halted && !list_empty (qtd_list))) {
		qh_update (qh, list_entry (qtd_list->next,
				struct ehci_qtd, qtd_list));
	}
Esempio n. 19
0
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = le32_to_cpu(desc->status);

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				pci_unmap_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pdev,
							skb->tail,
							skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = cpu_to_le32(np->rx_addr[entry]);
		}
		wmb();
		np->rx_ring[entry].status = cpu_to_le32(DescOwn);
	}

	return 0;
}
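The rx_copybreak branch above is the classic trade-off: for small frames, sync the existing mapping and copy into a freshly allocated small skb so the ring buffer stays in place; for large frames, unmap and hand the ring skb up, forcing a refill. A hedged sketch of that decision with illustrative names (the real driver keeps this state in np->rx_skbuff and np->rx_addr):

#include <linux/pci.h>
#include <linux/skbuff.h>

static struct sk_buff *my_rx_copybreak(struct pci_dev *pdev,
				       struct sk_buff **ring_skb,
				       dma_addr_t dma, unsigned int buf_len,
				       unsigned int pkt_len,
				       unsigned int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		pci_dma_sync_single_for_cpu(pdev, dma, buf_len,
					    PCI_DMA_FROMDEVICE);
		skb_copy_to_linear_data(skb, (*ring_skb)->data, pkt_len);
		skb_put(skb, pkt_len);
		pci_dma_sync_single_for_device(pdev, dma, buf_len,
					       PCI_DMA_FROMDEVICE);
		/* *ring_skb stays in the ring, still mapped */
	} else {
		pci_unmap_single(pdev, dma, buf_len, PCI_DMA_FROMDEVICE);
		skb = *ring_skb;
		skb_put(skb, pkt_len);
		*ring_skb = NULL;	/* caller must refill this slot */
	}
	return skb;
}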
Esempio n. 20
0
File: c2.c Project: mecke/linux-2.6
static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising of the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx(). The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->last_rx = jiffies;
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}
Esempio n. 21
0
File: dl2k.c Project: Lyude/linux
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			dev->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				dev->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				dev->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				dev->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				dev->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
				!(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
Esempio n. 22
0
/*
 *  Generate beacon frame and queue cab data for a vap.
 *
 *  Updates the contents of the beacon frame.  It is assumed that the buffer for
 *  the beacon frame has been allocated in the ATH object, and simply needs to
 *  be filled for this cycle.  Also, any CAB (content-after-beacon) traffic will
 *  be added to the beacon frame at this point.
*/
static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
{
	struct ath_buf *bf;
	struct ath_vap *avp;
	struct sk_buff *skb;
	struct ath_txq *cabq;
	struct ieee80211_tx_info *info;
	int cabq_depth;

	avp = sc->sc_vaps[if_id];
	ASSERT(avp);

	cabq = sc->sc_cabq;

	if (avp->av_bcbuf == NULL) {
		DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
			__func__, avp, avp->av_bcbuf);
		return NULL;
	}

	bf = avp->av_bcbuf;
	skb = (struct sk_buff *)bf->bf_mpdu;
	if (skb) {
		pci_unmap_single(sc->pdev, bf->bf_dmacontext,
				 skb_end_pointer(skb) - skb->head,
				 PCI_DMA_TODEVICE);
	}

	skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
	bf->bf_mpdu = skb;
	if (skb == NULL)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		/*
		 * TODO: make sure the seq# gets assigned properly (vs. other
		 * TX frames)
		 */
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		sc->seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
	}

	bf->bf_buf_addr = bf->bf_dmacontext =
		pci_map_single(sc->pdev, skb->data,
			       skb_end_pointer(skb) - skb->head,
			       PCI_DMA_TODEVICE);

	skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);

	/*
	 * if the CABQ traffic from previous DTIM is pending and the current
	 *  beacon is also a DTIM.
	 *  1) if there is only one vap let the cab traffic continue.
	 *  2) if there are more than one vap and we are using staggered
	 *     beacons, then drain the cabq by dropping all the frames in
	 *     the cabq so that the current vaps cab traffic can be scheduled.
	 */
	spin_lock_bh(&cabq->axq_lock);
	cabq_depth = cabq->axq_depth;
	spin_unlock_bh(&cabq->axq_lock);

	if (skb && cabq_depth) {
		/*
		 * Unlock the cabq lock as ath_tx_draintxq acquires
		 * the lock again which is a common function and that
		 * acquires txq lock inside.
		 */
		if (sc->sc_nvaps > 1) {
			ath_tx_draintxq(sc, cabq, false);
			DPRINTF(sc, ATH_DBG_BEACON,
				"%s: flush previous cabq traffic\n", __func__);
		}
	}

	/* Construct tx descriptor. */
	ath_beacon_setup(sc, avp, bf);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	while (skb) {
		ath_tx_cabq(sc, skb);
		skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
	}

	return bf;
}
Esempio n. 23
0
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
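The unwind path above has to remember whether the outstanding mapping came from pci_map_single() or pci_map_page() so it can call the matching unmap. A hedged sketch of that bookkeeping with illustrative field names:

#include <linux/pci.h>
#include <linux/types.h>

struct my_dma_piece {
	dma_addr_t addr;
	unsigned int len;	/* zero when nothing is mapped */
	bool single;		/* true: pci_map_single, false: pci_map_page */
};

/* Drop a pending TX mapping with the unmap call that matches how it was made. */
static void my_dma_piece_undo(struct pci_dev *pdev, struct my_dma_piece *p)
{
	if (!p->len)
		return;
	if (p->single)
		pci_unmap_single(pdev, p->addr, p->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, p->addr, p->len, PCI_DMA_TODEVICE);
	p->len = 0;
}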
Esempio n. 24
0
static int mic_psmi_alloc_buffer(mic_ctx_t *mic_ctx)
{
	int i, j, ret;
	void *va;
	dma_addr_t dma_hndl;
	struct mic_psmi_ctx *psmi_ctx = &mic_ctx->bi_psmi;

	/* allocate psmi page tables */
	psmi_ctx->nr_dma_pages =
		ALIGN(psmi_ctx->dma_mem_size, 
				MIC_PSMI_PAGE_SIZE) / MIC_PSMI_PAGE_SIZE;
	if ((psmi_ctx->va_tbl =
		kmalloc(psmi_ctx->nr_dma_pages *
				sizeof(struct mic_psmi_pte), GFP_KERNEL)) == NULL) {
		printk("mic: psmi va table alloc failed\n");
		return -ENOMEM;
	}
	psmi_ctx->dma_tbl_size =
		(psmi_ctx->nr_dma_pages + 2) * sizeof(struct mic_psmi_pte);
	if ((psmi_ctx->dma_tbl =
			kmalloc(psmi_ctx->dma_tbl_size, GFP_KERNEL)) == NULL) {
		printk("mic: psmi dma table alloc failed\n");
		ret = -ENOMEM;
		goto free_va_tbl;
	}
	psmi_ctx->dma_tbl_hndl =
		pci_map_single(mic_ctx->bi_pdev, 
			psmi_ctx->dma_tbl, psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(mic_ctx->bi_pdev, 
						psmi_ctx->dma_tbl_hndl)) {
		printk("mic: psmi dma table mapping failed\n");
		ret = -ENOMEM;
		goto free_dma_tbl;
	}

	/* allocate psmi pages */
	for (i = 0; i < psmi_ctx->nr_dma_pages; i++) {
		if ((va = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_HIGHMEM,
					MIC_PSMI_PAGE_ORDER)) == NULL) {
			printk("mic: psmi page alloc failed: %d\n", i);
			ret = -ENOMEM;
			goto free_ptes;
		}
		memset(va, 0, MIC_PSMI_PAGE_SIZE);
		dma_hndl = pci_map_single(mic_ctx->bi_pdev, va, 
						MIC_PSMI_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(mic_ctx->bi_pdev, dma_hndl)) {
			printk("mic: psmi page mapping failed: %d\n", i);
			free_pages((unsigned long)va, MIC_PSMI_PAGE_ORDER);
			ret = -ENOMEM;
			goto free_ptes;
		}
		psmi_ctx->dma_tbl[i + 1].pa = dma_hndl;
		psmi_ctx->va_tbl[i].pa = (uint64_t)va;
	}
	psmi_ctx->dma_tbl[0].pa = MIC_PSMI_SIGNATURE;
	psmi_ctx->dma_tbl[psmi_ctx->nr_dma_pages + 1].pa = MIC_PSMI_SIGNATURE;
	printk("mic: psmi #%d, %ld bytes, "
			"dma_tbl va=0x%lx hndl=0x%lx\n", mic_ctx->bi_id + 1, 
			(unsigned long)psmi_ctx->dma_mem_size, 
			(unsigned long)psmi_ctx->dma_tbl, 
			(unsigned long)psmi_ctx->dma_tbl_hndl);
	return 0;
free_ptes:
	for (j = 1; j < i; j++)
		mic_psmi_free_pte(mic_ctx, j);
	pci_unmap_single(mic_ctx->bi_pdev, 
		psmi_ctx->dma_tbl_hndl, psmi_ctx->dma_tbl_size, PCI_DMA_BIDIRECTIONAL);
free_dma_tbl:
	kfree(psmi_ctx->dma_tbl);
	psmi_ctx->dma_tbl = NULL;
free_va_tbl:
	kfree(psmi_ctx->va_tbl);
	psmi_ctx->va_tbl = NULL;
	return ret;
}
Esempio n. 25
0
void
_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
                   int direction)
{
	return pci_unmap_single(dev, dma_addr, size, direction);
}
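This compat shim is worth pausing on: in later kernels the pci_* DMA helpers themselves became thin wrappers over the generic DMA API (and were eventually removed). As far as I recall from include/linux/pci-dma-compat.h, pci_unmap_single() forwarded roughly as sketched below; treat this as a paraphrase, not a verbatim copy of the kernel source.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Approximation of the old compat definition: forward to the generic DMA
 * API on the PCI device's struct device. */
static inline void
my_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		    size_t size, int direction)
{
	dma_unmap_single(&hwdev->dev, dma_addr, size,
			 (enum dma_data_direction)direction);
}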
Esempio n. 26
0
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 frag->size, PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}
Esempio n. 27
0
/* NOTE: doesn't handle new transfer while old is running. */
int dma_init_kthread(void *data) {

  ssize_t last_size;
  last_size = 0;
  
  while ( !kthread_should_stop() ) {

    /* done with previous transfer */
    total_size -= dma_size;

    /* unmap last DMA mapping */
    pci_unmap_single(dev_ptr, dma_bus_addr, 
		     dma_size, PCI_DMA_TODEVICE);

    if ( total_size > 0 ) {
      
      /* update transferred size thus far */
      last_size += dma_size;

      /* assign next transfer size */
      dma_size = MIN(total_size, 4 * ALMOST_EMPTY * 1024);

      /* map next DMA buffer */
      dma_bus_addr = pci_map_single(dev_ptr, dma_virt_addr + last_size, 
				    dma_size, PCI_DMA_TODEVICE);

     #if DEBUG != 0
      printk(KERN_DEBUG "NEXT DMA TRANSFER OF SIZE %u, "
	     "offset last_size %u, DELAY %u microseconds\n", 
	     (unsigned)dma_size, (unsigned)last_size, 
	     (unsigned)dma_delay);
     #endif
    }  
    else {
      kfree(dma_virt_addr);
      goto sleep;
    }

    /* wait for FIFO to deplete to start transfer */
    if ( dma_delay ) {
      dma_delay /= 1000; /* ns -> ~us */
      usleep_range(dma_delay - 1, dma_delay);
    }

    /* clear interrupts and disable DMA */
    iowrite8(0x08, timing_card[12].base + 0xa9);
    iowrite8(0x00, timing_card[12].base + 0xa9);

    /* Mode - 32 bit bus, don't increment local addr, enable interrupt */
    iowrite32(cpu_to_le32(0x00020c01),   timing_card[12].base + 0x94); 

    /* PCI and local bus addresses, transfer count, transfer direction */
    iowrite32(cpu_to_le32(dma_bus_addr), timing_card[12].base + 0x98);
    iowrite32(cpu_to_le32(0x14),         timing_card[12].base + 0x9c);
    iowrite32(cpu_to_le32(dma_size),     timing_card[12].base + 0xa0);
    iowrite32(0x00,                      timing_card[12].base + 0xa4);

    /* Enable DMA */
    iowrite8( 0x01, timing_card[12].base + 0xa9);
 
    /* Start DMA, record start time */
    iowrite8( 0x03, timing_card[12].base + 0xa9);
    start_ns = ktime_to_ns(ktime_get());
    
   sleep:
    set_current_state(TASK_INTERRUPTIBLE);
    schedule();
  }
  
  /* I am dying now */
  return 0;
} /* end kthread function */
Esempio n. 28
0
struct net_device_stats *benet_get_stats(struct net_device *dev)
{
	struct be_net_object *pnob = netdev_priv(dev);
	struct be_adapter *adapter = pnob->adapter;
	u64 pa;
	struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;

	if (adapter->dev_state != BE_DEV_STATE_OPEN) {
		/* Return previously read stats */
		return &(adapter->benet_stats);
	}
	/* Get Physical Addr */
	pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
			    sizeof(struct FWCMD_ETH_GET_STATISTICS),
			    PCI_DMA_FROMDEVICE);
	ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
	atomic_inc(&ctxt->get_stat_flag);

	be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
				    cpu_to_le64(pa), be_get_stat_cb, ctxt,
				    NULL);

	ctxt->get_stats_timer.data = (unsigned long)ctxt;
	mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
	down((void *)ctxt->get_stat_sem_addr);	/* callback will unblock us */

	/* Adding port0 and port1 stats. */
	adapter->benet_stats.rx_packets =
	    adapter->eth_statsp->params.response.p0recvdtotalframes +
	    adapter->eth_statsp->params.response.p1recvdtotalframes;
	adapter->benet_stats.tx_packets =
	    adapter->eth_statsp->params.response.p0xmitunicastframes +
	    adapter->eth_statsp->params.response.p1xmitunicastframes;
	adapter->benet_stats.tx_bytes =
	    adapter->eth_statsp->params.response.p0xmitbyteslsd +
	    adapter->eth_statsp->params.response.p1xmitbyteslsd;
	adapter->benet_stats.rx_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_bytes =
	    adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
	    adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
	adapter->benet_stats.rx_crc_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;

	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;
	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitbroadcastframes +
	    adapter->eth_statsp->params.response.p1xmitbroadcastframes;
	adapter->benet_stats.tx_errors = 0;

	adapter->benet_stats.multicast =
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;

	adapter->benet_stats.rx_fifo_errors =
	    adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
	    adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
	adapter->benet_stats.rx_frame_errors =
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_length_errors =
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0outrangeerrors +
	    adapter->eth_statsp->params.response.p1outrangeerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0frametoolongerrors +
	    adapter->eth_statsp->params.response.p1frametoolongerrors;

	/* unmap with the DMA handle returned by pci_map_single(), not the
	 * virtual address of the stats buffer */
	pci_unmap_single(adapter->pdev, pa,
			 sizeof(struct FWCMD_ETH_GET_STATISTICS),
			 PCI_DMA_FROMDEVICE);
	return &(adapter->benet_stats);

}
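Note the pairing rule this function depends on: pci_unmap_single() must be given the dma_addr_t that pci_map_single() returned (pa above), never the kernel virtual pointer to the buffer. A minimal hedged sketch of that map/use/unmap pairing with hypothetical names:

#include <linux/errno.h>
#include <linux/pci.h>

static int my_do_dma_query(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t pa;

	pa = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, pa))
		return -EIO;

	/* ... hand 'pa' to the device and wait for the reply ... */

	pci_unmap_single(pdev, pa, len, PCI_DMA_FROMDEVICE);
	return 0;
}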
Esempio n. 29
0
static void rtl8180_handle_rx(struct ieee80211_hw *dev)
{
	struct rtl8180_priv *priv = dev->priv;
	unsigned int count = 32;

	while (count--) {
		struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
		struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
		u32 flags = le32_to_cpu(entry->flags);

		if (flags & RTL818X_RX_DESC_FLAG_OWN)
			return;

		if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
				      RTL818X_RX_DESC_FLAG_FOF |
				      RTL818X_RX_DESC_FLAG_RX_ERR)))
			goto done;
		else {
			u32 flags2 = le32_to_cpu(entry->flags2);
			struct ieee80211_rx_status rx_status = {0};
			struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE);

			if (unlikely(!new_skb))
				goto done;

			pci_unmap_single(priv->pdev,
					 *((dma_addr_t *)skb->cb),
					 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
			skb_put(skb, flags & 0xFFF);

			rx_status.antenna = (flags2 >> 15) & 1;
			/* TODO: improve signal/rssi reporting */
			rx_status.qual = flags2 & 0xFF;
			rx_status.signal = (flags2 >> 8) & 0x7F;
			/* XXX: is this correct? */
			rx_status.rate_idx = (flags >> 20) & 0xF;
			rx_status.freq = dev->conf.channel->center_freq;
			rx_status.band = dev->conf.channel->band;
			rx_status.mactime = le64_to_cpu(entry->tsft);
			rx_status.flag |= RX_FLAG_TSFT;
			if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
				rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);

			skb = new_skb;
			priv->rx_buf[priv->rx_idx] = skb;
			*((dma_addr_t *) skb->cb) =
				pci_map_single(priv->pdev, skb_tail_pointer(skb),
					       MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
		}

	done:
		entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
		if (priv->rx_idx == 31)
			entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
		priv->rx_idx = (priv->rx_idx + 1) % 32;
	}
}
Esempio n. 30
0
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		if (!skb)
			BUG();

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = cp->rx_skb[rx_tail].mapping;

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       cp->dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping =
		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}
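This poll routine uses the pre-2.6.24 NAPI interface (dev->quota and the *budget parameter). On the current NAPI API the same loop shape hangs off a struct napi_struct instead; a hedged outline with the ring handling elided and a hypothetical function name:

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' rx descriptors, unmapping or syncing
	 * each buffer exactly as cp_rx_poll() does, counting work_done ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}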