Example #1
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_tx(port);
	card_t *card = port_to_card(port);
	u8 stat;

	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(phy_node(port)), card);

	while (1) {
		pkt_desc __iomem *desc;

		u32 desc_off = desc_offset(port, port->txlast, 1);
		u32 cda = sca_inw(dmac + CDAL, card);
		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* Transmitter is sending or about to send this frame */

		desc = desc_address(port, port->txlast, 1);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += readw(&desc->len);
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = next_desc(port, port->txlast, 1);
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
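
The loop above leans on descriptor helpers the excerpt doesn't show. As a hedged sketch only, with field names and ring-size fields inferred from how they are used (sizeof(pkt_desc) offsets, readw(&desc->len), writeb(0, &desc->stat)), the layout and ring-advance helper might look like:

/* Sketch, not the driver's actual definitions. */
typedef struct {
	u32 cp;		/* chain pointer: bus address of the next descriptor */
	u32 bp;		/* buffer pointer */
	u16 len;	/* data length */
	u8 stat;	/* status; 0 means the descriptor is free */
	u8 unused;	/* pad to a 4-byte multiple */
} pkt_desc;

static inline u16 next_desc(port_t *port, u16 desc, int transmit)
{
	/* Advance one slot, wrapping at the ring size for this direction;
	 * tx_ring_buffers/rx_ring_buffers are assumed card fields. */
	return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
				      : port_to_card(port)->rx_ring_buffers);
}
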
Example #3
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_rx(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(phy_node(port)), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (1) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inw(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else
			sca_rx(card, port, desc, port->rxin);

		/* Set new error descriptor address */
		sca_outw(desc_off, dmac + EDAL, card);
		port->rxin = next_desc(port, port->rxin, 0);
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
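
The good-frame branch hands the descriptor to sca_rx(), which the excerpt omits. A minimal sketch of such a consumer, assuming a buffer_offset() helper that returns the frame's __iomem address plus ordinary skb plumbing (all of these names are assumptions, not the driver's code):

static void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
{
	struct net_device *dev = port_to_dev(port);
	struct sk_buff *skb;
	u16 len = readw(&desc->len);

	skb = dev_alloc_skb(len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	/* Copy the frame out of the on-card ring buffer; buffer_offset()
	 * is an assumed helper mapping (port, rxin) to the frame data. */
	memcpy_fromio(skb_put(skb, len), buffer_offset(port, rxin, 0), len);
	skb->protocol = hdlc_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	netif_rx(skb);
}
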
Example #4
/* Translate the descriptor chain starting at head into an iovec array;
 * returns head and reports the element count and total byte length. */
u16 virtio_queue_get_head_iovec(struct virtio_queue *vq,
				u16 head, struct virtio_iovec *iov,
				u32 *ret_iov_cnt, u32 *ret_total_len)
{
	struct vring_desc *desc;
	u16 idx, max;
	int i = 0;

	if (!vq->addr) {
		*ret_iov_cnt = 0;
		*ret_total_len = 0;
		return 0;
	}

	idx = head;

	*ret_iov_cnt = 0;
	*ret_total_len = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
		/* Indirect descriptor tables are not handled yet. */
#if 0
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
#endif
	}

	do {
		iov[i].addr = desc[idx].addr;
		iov[i].len = desc[idx].len;

		*ret_total_len += desc[idx].len;

		if (desc[idx].flags & VRING_DESC_F_WRITE) {
			iov[i].flags = 1;  /* Write */
		} else {
			iov[i].flags = 0; /* Read */
		}

		i++;

	} while ((idx = next_desc(desc, idx, max)) != max);

	*ret_iov_cnt = i;

	return head;
}
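
The walk ends when next_desc() returns max. A plausible implementation matching that contract (an assumption modeled on the call sites, not the project's code):

static u16 next_desc(struct vring_desc *desc, u16 idx, u16 max)
{
	u16 next;

	/* No chain flag: signal end-of-chain by returning max. */
	if (!(desc[idx].flags & VRING_DESC_F_NEXT))
		return max;

	/* Refuse to follow a next index that points past the table. */
	next = desc[idx].next;
	if (next >= max)
		return max;

	return next;
}
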
Example #6
/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which
 * is never a valid descriptor number) if none was found. */
unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			   struct iovec iov[], unsigned int iov_size,
			   unsigned int *out_num, unsigned int *in_num,
			   struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (get_user(vq->avail_idx, &vq->avail->idx)) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return vq->num;
	}

	if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return vq->num;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return vq->num;
	}

	/* If their number is silly, that's an error. */
	if (head >= vq->num) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return vq->num;
	}

	/* When we start, there are no input or output descriptors. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (i >= vq->num) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return vq->num;
		}
		if (++found > vq->num) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return vq->num;
		}
		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (ret) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (ret < 0) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return vq->num;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return vq->num;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;
	return head;
}
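
Here next_desc() takes the descriptor by value and returns -1 to terminate the chain. A sketch consistent with that contract (hedged: the field names come from struct vring_desc, and the barrier choice mirrors the commentary above):

static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	next = desc->next;
	/* The result indexes descriptors we're about to read, so most
	 * architectures only need a data-dependency barrier here. */
	read_barrier_depends();
	return next;
}
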
Example #7
static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			     struct iovec iov[], unsigned int iov_size,
			     unsigned int *out_num, unsigned int *in_num,
			     struct vhost_log *log, unsigned int *log_num,
			     struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (indirect->len % sizeof desc) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     ARRAY_SIZE(vq->indirect));
	if (ret < 0) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (count > USHORT_MAX + 1) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (++found > count) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
				     sizeof desc)) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}
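
For orientation, a backend typically drains the ring by calling vhost_get_vq_desc() until it returns vq->num, then publishes each finished head via the vhost core's vhost_add_used(). A hedged usage sketch (the iovec bound and the handler body are placeholders):

static void handle_vq_sketch(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	struct iovec iov[64];	/* placeholder bound, backend-specific */
	unsigned int out, in;
	unsigned head;

	for (;;) {
		head = vhost_get_vq_desc(dev, vq, iov, ARRAY_SIZE(iov),
					 &out, &in, NULL, NULL);
		if (head == vq->num)	/* nothing left in the avail ring */
			break;
		/* ... read `out` output iovecs, then fill `in` input ones ... */
		vhost_add_used(vq, head, 0);	/* report 0 bytes written */
	}
}
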
Example #8
/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start, there are no input or output descriptors. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}
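
Once the chain has been serviced, the head goes back to the guest through the used ring. A sketch of that counterpart under the same vring structures (the function name and the omitted interrupt bookkeeping are assumptions):

static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/* Fill the next used-ring slot... */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* ...and make sure it's visible before the index the guest polls. */
	wmb();
	vq->vring.used->idx++;
}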