Example #1
/* Get packet from user space buffer(already verified) */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
{
	struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = count, align = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) > count)
			return -EINVAL;

 		if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
 			return -EFAULT;
	}

	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV)
		align = NET_IP_ALIGN;
  
	if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
		tun->stats.rx_dropped++;
		return -ENOMEM;
	}

	if (align)
		skb_reserve(skb, align);
 	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
		tun->stats.rx_dropped++;
		kfree_skb(skb);
 		return -EFAULT;
	}

	skb->dev = &tun->dev;
	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		skb->mac.raw = skb->data;
		skb->protocol = pi.proto;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, &tun->dev);
		break;
	};

	if (tun->flags & TUN_NOCHECKSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
	netif_rx_ni(skb);
   
	tun->stats.rx_packets++;
	tun->stats.rx_bytes += len;

	return count;
} 
Example #2
/* virtual */ SysStatusUval
FileLinuxFile::writev(const struct iovec *vec, uval vecCount,
		      ThreadWait **tw, GenState &moreAvail)
{
    FLFDEBUG("writev");

    SysStatusUval rc;
    uval length_write;
    char* buf_write;
    uval length = vecLength(vec, vecCount);

    lock();
    moreAvail.state = FileLinux::READ_AVAIL|FileLinux::WRITE_AVAIL;

    rc = locked_writeAlloc(length, buf_write, tw);
    if (_FAILURE(rc)) {
	unLock();
	return rc;
    }
    length_write = _SGETUVAL(rc);
    if (length_write) {
	memcpy_fromiovec(buf_write, vec, vecCount, length_write);
	locked_writeFree(buf_write);
    }
    unLock();
    return rc;
}
Example #3
static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len, int noblock, 
	int flags)
{
	if(msg->msg_iovlen==1)
		return raw_sendto(sk,msg->msg_iov[0].iov_base,len, noblock, flags, msg->msg_name, msg->msg_namelen);
	else
	{
		/*
		 *	For awkward cases we linearise the buffer first. In theory this is only frames
		 *	whose iovec's don't split on 4 byte boundaries, and soon encrypted stuff (to keep
		 *	skip happy). We are a bit more general about it.
		 */
		 
		unsigned char *buf;
		int fs;
		int err;
		if(len>65515)
			return -EMSGSIZE;
		buf=kmalloc(len, GFP_KERNEL);
		if(buf==NULL)
			return -ENOBUFS;
		memcpy_fromiovec(buf, msg->msg_iov, len);
		fs=get_fs();
		set_fs(get_ds());
		err=raw_sendto(sk,buf,len, noblock, flags, msg->msg_name, msg->msg_namelen);
		set_fs(fs);
		kfree_s(buf,len);
		return err;
	}
}
Example #4
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct sco_conn *conn = sco_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count;

	/* Check outgoing MTU */
	if (len > conn->mtu)
		return -EINVAL;

	BT_DBG("sk %p len %d", sk, len);

	count = MIN(conn->mtu, len);
	if (!(skb = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
		return err;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	if ((err = hci_send_sco(conn->hcon, skb)) < 0)
		goto fail;

	return count;

fail:
	kfree_skb(skb);
	return err;
}
Example #5
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}
Example #6
static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	if (size != sizeof(struct can_frame))
		return -EINVAL;

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
				  &err);
	if (!skb)
		goto put_dev;

	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
	if (err < 0)
		goto free_skb;
	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
	if (err < 0)
		goto free_skb;
	skb->dev = dev;
	skb->sk  = sk;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
Example #7
static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_opt *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	DBG("socket %p, sk %p\n", sock, sk);

	if (msg->msg_name) {
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	dev = dev_get_by_index(ifindex);
	if (!dev) {
		DBG("device %d not found\n", ifindex);
		return -ENXIO;
	}

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		dev_put(dev);
		return -ENOMEM;
	}

	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
	if (err < 0) {
		kfree_skb(skb);
		dev_put(dev);
		return err;
	}
	skb->dev = dev;
	skb->sk  = sk;

	DBG("sending skbuff to interface %d\n", ifindex);
	DBG_SKB(skb);

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		return err;

	return size;
}
Example #8
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_write_xmit(sk, skb, &timeo);
	/*
	 * XXX we don't use sk_write_queue, so just discard the packet.
	 *     Current plan however is to _use_ sk_write_queue with
	 *     an algorithm similar to tcp_sendmsg, where the main difference
	 *     is that in DCCP we have to respect packet boundaries, so
	 *     no coalescing of skbs.
	 *
	 *     This bug was _quickly_ found & fixed by just looking at an OSTRA
	 *     generated callgraph 8) -acme
	 */
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}
Example #9
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	
	
	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk,0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}
Example #10
int
__VMCIMemcpyToQueue(VMCIQueue *queue,   // OUT:
                    uint64 queueOffset, // IN:
                    const void *src,    // IN:
                    size_t size,        // IN:
                    Bool isIovec)       // IN: if src is a struct iovec *
{
    size_t bytesCopied = 0;

    while (bytesCopied < size) {
        uint64 pageIndex = (queueOffset + bytesCopied) / PAGE_SIZE;
        size_t pageOffset = (queueOffset + bytesCopied) & (PAGE_SIZE - 1);
        void *va = kmap(queue->page[pageIndex]);
        size_t toCopy;

        ASSERT(va);
        if (size - bytesCopied > PAGE_SIZE - pageOffset) {
            /* Enough payload to fill up from this page. */
            toCopy = PAGE_SIZE - pageOffset;
        } else {
            toCopy = size - bytesCopied;
        }

        if (isIovec) {
            struct iovec *iov = (struct iovec *)src;
            int err;

            /* The iovec will track bytesCopied internally. */
            err = memcpy_fromiovec((uint8 *)va + pageOffset, iov, toCopy);
            if (err != 0) {
                kunmap(queue->page[pageIndex]);
                return err;
            }
        } else {
            memcpy((uint8 *)va + pageOffset, (uint8 *)src + bytesCopied, toCopy);
        }

        bytesCopied += toCopy;
        kunmap(queue->page[pageIndex]);
    }

    return 0;
}
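The loop above leans on the property called out in its comment: memcpy_fromiovec() consumes the iovec as it copies, advancing iov_base and shrinking iov_len, so each call picks up where the previous one stopped. A minimal stand-alone sketch of that chunked-copy idiom follows; it is not taken from any example on this page, and the names copy_in_chunks, dst and chunk are illustrative only.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative helper (hypothetical name): copy "total" bytes out of an
 * iovec in fixed-size chunks.  This only works because memcpy_fromiovec()
 * updates the iovec on every call. */
static int copy_in_chunks(void *dst, struct iovec *iov, size_t total,
			  size_t chunk)
{
	size_t copied = 0;

	while (copied < total) {
		size_t n = min(chunk, total - copied);

		/* Consumes n bytes from iov; returns -EFAULT on a fault. */
		if (memcpy_fromiovec((unsigned char *)dst + copied, iov, n))
			return -EFAULT;
		copied += n;
	}
	return 0;
}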
Example #11
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	unsigned char *buf;
	struct mgmt_hdr *hdr;
	u16 opcode, len;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = (struct mgmt_hdr *) buf;
	opcode = get_unaligned_le16(&hdr->opcode);
	len = get_unaligned_le16(&hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	switch (opcode) {
	default:
		BT_DBG("Unknown op %u", opcode);
		cmd_status(sk, opcode, 0x01);
		break;
	}

	err = msglen;

done:
	kfree(buf);
	return err;
}
Example #12
static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
                      struct msghdr *msg, size_t len)
{
    struct sockaddr_pn *target;
    struct sk_buff *skb;
    int err;

    if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
                           MSG_CMSG_COMPAT))
        return -EOPNOTSUPP;

    if (msg->msg_name == NULL)
        return -EDESTADDRREQ;

    if (msg->msg_namelen < sizeof(struct sockaddr_pn))
        return -EINVAL;

    target = (struct sockaddr_pn *)msg->msg_name;
    if (target->spn_family != AF_PHONET)
        return -EAFNOSUPPORT;

    skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
                              msg->msg_flags & MSG_DONTWAIT, &err);
    if (skb == NULL)
        return err;
    skb_reserve(skb, MAX_PHONET_HEADER);

    err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
    if (err < 0) {
        kfree_skb(skb);
        return err;
    }

    /*
     * Fill in the Phonet header and
     * finally pass the packet forwards.
     */
    err = pn_skb_send(sk, skb, target);

    /* If ok, return len. */
    return (err >= 0) ? len : err;
}
Example #13
/* Get message/packet data from user-land */
static int spx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
			struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	int flags = msg->msg_flags;
	struct sk_buff *skb;
	int err, offset, size;

	if(len > 534)
                return (-EMSGSIZE);
        if(sk->zapped)
                return (-ENOTCONN); /* Socket not bound */
	if(flags&~MSG_DONTWAIT)
                return (-EINVAL);

	offset	= ipx_if_offset(sk->tp_pinfo.af_spx.dest_addr.net);
        size 	= offset + sizeof(struct ipxspxhdr) + len;

	cli();
        skb  	= sock_alloc_send_skb(sk, size, 0, flags&MSG_DONTWAIT, &err);
        if(skb == NULL)
                return (err);
	sti();

	skb->sk = sk;
        skb_reserve(skb, offset);
	skb->h.raw = skb->nh.raw = skb_put(skb, sizeof(struct ipxspxhdr));

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if(err)
        {
                kfree_skb(skb);
                return (-EFAULT);
        }

	err = spx_transmit(sk, skb, DATA, len);
	if(err)
		return (-EAGAIN);

        return (len);
}
Example #14
static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
					struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	struct sk_buff *skb;
	int rc;

	nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);

	if (msg->msg_namelen)
		return -EOPNOTSUPP;

	if (sock->state != SS_CONNECTED)
		return -ENOTCONN;

	skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
					msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		return rc;

	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);

	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	if (!nfc_rawsock(sk)->tx_work_scheduled) {
		schedule_work(&nfc_rawsock(sk)->tx_work);
		nfc_rawsock(sk)->tx_work_scheduled = true;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return len;
}
Example #15
static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len)
{
	struct sockaddr_pn *target;
	struct sk_buff *skb;
	int err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_name == NULL)
		return -EDESTADDRREQ;

	if (msg->msg_namelen < sizeof(struct sockaddr_pn))
		return -EINVAL;

	target = (struct sockaddr_pn *)msg->msg_name;
	if (target->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
					msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		return err;
	skb_reserve(skb, MAX_PHONET_HEADER);

	err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	
	/*
	 * Fill in the Phonet header and
	 * finally pass the packet forwards.
	 */
	err = pn_skb_send(sk, skb, target);

	
	/* If ok, return len. */
	return (err >= 0) ? len : err;
}
Example #16
static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	struct sk_buff *skb;
	int rc;

	pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);

	if (msg->msg_namelen)
		return -EOPNOTSUPP;

	if (sock->state != SS_CONNECTED)
		return -ENOTCONN;

	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
	if (skb == NULL)
		return rc;

	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	if (!nfc_rawsock(sk)->tx_work_scheduled) {
		schedule_work(&nfc_rawsock(sk)->tx_work);
		nfc_rawsock(sk)->tx_work_scheduled = true;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return len;
}
Example #17
/* virtual */ SysStatusUval
StreamServerConsole::sendto(struct iovec* vec, uval veclen, uval flags,
			    const char *addr, uval addrLen, 
			    GenState &moreAvail, 
			    void *controlData, uval controlDataLen,
			    __XHANDLE xhandle)
{
    uval len = vecLength(vec, veclen);
    char* buf = (char*)allocLocalStrict(len);

    tassertMsg((controlDataLen == 0), "oops\n");
    memcpy_fromiovec(buf, vec, veclen, len);

    SysConsole->write(buf, len);

    freeLocalStrict(buf, len);
    calcAvailable(moreAvail);

    if (XHANDLE_IDX(xhandle) != CObjGlobalsKern::ConsoleIndex) {
	clnt(xhandle)->setAvail(moreAvail);
    }

    return len;
}
Example #18
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
                            struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;
	struct sk_buff *skb;
	int err;

	DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (!hdev)
		return -EBADFD;

	if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
		return err;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	skb->dev = (void *) hdev;
	skb->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	/* Send frame to HCI core */
	hci_send_raw(skb);

	return len;
}
Example #19
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
                            struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4)
		return -EINVAL;
	
	lock_sock(sk);

	if (!(hdev = hci_pi(sk)->hdev)) {
		err = -EBADFD;
		goto done;
	}

	if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	skb->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (skb->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = __le16_to_cpu(get_unaligned((u16 *)skb->data));
		u16 ogf = cmd_opcode_ogf(opcode);
		u16 ocf = cmd_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) || 
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    			!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) {
			skb_queue_tail(&hdev->raw_q, skb);
			hci_sched_tx(hdev);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		hci_sched_tx(hdev);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
Example #20
/*
 * Route an outgoing frame from a socket.
 */
int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
			struct iovec *iov, size_t len, int noblock)
{
	struct sk_buff *skb;
	struct ipx_sock *ipxs = ipx_sk(sk);
	struct ipx_interface *intrfc;
	struct ipxhdr *ipx;
	size_t size;
	int ipx_offset;
	struct ipx_route *rt = NULL;
	int rc;

	/* Find the appropriate interface on which to send packet */
	if (!usipx->sipx_network && ipx_primary_net) {
		usipx->sipx_network = ipx_primary_net->if_netnum;
		intrfc = ipx_primary_net;
	} else {
		rt = ipxrtr_lookup(usipx->sipx_network);
		rc = -ENETUNREACH;
		if (!rt)
			goto out;
		intrfc = rt->ir_intrfc;
	}

	ipxitf_hold(intrfc);
	ipx_offset = intrfc->if_ipx_offset;
	size = sizeof(struct ipxhdr) + len + ipx_offset;

	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	if (!skb)
		goto out_put;

	skb_reserve(skb, ipx_offset);
	skb->sk = sk;

	/* Fill in IPX header */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_put(skb, sizeof(struct ipxhdr));
	ipx = ipx_hdr(skb);
	ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr));
	IPX_SKB_CB(skb)->ipx_tctrl = 0;
	ipx->ipx_type 	 = usipx->sipx_type;

	IPX_SKB_CB(skb)->last_hop.index = -1;
#ifdef CONFIG_IPX_INTERN
	IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
	memcpy(ipx->ipx_source.node, ipxs->node, IPX_NODE_LEN);
#else
	rc = ntohs(ipxs->port);
	if (rc == 0x453 || rc == 0x452) {
		/* RIP/SAP special handling for mars_nwe */
		IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum;
		memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN);
	} else {
		IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
		memcpy(ipx->ipx_source.node, ipxs->intrfc->if_node,
			IPX_NODE_LEN);
	}
#endif	/* CONFIG_IPX_INTERN */
	ipx->ipx_source.sock		= ipxs->port;
	IPX_SKB_CB(skb)->ipx_dest_net	= usipx->sipx_network;
	memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN);
	ipx->ipx_dest.sock		= usipx->sipx_port;

	rc = memcpy_fromiovec(skb_put(skb, len), iov, len);
	if (rc) {
		kfree_skb(skb);
		goto out_put;
	}

	/* Apply checksum. Not allowed on 802.3 links. */
	if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023))
		ipx->ipx_checksum = htons(0xFFFF);
	else
		ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));

	rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ?
			 rt->ir_router_node : ipx->ipx_dest.node);
out_put:
	ipxitf_put(intrfc);
	if (rt)
		ipxrtr_put(rt);
out:
	return rc;
}
Example #21
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
				MSG_CMSG_COMPAT)) ||
			!(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
					flags & MSG_DONTWAIT, &err);
	if (!skb)
		return -ENOBUFS;

	skb_reserve(skb, MAX_PHONET_HEADER + 3);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(sk_sleep(sk), &wait,
				TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(sk_sleep(sk), &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}
Example #22
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned short proto=0;
	int err;
	
	/*
	 *	Get and verify the address. 
	 */

	if (saddr)
	{
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return(-EINVAL);
		if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
			proto=saddr->spkt_protocol;
	}
	else
		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it 
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;
	
	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */
	 
	err = -EMSGSIZE;
 	if(len>dev->mtu+dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */
	 
	if (skb == NULL) 
		goto out_unlock;

	/*
	 *	Fill it in 
	 */
	 
	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	/* Try to align data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb->nh.raw = skb->data;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/*
	 *	Now send it
	 */

	dev_queue_xmit(skb);
	dev_put(dev);
	return(len);

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
Example #23
static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			     struct iovec iov[], unsigned int iov_size,
			     unsigned int *out_num, unsigned int *in_num,
			     struct vhost_log *log, unsigned int *log_num,
			     struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (indirect->len % sizeof desc) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     ARRAY_SIZE(vq->indirect));
	if (ret < 0) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (count > USHORT_MAX + 1) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (++found > count) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
				     sizeof desc)) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}
Example #24
static int CVE_2010_3848_linux2_6_23_econet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
	struct net_device *dev;
	struct ec_addr addr;
	int err;
	unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
	struct sk_buff *skb;
	struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[msg->msg_iovlen+1];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	int i;
	mm_segment_t oldfs;
#endif

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
		return -EINVAL;

	/*
	 *	Get and verify the address.
	 */

	mutex_lock(&econet_mutex);

	if (saddr == NULL) {
		struct econet_sock *eo = ec_sk(sk);

		addr.station = eo->station;
		addr.net     = eo->net;
		port	     = eo->port;
		cb	     = eo->cb;
	} else {
		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
			mutex_unlock(&econet_mutex);
			return -EINVAL;
		}
		addr.station = saddr->addr.station;
		addr.net = saddr->addr.net;
		port = saddr->port;
		cb = saddr->cb;
	}

	/* Look for a device with the right network number. */
	dev = net2dev_map[addr.net];

	/* If not directly reachable, use some default */
	if (dev == NULL) {
		dev = net2dev_map[0];
		/* No interfaces at all? */
		if (dev == NULL) {
			mutex_unlock(&econet_mutex);
			return -ENETDOWN;
		}
	}

	if (len + 15 > dev->mtu) {
		mutex_unlock(&econet_mutex);
		return -EMSGSIZE;
	}

	if (dev->type == ARPHRD_ECONET) {
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		unsigned short proto = 0;

		dev_hold(dev);

		skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev),
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb==NULL)
			goto out_unlock;

		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);

		eb = (struct ec_cb *)&skb->cb;

		/* BUG: saddr may be NULL */
		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		if (dev->hard_header) {
			int res;
			struct ec_framehdr *fh;
			err = -EINVAL;
			res = dev->hard_header(skb, dev, ntohs(proto),
					       &addr, NULL, len);
			/* Poke in our control byte and
			   port number.  Hack, hack.  */
			fh = (struct ec_framehdr *)(skb->data);
			fh->cb = cb;
			fh->port = port;
			if (sock->type != SOCK_DGRAM) {
				skb_reset_tail_pointer(skb);
				skb->len = 0;
			} else if (res < 0)
				goto out_free;
		}

		/* Copy the data. Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->sk_priority;
		if (err)
			goto out_free;

		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;

		/*
		 *	Now send it
		 */

		dev_queue_xmit(skb);
		dev_put(dev);
		mutex_unlock(&econet_mutex);
		return(len);

	out_free:
		kfree_skb(skb);
	out_unlock:
		if (dev)
			dev_put(dev);
#else
		err = -EPROTOTYPE;
#endif
		mutex_unlock(&econet_mutex);

		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL) {
		mutex_unlock(&econet_mutex);
		return -ENETDOWN;		/* No socket - can't send */
	}

	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev;
		unsigned long network = 0;

		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev) {
			if (idev->ifa_list)
				network = ntohl(idev->ifa_list->ifa_address) &
					0xffffff00;		/* !!! */
		}
		rcu_read_unlock();
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */
	ah.pad = 0;

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	/*
	 * XXX: that is b0rken.  We can't mix userland and kernel pointers
	 * in iovec, since on a lot of platforms copy_from_user() will
	 * *not* work with the kernel and userland ones at the same time,
	 * regardless of what we do with set_fs().  And we are talking about
	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
	 * apply.  Any suggestions on fixing that code?		-- AV
	 */
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;
	for (i = 0; i < msg->msg_iovlen; i++) {
		void __user *base = msg->msg_iov[i].iov_base;
		size_t len = msg->msg_iov[i].iov_len;
		/* Check it now since we switch to KERNEL_DS later. */
		if (!access_ok(VERIFY_READ, base, len)) {
			mutex_unlock(&econet_mutex);
			return -EFAULT;
		}
		iov[i+1].iov_base = base;
		iov[i+1].iov_len = len;
		size += len;
	}

	/* Get a skbuff (no data, just holds our cb information) */
	if ((skb = sock_alloc_send_skb(sk, 0,
				       msg->msg_flags & MSG_DONTWAIT,
				       &err)) == NULL) {
		mutex_unlock(&econet_mutex);
		return err;
	}

	eb = (struct ec_cb *)&skb->cb;

	eb->cookie = saddr->cookie;
	eb->timeout = (5*HZ);
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags=0;

	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);
#else
	err = -EPROTOTYPE;
#endif
	mutex_unlock(&econet_mutex);

	return err;
}
Example #25
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned short proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address. 
	 */
	 
	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}


	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -EMSGSIZE;
	if (len > dev->mtu+reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb==NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb->tail = skb->data;
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return(len);

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
Example #26
static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	if (size != sizeof(struct can_frame))
		return -EINVAL;

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
				  &err);
	if (!skb)
		goto put_dev;

	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
	if (err < 0)
		goto free_skb;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto free_skb;

	/* to be able to check the received tx sock reference in raw_rcv() */
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;

	skb->dev = dev;
	skb->sk  = sk;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
Example #27
static int
MksckDgramSendMsg(struct kiocb *kiocb,
		  struct socket *sock,
		  struct msghdr *msg,
		  size_t len)
{
	int             err = 0;
	struct sock    *sk = sock->sk;
	Mksck          *peerMksck;
	Mksck_Datagram *dg;
	uint32          needed;
	uint32          write;
	Mksck_Address   fromAddr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (len > MKSCK_XFER_MAX)
		return -EMSGSIZE;

	lock_sock(sk);
	do {
		Mksck *mksck;
		Mksck_Address peerAddr = {
		    .addr =
			(msg->msg_name ?
			 ((struct sockaddr_mk *)msg->msg_name)->mk_addr.addr :
			 MKSCK_ADDR_UNDEF)
		};

		err = MksckTryBind(sk);
		if (err)
			break;

		mksck = sk->sk_protinfo;
		fromAddr = mksck->addr;

		peerMksck = mksck->peer;
		if (peerMksck) {
			if (peerAddr.addr != MKSCK_ADDR_UNDEF &&
			    peerAddr.addr != mksck->peerAddr.addr) {
				err = -EISCONN;
				break;
			}

			ATOMIC_ADDV(peerMksck->refCount, 1);
		} else if (peerAddr.addr == MKSCK_ADDR_UNDEF) {
			err = -ENOTCONN;
		} else {
			err = LockPeer(peerAddr, &peerMksck);
		}
	} while (0);
	release_sock(sk);

	if (err)
		return err;

	needed = MKSCK_DGSIZE(len);
	while (1) {
		err = Mutex_Lock(&peerMksck->mutex, MutexModeEX);
		if (err < 0)
			goto decRefc;

		if (peerMksck->shutDown & MKSCK_SHUT_RD) {
			err = -ENOTCONN;
			goto unlockDecRefc;
		}

		write = Mksck_FindSendRoom(peerMksck, needed);
		if (write != MKSCK_FINDSENDROOM_FULL)
			break;

		if (msg->msg_flags & MSG_DONTWAIT) {
			err = -EAGAIN;
			goto unlockDecRefc;
		}

		peerMksck->foundFull++;
		err = Mutex_UnlSleep(&peerMksck->mutex, MutexModeEX,
				     MKSCK_CVAR_ROOM);
		if (err < 0) {
			PRINTK("MksckDgramSendMsg: aborted\n");
			goto decRefc;
		}
	}

	dg = (void *)&peerMksck->buff[write];

	dg->fromAddr = fromAddr;
	dg->len      = len;

	err = memcpy_fromiovec(dg->data, msg->msg_iov, len);
	if (err != 0)
		goto unlockDecRefc;

	Mksck_IncWriteIndex(peerMksck, write, needed);

	Mutex_UnlWake(&peerMksck->mutex, MutexModeEX, MKSCK_CVAR_FILL, false);

	if (peerMksck->rcvCBEntryMVA != 0) {
		MksckPage *peerMksckPage = Mksck_ToSharedPage(peerMksck);

		err = Mutex_Lock(&peerMksckPage->mutex, MutexModeSH);
		if (err == 0) {
			uint32 sockIdx = peerMksck->index;
			struct MvpkmVM *vm =
				(struct MvpkmVM *)peerMksckPage->vmHKVA;

			if (vm) {
				WorldSwitchPage *wsp = vm->wsp;

				ASSERT(sockIdx <
				       8 * sizeof(peerMksckPage->wakeVMMRecv));
				ATOMIC_ORV(peerMksckPage->wakeVMMRecv,
					   1U << sockIdx);

				if (wsp)
					Mvpkm_WakeGuest(vm, ACTION_MKSCK);
			}
			Mutex_Unlock(&peerMksckPage->mutex, MutexModeSH);
		}
	}

	if (!err)
		err = len;

decRefc:
	Mksck_DecRefc(peerMksck);
	return err;

unlockDecRefc:
	Mutex_Unlock(&peerMksck->mutex, MutexModeEX);
	goto decRefc;
}


static int
MksckFault(struct vm_area_struct *vma,
	   struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int
MksckMMap(struct file *file,
	  struct socket *sock,
	  struct vm_area_struct *vma)
{
	vma->vm_ops = &mksckVMOps;

	return 0;
}
Example #28
static int packet_sendmsg(struct sock *sk, struct msghdr *msg, int len,
	      int noblock, int flags)
{
	struct sk_buff *skb;
	struct device *dev;
	struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
	unsigned short proto=0;

	/*
	 *	Check the flags. 
	 */

	if (flags) 
		return(-EINVAL);

	/*
	 *	Get and verify the address. 
	 */
	 
	if (saddr) 
	{
		if (msg->msg_namelen < sizeof(struct sockaddr)) 
			return(-EINVAL);
		if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
			proto=saddr->spkt_protocol;
	} 
	else
		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */
	
	/*
	 *	Find the device first to size check it 
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get(saddr->spkt_device);
	if (dev == NULL) 
	{
		return(-ENODEV);
  	}
	
	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */
	 
	if(len>dev->mtu+dev->hard_header_len)
  		return -EMSGSIZE;

	skb = sock_wmalloc(sk, len, 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */
	 
	if (skb == NULL) 
	{
		return(-ENOBUFS);
	}
	
	/*
	 *	Fill it in 
	 */
	 
	skb->sk = sk;
	skb->free = 1;
	memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	skb->arp = 1;		/* No ARP needs doing on this (complete) frame */
	skb->protocol = proto;

	/*
	 *	Now send it
	 */

	if (dev->flags & IFF_UP) 
		dev_queue_xmit(skb, dev, sk->priority);
	else
		kfree_skb(skb, FREE_WRITE);
	return(len);
}
Example #29
static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent=0;
	l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
	else
		hlen = L2CAP_HDR_SIZE;

	count = MIN(conn->mtu - hlen, len);

	skb = bluez_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__u16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = MIN(conn->mtu, len);

		*frag = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;
		
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
Example #30
static int econet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
			  struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
	struct device *dev;
	struct ec_addr addr;
	struct ec_device *edev;
	int err;
	unsigned char port, cb;
	struct sk_buff *skb;
	struct ec_cb *eb;
#ifdef CONFIG_ECONET_NATIVE
	unsigned short proto = 0;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[msg->msg_iovlen+1];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	int i;
	mm_segment_t oldfs;
#endif
		
	/*
	 *	Check the flags. 
	 */

	if (msg->msg_flags&~MSG_DONTWAIT) 
		return(-EINVAL);

	/*
	 *	Get and verify the address. 
	 */
	 
	if (saddr == NULL) {
		addr.station = sk->protinfo.af_econet->station;
		addr.net = sk->protinfo.af_econet->net;
		port = sk->protinfo.af_econet->port;
		cb = sk->protinfo.af_econet->cb;
	} else {
		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) 
			return -EINVAL;
		addr.station = saddr->addr.station;
		addr.net = saddr->addr.net;
		port = saddr->port;
		cb = saddr->cb;
	}

	/* Look for a device with the right network number. */
	for (edev = edevlist; edev && (edev->net != addr.net); 
	     edev = edev->next);

	/* Bridge?  What's that? */
	if (edev == NULL) 
		return -ENETUNREACH;

	dev = edev->dev;

	if (dev->type == ARPHRD_ECONET)
	{
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		dev_lock_list();
		
		skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15, 0, 
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb==NULL)
			goto out_unlock;
		
		skb_reserve(skb, (dev->hard_header_len+15)&~15);
		skb->nh.raw = skb->data;
		
		eb = (struct ec_cb *)&skb->cb;
		
		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		if (dev->hard_header) {
			int res;
			err = -EINVAL;
			res = dev->hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
			if (sock->type != SOCK_DGRAM) {
				skb->tail = skb->data;
				skb->len = 0;
			} else if (res < 0)
				goto out_free;
		}
		
		/* Copy the data. Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->priority;
		if (err)
			goto out_free;
		
		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;
		
		/*
		 *	Now send it
		 */
		
		dev_unlock_list();
		dev_queue_xmit(skb);
		return(len);

	out_free:
		kfree_skb(skb);
	out_unlock:
		dev_unlock_list();
#else
		err = -EPROTOTYPE;
#endif
		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL)
		return -ENETDOWN;		/* No socket - can't send */
	
	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev = (struct in_device *)dev->ip_ptr;
		unsigned long network = ntohl(idev->ifa_list->ifa_address) & 
			0xffffff00;		/* !!! */
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */
	ah.pad = 0;

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;
	for (i = 0; i < msg->msg_iovlen; i++) {
		void *base = msg->msg_iov[i].iov_base;
		size_t len = msg->msg_iov[i].iov_len;
		/* Check it now since we switch to KERNEL_DS later. */
		if ((err = verify_area(VERIFY_READ, base, len)) < 0)
			return err;
		iov[i+1].iov_base = base;
		iov[i+1].iov_len = len;
		size += len;
	}

	/* Get a skbuff (no data, just holds our cb information) */
	if ((skb = sock_alloc_send_skb(sk, 0, 0, 
			     msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	eb = (struct ec_cb *)&skb->cb;

	eb->cookie = saddr->cookie;
	eb->timeout = (5*HZ);
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags=0;

	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);
#else
	err = -EPROTOTYPE;
#endif
	return err;
}
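Taken together, most of the sendmsg() examples above share the same shape: allocate an skb sized for the payload plus protocol headroom, reserve that headroom, copy the user iovec into the skb tail with memcpy_fromiovec(), and free the skb if the copy faults. Below is a minimal sketch of that shared pattern, assuming the pre-iov_iter kernel API used throughout this page; example_sendmsg and its headroom parameter are illustrative only and do not come from any example above.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Illustrative sketch (hypothetical function name): the common
 * allocate / reserve / memcpy_fromiovec / queue sequence. */
static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   unsigned int headroom)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, headroom + len,
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	skb_reserve(skb, headroom);

	/* Returns 0 on success or -EFAULT if the user buffer faults. */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* Queue the buffer on the socket's write queue, as the DCCP and
	 * NFC examples above do before kicking their transmit paths. */
	skb_queue_tail(&sk->sk_write_queue, skb);
	return len;
}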