static int
kni_sock_release(struct socket *sock)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;

	if (q == NULL)
		return 0;

	if (NULL != (kni = q->kni)) {
		kni->vq_status = BE_STOP;
		KNI_VHOST_WAIT_WQ_SAFE();
		kni->vhost_queue = NULL;
		q->kni = NULL;
	}

	if (q->sockfd != -1)
		q->sockfd = -1;

	sk_set_socket(&q->sk, NULL);
	sock->sk = NULL;

	sock_put(&q->sk);

	KNI_DBG("dummy sock release done\n");

	return 0;
}
static unsigned int
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int mask = 0;

	if (unlikely(q == NULL || q->kni == NULL))
		return POLLERR;

	kni = q->kni;
	KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
		kni->group_id, (uint64_t)sock->wq);

	poll_wait(file, &sock->wq->wait, wait);

	if (kni_fifo_count(kni->rx_q) > 0)
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
int
kni_sock_getname(struct socket *sock, struct sockaddr *addr,
		 int *sockaddr_len, int peer)
{
	KNI_DBG("dummy sock getname\n");
	((struct sockaddr_ll *)addr)->sll_family = AF_PACKET;
	return 0;
}
/*
 * Deal with a transmit timeout.
 */
static void
kni_net_tx_timeout(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
		jiffies - dev->trans_start);

	kni->stats.tx_errors++;
	netif_wake_queue(dev);
}
void
kni_net_lb_init(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("kni_net_lb_init\n");

	init_waitqueue_head(&kni->wq);
	mutex_init(&kni->sync_lock);

	kni->dev_addr = NULL;

	kni_lb_setup(dev);

	dev->netdev_ops = &kni_net_netdev_ops;
	dev->watchdog_timeo = WD_TIMEOUT;
}
/*
 * Ioctl commands
 */
static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct kni_dev *kni = netdev_priv(dev);

	if (kni->kni_released)
		return -EAGAIN;

	KNI_DBG("kni_net_ioctl %d\n", kni->group_id);

	return 0;
}
int
kni_vhost_init(struct kni_dev *kni)
{
	struct net_device *dev = kni->net_dev;

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);

	kni->vq_status = BE_STOP;

	KNI_DBG("kni_vhost_init done\n");

	return 0;
}
void
kni_net_init(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("kni_net_init\n");

	init_waitqueue_head(&kni->wq);
	mutex_init(&kni->sync_lock);

	ether_setup(dev); /* assign some of the fields */
	dev->netdev_ops = &kni_net_netdev_ops;
	dev->header_ops = &kni_net_header_ops;
	dev->watchdog_timeo = WD_TIMEOUT;
}
int
kni_vhost_backend_release(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (q == NULL)
		return 0;

	/* detach from kni */
	q->kni = NULL;

	KNI_DBG("release backend done\n");

	return 0;
}
static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;
	ret = kni_net_process_request(kni, &req);
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					       &kni_raw_proto);
	if (!q)
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE *
			   sizeof(struct sk_buff), GFP_KERNEL);
	if (!q->cache)
		goto free_fd;

	fifo = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *) +
		       sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo)
		goto free_cache;

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void **)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue; the write barrier makes the
	 * queue pointer visible before vq_status flips to BE_START
	 */
	q->kni = kni;
	kni->vhost_queue = q;
	wmb();
	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		"sk->sk_wq=0x%16llx",
		q->sockfd, (uint64_t)q->sock->wq,
		(uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		"sk->sk_sleep=0x%16llx",
		q->sockfd, (uint64_t)&q->sock->wait,
		(uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	/* sock_release() frees the socket; do not touch it afterwards */
	sock_release(q->sock);
	q->sock = NULL;

free_sk:
	sk_free((struct sock *)q);

	return err;
}
#ifdef HAVE_KIOCB_MSG_PARAM
static int
kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#else
static int
kni_sock_rcvmsg(struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	int vnet_hdr_len = 0;
	int pkt_len = 0;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	static struct virtio_net_hdr
		__attribute__ ((unused)) vnet_hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		/* len is unsigned; check before subtracting to avoid wrap */
		if (len < (size_t)vnet_hdr_len)
			return -EINVAL;
		len -= vnet_hdr_len;
	}
#endif

	pkt_len = kni_vhost_net_rx(q->kni, m, vnet_hdr_len, len);
	if (unlikely(pkt_len == 0))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* no need to copy hdr when no pkt received */
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter((void *)&vnet_hdr, vnet_hdr_len,
				  &m->msg_iter) != (size_t)vnet_hdr_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, (void *)&vnet_hdr,
				       0, vnet_hdr_len)))
#endif /* HAVE_IOV_ITER_MSGHDR */
		return -EFAULT;
#endif /* RTE_KNI_VHOST_VNET_HDR_EN */

	KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
		   (unsigned long)len, q->flags, pkt_len);

	return pkt_len + vnet_hdr_len;
}

/* dummy tap like ioctl */
static int
kni_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	KNI_DBG("tap ioctl cmd 0x%08x\n", cmd);

	switch (cmd) {
	case TUNSETIFF:
		KNI_DBG("TUNSETIFF\n");
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		KNI_DBG("TUNGETIFF\n");
		rcu_read_lock_bh();
		kni = rcu_dereference_bh(q->kni);
		if (kni)
			dev_hold(kni->net_dev);
		rcu_read_unlock_bh();

		if (!kni)
			return -ENOLINK;

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, kni->net_dev->name,
				 IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		dev_put(kni->net_dev);
		return ret;

	case TUNGETFEATURES:
		KNI_DBG("TUNGETFEATURES\n");
		u = IFF_TAP | IFF_NO_PI;
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
		u |= IFF_VNET_HDR;
#endif
		if (put_user(u, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		KNI_DBG("TUNSETSNDBUF\n");
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		KNI_DBG("TUNGETVNETHDRSZ %d\n", s);
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		KNI_DBG("TUNSETVNETHDRSZ %d\n", s);
		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		KNI_DBG("TUNSETOFFLOAD %lx\n", arg);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
		/* not support any offload yet */
		if (!(q->flags & IFF_VNET_HDR))
			return -EINVAL;

		return 0;
#else
		return -EINVAL;
#endif

	default:
		KNI_DBG("NOT SUPPORT\n");
		return -EINVAL;
	}
}